From 6c0515c9880b5310dd0647a5881233f07898d812 Mon Sep 17 00:00:00 2001
From: Gregory Thiemonge
Date: Wed, 1 Mar 2023 15:46:38 +0100
Subject: [PATCH] amphorav1 removal

Removing the amphorav1 provider; it was deprecated in Zed and can now
be removed in Bobcat (2023.2).

Change-Id: I2ecfc0f40549d80b3058b76c619ff4ef35aadb97
---
 doc/source/conf.py | 2 -
 doc/source/contributor/devref/flows.rst | 12 -
 .../api/drivers/amphora_driver/v1/__init__.py | 11 -
 .../api/drivers/amphora_driver/v1/driver.py | 547 ----
 octavia/api/v2/controllers/amphora.py | 16 +-
 octavia/cmd/octavia_worker.py | 3 -
 octavia/common/constants.py | 2 -
 .../healthmanager/health_manager.py | 6 +-
 .../controller/housekeeping/house_keeping.py | 7 +-
 octavia/controller/queue/v1/__init__.py | 11 -
 octavia/controller/queue/v1/consumer.py | 64 -
 octavia/controller/queue/v1/endpoints.py | 160 -
 octavia/controller/worker/v1/__init__.py | 11 -
 .../controller/worker/v1/controller_worker.py | 1157 ------
 .../controller/worker/v1/flows/__init__.py | 11 -
 .../worker/v1/flows/amphora_flows.py | 610 ----
 .../worker/v1/flows/health_monitor_flows.py | 105 -
 .../worker/v1/flows/l7policy_flows.py | 94 -
 .../worker/v1/flows/l7rule_flows.py | 100 -
 .../worker/v1/flows/listener_flows.py | 128 -
 .../worker/v1/flows/load_balancer_flows.py | 686 ----
 .../worker/v1/flows/member_flows.py | 230 --
 .../controller/worker/v1/flows/pool_flows.py | 127 -
 .../controller/worker/v1/tasks/__init__.py | 11 -
 .../worker/v1/tasks/amphora_driver_tasks.py | 453 ---
 .../controller/worker/v1/tasks/cert_task.py | 51 -
 .../worker/v1/tasks/compute_tasks.py | 335 --
 .../worker/v1/tasks/database_tasks.py | 2764 -----------------
 .../worker/v1/tasks/lifecycle_tasks.py | 173 --
 .../controller/worker/v1/tasks/model_tasks.py | 41 -
 .../worker/v1/tasks/network_tasks.py | 970 ------
 .../controller/worker/v1/tasks/retry_tasks.py | 74 -
 .../api/drivers/amphora_driver/v1/__init__.py | 11 -
 .../drivers/amphora_driver/v1/test_driver.py | 824 -----
 .../healthmanager/test_health_manager.py | 26 +-
 .../housekeeping/test_house_keeping.py | 67 +-
 .../unit/controller/queue/v1/__init__.py | 11 -
 .../unit/controller/queue/v1/test_consumer.py | 61 -
 .../controller/queue/v1/test_endpoints.py | 189 --
 .../unit/controller/worker/v1/__init__.py | 11 -
 .../controller/worker/v1/flows/__init__.py | 11 -
 .../worker/v1/flows/test_amphora_flows.py | 474 ---
 .../v1/flows/test_health_monitor_flows.py | 72 -
 .../worker/v1/flows/test_l7policy_flows.py | 67 -
 .../worker/v1/flows/test_l7rule_flows.py | 67 -
 .../worker/v1/flows/test_listener_flows.py | 91 -
 .../v1/flows/test_load_balancer_flows.py | 430 ---
 .../worker/v1/flows/test_member_flows.py | 106 -
 .../worker/v1/flows/test_pool_flows.py | 77 -
 .../controller/worker/v1/tasks/__init__.py | 11 -
 .../v1/tasks/test_amphora_driver_tasks.py | 792 -----
 .../worker/v1/tasks/test_cert_task.py | 46 -
 .../worker/v1/tasks/test_compute_tasks.py | 634 ----
 .../worker/v1/tasks/test_database_tasks.py | 2617 ----------------
 .../v1/tasks/test_database_tasks_quota.py | 415 ---
 .../worker/v1/tasks/test_lifecycle_tasks.py | 401 ---
 .../worker/v1/tasks/test_model_tasks.py | 44 -
 .../worker/v1/tasks/test_network_tasks.py | 1788 -----------
 .../worker/v1/tasks/test_retry_tasks.py | 47 -
 .../worker/v1/test_controller_worker.py | 2096 -------------
 .../v2/tasks/test_amphora_driver_tasks.py | 2 +-
 .../worker/v2/test_controller_worker.py | 4 +-
 .../removing-amphorav1-ff43992c07a2071d.yaml | 10 +
 setup.cfg | 3 +-
 tools/check_unit_test_structure.sh | 1 -
 tools/flow-list.txt | 32 -
 66 files changed, 29 insertions(+), 20481 deletions(-)
 delete mode 100644 octavia/api/drivers/amphora_driver/v1/__init__.py
 delete mode 100644 octavia/api/drivers/amphora_driver/v1/driver.py
 delete mode 100644 octavia/controller/queue/v1/__init__.py
 delete mode 100644 octavia/controller/queue/v1/consumer.py
 delete mode 100644 octavia/controller/queue/v1/endpoints.py
 delete mode 100644 octavia/controller/worker/v1/__init__.py
 delete mode 100644 octavia/controller/worker/v1/controller_worker.py
 delete mode 100644 octavia/controller/worker/v1/flows/__init__.py
 delete mode 100644 octavia/controller/worker/v1/flows/amphora_flows.py
 delete mode 100644 octavia/controller/worker/v1/flows/health_monitor_flows.py
 delete mode 100644 octavia/controller/worker/v1/flows/l7policy_flows.py
 delete mode 100644 octavia/controller/worker/v1/flows/l7rule_flows.py
 delete mode 100644 octavia/controller/worker/v1/flows/listener_flows.py
 delete mode 100644 octavia/controller/worker/v1/flows/load_balancer_flows.py
 delete mode 100644 octavia/controller/worker/v1/flows/member_flows.py
 delete mode 100644 octavia/controller/worker/v1/flows/pool_flows.py
 delete mode 100644 octavia/controller/worker/v1/tasks/__init__.py
 delete mode 100644 octavia/controller/worker/v1/tasks/amphora_driver_tasks.py
 delete mode 100644 octavia/controller/worker/v1/tasks/cert_task.py
 delete mode 100644 octavia/controller/worker/v1/tasks/compute_tasks.py
 delete mode 100644 octavia/controller/worker/v1/tasks/database_tasks.py
 delete mode 100644 octavia/controller/worker/v1/tasks/lifecycle_tasks.py
 delete mode 100644 octavia/controller/worker/v1/tasks/model_tasks.py
 delete mode 100644 octavia/controller/worker/v1/tasks/network_tasks.py
 delete mode 100644 octavia/controller/worker/v1/tasks/retry_tasks.py
 delete mode 100644 octavia/tests/unit/api/drivers/amphora_driver/v1/__init__.py
 delete mode 100644 octavia/tests/unit/api/drivers/amphora_driver/v1/test_driver.py
 delete mode 100644 octavia/tests/unit/controller/queue/v1/__init__.py
 delete mode 100644 octavia/tests/unit/controller/queue/v1/test_consumer.py
 delete mode 100644 octavia/tests/unit/controller/queue/v1/test_endpoints.py
 delete mode 100644 octavia/tests/unit/controller/worker/v1/__init__.py
 delete mode 100644 octavia/tests/unit/controller/worker/v1/flows/__init__.py
 delete mode 100644 octavia/tests/unit/controller/worker/v1/flows/test_amphora_flows.py
 delete mode 100644 octavia/tests/unit/controller/worker/v1/flows/test_health_monitor_flows.py
 delete mode 100644 octavia/tests/unit/controller/worker/v1/flows/test_l7policy_flows.py
 delete mode 100644 octavia/tests/unit/controller/worker/v1/flows/test_l7rule_flows.py
 delete mode 100644 octavia/tests/unit/controller/worker/v1/flows/test_listener_flows.py
 delete mode 100644 octavia/tests/unit/controller/worker/v1/flows/test_load_balancer_flows.py
 delete mode 100644 octavia/tests/unit/controller/worker/v1/flows/test_member_flows.py
 delete mode 100644 octavia/tests/unit/controller/worker/v1/flows/test_pool_flows.py
 delete mode 100644 octavia/tests/unit/controller/worker/v1/tasks/__init__.py
 delete mode 100644 octavia/tests/unit/controller/worker/v1/tasks/test_amphora_driver_tasks.py
 delete mode 100644 octavia/tests/unit/controller/worker/v1/tasks/test_cert_task.py
 delete mode 100644 octavia/tests/unit/controller/worker/v1/tasks/test_compute_tasks.py
 delete mode 100644 octavia/tests/unit/controller/worker/v1/tasks/test_database_tasks.py
 delete mode 100644 octavia/tests/unit/controller/worker/v1/tasks/test_database_tasks_quota.py
 delete mode 100644 octavia/tests/unit/controller/worker/v1/tasks/test_lifecycle_tasks.py
 delete mode 100644 octavia/tests/unit/controller/worker/v1/tasks/test_model_tasks.py
 delete mode 100644 octavia/tests/unit/controller/worker/v1/tasks/test_network_tasks.py
 delete mode 100644 octavia/tests/unit/controller/worker/v1/tasks/test_retry_tasks.py
 delete mode 100644 octavia/tests/unit/controller/worker/v1/test_controller_worker.py
 create mode 100644 releasenotes/notes/removing-amphorav1-ff43992c07a2071d.yaml
 delete mode 100644 tools/flow-list.txt

diff --git a/doc/source/conf.py b/doc/source/conf.py
index 17e52265f9..2ae73c1c94 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -27,8 +27,6 @@ sys.path.insert(0, os.path.abspath('.'))
 from tools import create_flow_docs
 
 # Generate our flow diagrams
-create_flow_docs.generate(
-    'tools/flow-list.txt', 'doc/source/contributor/devref/flow_diagrams')
 create_flow_docs.generate(
     'tools/flow-list-v2.txt',
     'doc/source/contributor/devref/flow_diagrams_v2')
diff --git a/doc/source/contributor/devref/flows.rst b/doc/source/contributor/devref/flows.rst
index 93cd0a449b..47f18824da 100644
--- a/doc/source/contributor/devref/flows.rst
+++ b/doc/source/contributor/devref/flows.rst
@@ -8,18 +8,6 @@ controller needs to take while managing load balancers.
 This document is meant as a reference for the key flows used in the
 Octavia controller.
 
-.. toctree::
-   :maxdepth: 1
-
-   flow_diagrams/AmphoraFlows.rst
-   flow_diagrams/HealthMonitorFlows.rst
-   flow_diagrams/L7PolicyFlows.rst
-   flow_diagrams/L7RuleFlows.rst
-   flow_diagrams/ListenerFlows.rst
-   flow_diagrams/LoadBalancerFlows.rst
-   flow_diagrams/MemberFlows.rst
-   flow_diagrams/PoolFlows.rst
-
 The following are flow diagrams for the **amphora V2** driver.
 
 .. toctree::
diff --git a/octavia/api/drivers/amphora_driver/v1/__init__.py b/octavia/api/drivers/amphora_driver/v1/__init__.py
deleted file mode 100644
index 94e731d201..0000000000
--- a/octavia/api/drivers/amphora_driver/v1/__init__.py
+++ /dev/null
@@ -1,11 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
diff --git a/octavia/api/drivers/amphora_driver/v1/driver.py b/octavia/api/drivers/amphora_driver/v1/driver.py
deleted file mode 100644
index 7888ad01d1..0000000000
--- a/octavia/api/drivers/amphora_driver/v1/driver.py
+++ /dev/null
@@ -1,547 +0,0 @@
-# Copyright 2018 Rackspace, US Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
- -from jsonschema import exceptions as js_exceptions -from jsonschema import validate - -from oslo_config import cfg -from oslo_log import log as logging -import oslo_messaging as messaging -from stevedore import driver as stevedore_driver - -from octavia_lib.api.drivers import data_models as driver_dm -from octavia_lib.api.drivers import exceptions -from octavia_lib.api.drivers import provider_base as driver_base -from octavia_lib.common import constants as lib_consts - -from octavia.api.drivers.amphora_driver import availability_zone_schema -from octavia.api.drivers.amphora_driver import flavor_schema -from octavia.api.drivers import utils as driver_utils -from octavia.common import constants as consts -from octavia.common import data_models -from octavia.common import rpc -from octavia.common import utils -from octavia.db import api as db_apis -from octavia.db import repositories -from octavia.network import base as network_base - -CONF = cfg.CONF -CONF.import_group('oslo_messaging', 'octavia.common.config') -LOG = logging.getLogger(__name__) -AMPHORA_SUPPORTED_LB_ALGORITHMS = [ - consts.LB_ALGORITHM_ROUND_ROBIN, - consts.LB_ALGORITHM_SOURCE_IP, - consts.LB_ALGORITHM_LEAST_CONNECTIONS] - -AMPHORA_SUPPORTED_PROTOCOLS = [ - lib_consts.PROTOCOL_TCP, - lib_consts.PROTOCOL_HTTP, - lib_consts.PROTOCOL_HTTPS, - lib_consts.PROTOCOL_TERMINATED_HTTPS, - lib_consts.PROTOCOL_PROXY, - lib_consts.PROTOCOL_PROXYV2, - lib_consts.PROTOCOL_UDP, - lib_consts.PROTOCOL_SCTP, - lib_consts.PROTOCOL_PROMETHEUS, -] - -VALID_L7POLICY_LISTENER_PROTOCOLS = [ - lib_consts.PROTOCOL_HTTP, - lib_consts.PROTOCOL_TERMINATED_HTTPS -] - - -class AmphoraProviderDriver(driver_base.ProviderDriver): - def __init__(self): - super().__init__() - topic = cfg.CONF.oslo_messaging.topic - self.target = messaging.Target( - namespace=consts.RPC_NAMESPACE_CONTROLLER_AGENT, - topic=topic, version="1.0", fanout=False) - self.client = rpc.get_client(self.target) - self.repositories = repositories.Repositories() - - def _validate_pool_algorithm(self, pool): - if pool.lb_algorithm not in AMPHORA_SUPPORTED_LB_ALGORITHMS: - msg = ('Amphora provider does not support %s algorithm.' - % pool.lb_algorithm) - raise exceptions.UnsupportedOptionError( - user_fault_string=msg, - operator_fault_string=msg) - - def _validate_listener_protocol(self, listener): - if listener.protocol not in AMPHORA_SUPPORTED_PROTOCOLS: - msg = ('Amphora provider does not support %s protocol. ' - 'Supported: %s' - % (listener.protocol, - ", ".join(AMPHORA_SUPPORTED_PROTOCOLS))) - raise exceptions.UnsupportedOptionError( - user_fault_string=msg, - operator_fault_string=msg) - - def _validate_alpn_protocols(self, obj): - if not obj.alpn_protocols: - return - supported = consts.AMPHORA_SUPPORTED_ALPN_PROTOCOLS - not_supported = set(obj.alpn_protocols) - set(supported) - if not_supported: - msg = ('Amphora provider does not support %s ALPN protocol(s). ' - 'Supported: %s' - % (", ".join(not_supported), ", ".join(supported))) - raise exceptions.UnsupportedOptionError( - user_fault_string=msg, - operator_fault_string=msg) - - # Load Balancer - def create_vip_port(self, loadbalancer_id, project_id, vip_dictionary, - additional_vip_dicts=None): - if additional_vip_dicts: - msg = 'Amphora v1 driver does not support additional_vips.' 
- raise exceptions.UnsupportedOptionError( - user_fault_string=msg, - operator_fault_string=msg) - vip_obj = driver_utils.provider_vip_dict_to_vip_obj(vip_dictionary) - lb_obj = data_models.LoadBalancer(id=loadbalancer_id, - project_id=project_id, vip=vip_obj) - - network_driver = utils.get_network_driver() - vip_network = network_driver.get_network( - vip_dictionary[lib_consts.VIP_NETWORK_ID]) - if not vip_network.port_security_enabled: - message = "Port security must be enabled on the VIP network." - raise exceptions.DriverError(user_fault_string=message, - operator_fault_string=message) - - try: - # allocated_vip returns (vip, add_vips), skipping the 2nd element - # as amphorav1 doesn't support add_vips - vip = network_driver.allocate_vip(lb_obj)[0] - except network_base.AllocateVIPException as e: - message = str(e) - if getattr(e, 'orig_msg', None) is not None: - message = e.orig_msg - raise exceptions.DriverError(user_fault_string=message, - operator_fault_string=message) - - LOG.info('Amphora provider created VIP port %s for load balancer %s.', - vip.port_id, loadbalancer_id) - return driver_utils.vip_dict_to_provider_dict(vip.to_dict()), [] - - # TODO(johnsom) convert this to octavia_lib constant flavor - # once octavia is transitioned to use octavia_lib - def loadbalancer_create(self, loadbalancer): - if loadbalancer.flavor == driver_dm.Unset: - loadbalancer.flavor = None - if loadbalancer.availability_zone == driver_dm.Unset: - loadbalancer.availability_zone = None - payload = {consts.LOAD_BALANCER_ID: loadbalancer.loadbalancer_id, - consts.FLAVOR: loadbalancer.flavor, - consts.AVAILABILITY_ZONE: loadbalancer.availability_zone} - self.client.cast({}, 'create_load_balancer', **payload) - - def loadbalancer_delete(self, loadbalancer, cascade=False): - loadbalancer_id = loadbalancer.loadbalancer_id - payload = {consts.LOAD_BALANCER_ID: loadbalancer_id, - 'cascade': cascade} - self.client.cast({}, 'delete_load_balancer', **payload) - - def loadbalancer_failover(self, loadbalancer_id): - payload = {consts.LOAD_BALANCER_ID: loadbalancer_id} - self.client.cast({}, 'failover_load_balancer', **payload) - - def loadbalancer_update(self, old_loadbalancer, new_loadbalancer): - # Adapt the provider data model to the queue schema - lb_dict = new_loadbalancer.to_dict() - if 'admin_state_up' in lb_dict: - lb_dict['enabled'] = lb_dict.pop('admin_state_up') - lb_id = lb_dict.pop('loadbalancer_id') - # Put the qos_policy_id back under the vip element the controller - # expects - vip_qos_policy_id = lb_dict.pop('vip_qos_policy_id', None) - if vip_qos_policy_id: - vip_dict = {"qos_policy_id": vip_qos_policy_id} - lb_dict["vip"] = vip_dict - - payload = {consts.LOAD_BALANCER_ID: lb_id, - consts.LOAD_BALANCER_UPDATES: lb_dict} - self.client.cast({}, 'update_load_balancer', **payload) - - # Listener - def listener_create(self, listener): - self._validate_listener_protocol(listener) - self._validate_alpn_protocols(listener) - payload = {consts.LISTENER_ID: listener.listener_id} - self.client.cast({}, 'create_listener', **payload) - - def listener_delete(self, listener): - listener_id = listener.listener_id - payload = {consts.LISTENER_ID: listener_id} - self.client.cast({}, 'delete_listener', **payload) - - def listener_update(self, old_listener, new_listener): - self._validate_alpn_protocols(new_listener) - listener_dict = new_listener.to_dict() - if 'admin_state_up' in listener_dict: - listener_dict['enabled'] = listener_dict.pop('admin_state_up') - listener_id = listener_dict.pop('listener_id') - if 
'client_ca_tls_container_ref' in listener_dict: - listener_dict['client_ca_tls_container_id'] = listener_dict.pop( - 'client_ca_tls_container_ref') - listener_dict.pop('client_ca_tls_container_data', None) - if 'client_crl_container_ref' in listener_dict: - listener_dict['client_crl_container_id'] = listener_dict.pop( - 'client_crl_container_ref') - listener_dict.pop('client_crl_container_data', None) - - payload = {consts.LISTENER_ID: listener_id, - consts.LISTENER_UPDATES: listener_dict} - self.client.cast({}, 'update_listener', **payload) - - # Pool - def pool_create(self, pool): - self._validate_pool_algorithm(pool) - self._validate_alpn_protocols(pool) - payload = {consts.POOL_ID: pool.pool_id} - self.client.cast({}, 'create_pool', **payload) - - def pool_delete(self, pool): - pool_id = pool.pool_id - payload = {consts.POOL_ID: pool_id} - self.client.cast({}, 'delete_pool', **payload) - - def pool_update(self, old_pool, new_pool): - self._validate_alpn_protocols(new_pool) - if new_pool.lb_algorithm: - self._validate_pool_algorithm(new_pool) - pool_dict = new_pool.to_dict() - if 'admin_state_up' in pool_dict: - pool_dict['enabled'] = pool_dict.pop('admin_state_up') - pool_id = pool_dict.pop('pool_id') - if 'tls_container_ref' in pool_dict: - pool_dict['tls_certificate_id'] = pool_dict.pop( - 'tls_container_ref') - pool_dict.pop('tls_container_data', None) - if 'ca_tls_container_ref' in pool_dict: - pool_dict['ca_tls_certificate_id'] = pool_dict.pop( - 'ca_tls_container_ref') - pool_dict.pop('ca_tls_container_data', None) - if 'crl_container_ref' in pool_dict: - pool_dict['crl_container_id'] = pool_dict.pop('crl_container_ref') - pool_dict.pop('crl_container_data', None) - - payload = {consts.POOL_ID: pool_id, - consts.POOL_UPDATES: pool_dict} - self.client.cast({}, 'update_pool', **payload) - - # Member - def member_create(self, member): - pool_id = member.pool_id - db_pool = self.repositories.pool.get(db_apis.get_session(), - id=pool_id) - self._validate_members(db_pool, [member]) - - payload = {consts.MEMBER_ID: member.member_id} - self.client.cast({}, 'create_member', **payload) - - def member_delete(self, member): - member_id = member.member_id - payload = {consts.MEMBER_ID: member_id} - self.client.cast({}, 'delete_member', **payload) - - def member_update(self, old_member, new_member): - member_dict = new_member.to_dict() - if 'admin_state_up' in member_dict: - member_dict['enabled'] = member_dict.pop('admin_state_up') - member_id = member_dict.pop('member_id') - - payload = {consts.MEMBER_ID: member_id, - consts.MEMBER_UPDATES: member_dict} - self.client.cast({}, 'update_member', **payload) - - def member_batch_update(self, pool_id, members): - # The DB should not have updated yet, so we can still use the pool - db_pool = self.repositories.pool.get(db_apis.get_session(), id=pool_id) - - self._validate_members(db_pool, members) - - old_members = db_pool.members - - old_member_ids = [m.id for m in old_members] - # The driver will always pass objects with IDs. 
- new_member_ids = [m.member_id for m in members] - - # Find members that are brand new or updated - new_members = [] - updated_members = [] - for m in members: - if m.member_id not in old_member_ids: - new_members.append(m) - else: - member_dict = m.to_dict(render_unsets=False) - member_dict['id'] = member_dict.pop('member_id') - if 'address' in member_dict: - member_dict['ip_address'] = member_dict.pop('address') - if 'admin_state_up' in member_dict: - member_dict['enabled'] = member_dict.pop('admin_state_up') - updated_members.append(member_dict) - - # Find members that are deleted - deleted_members = [] - for m in old_members: - if m.id not in new_member_ids: - deleted_members.append(m) - - payload = {'old_member_ids': [m.id for m in deleted_members], - 'new_member_ids': [m.member_id for m in new_members], - 'updated_members': updated_members} - self.client.cast({}, 'batch_update_members', **payload) - - def _validate_members(self, db_pool, members): - if db_pool.protocol in consts.LVS_PROTOCOLS: - # For SCTP/UDP LBs, check that we are not mixing IPv4 and IPv6 - for member in members: - member_is_ipv6 = utils.is_ipv6(member.address) - - for listener in db_pool.listeners: - lb = listener.load_balancer - vip_is_ipv6 = utils.is_ipv6(lb.vip.ip_address) - - if member_is_ipv6 != vip_is_ipv6: - msg = ("This provider doesn't support mixing IPv4 and " - "IPv6 addresses for its VIP and members in {} " - "load balancers.".format(db_pool.protocol)) - raise exceptions.UnsupportedOptionError( - user_fault_string=msg, - operator_fault_string=msg) - - # Health Monitor - def health_monitor_create(self, healthmonitor): - payload = {consts.HEALTH_MONITOR_ID: healthmonitor.healthmonitor_id} - self.client.cast({}, 'create_health_monitor', **payload) - - def health_monitor_delete(self, healthmonitor): - healthmonitor_id = healthmonitor.healthmonitor_id - payload = {consts.HEALTH_MONITOR_ID: healthmonitor_id} - self.client.cast({}, 'delete_health_monitor', **payload) - - def health_monitor_update(self, old_healthmonitor, new_healthmonitor): - healthmon_dict = new_healthmonitor.to_dict() - if 'admin_state_up' in healthmon_dict: - healthmon_dict['enabled'] = healthmon_dict.pop('admin_state_up') - if 'max_retries_down' in healthmon_dict: - healthmon_dict['fall_threshold'] = healthmon_dict.pop( - 'max_retries_down') - if 'max_retries' in healthmon_dict: - healthmon_dict['rise_threshold'] = healthmon_dict.pop( - 'max_retries') - healthmon_id = healthmon_dict.pop('healthmonitor_id') - - payload = {consts.HEALTH_MONITOR_ID: healthmon_id, - consts.HEALTH_MONITOR_UPDATES: healthmon_dict} - self.client.cast({}, 'update_health_monitor', **payload) - - # L7 Policy - def l7policy_create(self, l7policy): - db_listener = self.repositories.listener.get(db_apis.get_session(), - id=l7policy.listener_id) - if db_listener.protocol not in VALID_L7POLICY_LISTENER_PROTOCOLS: - msg = ('%s protocol listeners do not support L7 policies' % ( - db_listener.protocol)) - raise exceptions.UnsupportedOptionError( - user_fault_string=msg, - operator_fault_string=msg) - payload = {consts.L7POLICY_ID: l7policy.l7policy_id} - self.client.cast({}, 'create_l7policy', **payload) - - def l7policy_delete(self, l7policy): - l7policy_id = l7policy.l7policy_id - payload = {consts.L7POLICY_ID: l7policy_id} - self.client.cast({}, 'delete_l7policy', **payload) - - def l7policy_update(self, old_l7policy, new_l7policy): - l7policy_dict = new_l7policy.to_dict() - if 'admin_state_up' in l7policy_dict: - l7policy_dict['enabled'] = 
l7policy_dict.pop('admin_state_up') - l7policy_id = l7policy_dict.pop('l7policy_id') - - payload = {consts.L7POLICY_ID: l7policy_id, - consts.L7POLICY_UPDATES: l7policy_dict} - self.client.cast({}, 'update_l7policy', **payload) - - # L7 Rule - def l7rule_create(self, l7rule): - payload = {consts.L7RULE_ID: l7rule.l7rule_id} - self.client.cast({}, 'create_l7rule', **payload) - - def l7rule_delete(self, l7rule): - l7rule_id = l7rule.l7rule_id - payload = {consts.L7RULE_ID: l7rule_id} - self.client.cast({}, 'delete_l7rule', **payload) - - def l7rule_update(self, old_l7rule, new_l7rule): - l7rule_dict = new_l7rule.to_dict() - if 'admin_state_up' in l7rule_dict: - l7rule_dict['enabled'] = l7rule_dict.pop('admin_state_up') - l7rule_id = l7rule_dict.pop('l7rule_id') - - payload = {consts.L7RULE_ID: l7rule_id, - consts.L7RULE_UPDATES: l7rule_dict} - self.client.cast({}, 'update_l7rule', **payload) - - # Flavor - def get_supported_flavor_metadata(self): - """Returns the valid flavor metadata keys and descriptions. - - This extracts the valid flavor metadata keys and descriptions - from the JSON validation schema and returns it as a dictionary. - - :return: Dictionary of flavor metadata keys and descriptions. - :raises DriverError: An unexpected error occurred. - """ - try: - props = flavor_schema.SUPPORTED_FLAVOR_SCHEMA['properties'] - return {k: v.get('description', '') for k, v in props.items()} - except Exception as e: - raise exceptions.DriverError( - user_fault_string='Failed to get the supported flavor ' - 'metadata due to: {}'.format(str(e)), - operator_fault_string='Failed to get the supported flavor ' - 'metadata due to: {}'.format(str(e))) - - def validate_flavor(self, flavor_dict): - """Validates flavor profile data. - - This will validate a flavor profile dataset against the flavor - settings the amphora driver supports. - - :param flavor_dict: The flavor dictionary to validate. - :type flavor: dict - :return: None - :raises DriverError: An unexpected error occurred. - :raises UnsupportedOptionError: If the driver does not support - one of the flavor settings. - """ - try: - validate(flavor_dict, flavor_schema.SUPPORTED_FLAVOR_SCHEMA) - except js_exceptions.ValidationError as e: - error_object = '' - if e.relative_path: - error_object = '{} '.format(e.relative_path[0]) - raise exceptions.UnsupportedOptionError( - user_fault_string='{0}{1}'.format(error_object, e.message), - operator_fault_string=str(e)) - except Exception as e: - raise exceptions.DriverError( - user_fault_string='Failed to validate the flavor metadata ' - 'due to: {}'.format(str(e)), - operator_fault_string='Failed to validate the flavor metadata ' - 'due to: {}'.format(str(e))) - compute_flavor = flavor_dict.get(consts.COMPUTE_FLAVOR, None) - if compute_flavor: - compute_driver = stevedore_driver.DriverManager( - namespace='octavia.compute.drivers', - name=CONF.controller_worker.compute_driver, - invoke_on_load=True - ).driver - - # TODO(johnsom) Fix this to raise a NotFound error - # when the octavia-lib supports it. 
- compute_driver.validate_flavor(compute_flavor) - - amp_image_tag = flavor_dict.get(consts.AMP_IMAGE_TAG, None) - if amp_image_tag: - image_driver = stevedore_driver.DriverManager( - namespace='octavia.image.drivers', - name=CONF.controller_worker.image_driver, - invoke_on_load=True - ).driver - - try: - image_driver.get_image_id_by_tag( - amp_image_tag, CONF.controller_worker.amp_image_owner_id) - except Exception as e: - raise exceptions.NotFound( - user_fault_string='Failed to find an image with tag {} ' - 'due to: {}'.format( - amp_image_tag, str(e)), - operator_fault_string='Failed to find an image with tag ' - '{} due to: {}'.format( - amp_image_tag, str(e))) - - # Availability Zone - def get_supported_availability_zone_metadata(self): - """Returns the valid availability zone metadata keys and descriptions. - - This extracts the valid availability zone metadata keys and - descriptions from the JSON validation schema and returns it as a - dictionary. - - :return: Dictionary of availability zone metadata keys and descriptions - :raises DriverError: An unexpected error occurred. - """ - try: - props = ( - availability_zone_schema.SUPPORTED_AVAILABILITY_ZONE_SCHEMA[ - 'properties']) - return {k: v.get('description', '') for k, v in props.items()} - except Exception as e: - raise exceptions.DriverError( - user_fault_string='Failed to get the supported availability ' - 'zone metadata due to: {}'.format(str(e)), - operator_fault_string='Failed to get the supported ' - 'availability zone metadata due to: ' - '{}'.format(str(e))) - - def validate_availability_zone(self, availability_zone_dict): - """Validates availability zone profile data. - - This will validate an availability zone profile dataset against the - availability zone settings the amphora driver supports. - - :param availability_zone_dict: The availability zone dict to validate. - :type availability_zone_dict: dict - :return: None - :raises DriverError: An unexpected error occurred. - :raises UnsupportedOptionError: If the driver does not support - one of the availability zone settings. - """ - try: - validate( - availability_zone_dict, - availability_zone_schema.SUPPORTED_AVAILABILITY_ZONE_SCHEMA) - except js_exceptions.ValidationError as e: - error_object = '' - if e.relative_path: - error_object = '{} '.format(e.relative_path[0]) - raise exceptions.UnsupportedOptionError( - user_fault_string='{0}{1}'.format(error_object, e.message), - operator_fault_string=str(e)) - except Exception as e: - raise exceptions.DriverError( - user_fault_string='Failed to validate the availability zone ' - 'metadata due to: {}'.format(str(e)), - operator_fault_string='Failed to validate the availability ' - 'zone metadata due to: {}'.format(str(e)) - ) - compute_zone = availability_zone_dict.get(consts.COMPUTE_ZONE, None) - if compute_zone: - compute_driver = stevedore_driver.DriverManager( - namespace='octavia.compute.drivers', - name=CONF.controller_worker.compute_driver, - invoke_on_load=True - ).driver - - # TODO(johnsom) Fix this to raise a NotFound error - # when the octavia-lib supports it. 
-            compute_driver.validate_availability_zone(compute_zone)
diff --git a/octavia/api/v2/controllers/amphora.py b/octavia/api/v2/controllers/amphora.py
index 6882137c2f..4708c1399c 100644
--- a/octavia/api/v2/controllers/amphora.py
+++ b/octavia/api/v2/controllers/amphora.py
@@ -124,12 +124,8 @@ class FailoverController(base.BaseController):
 
     def __init__(self, amp_id):
         super().__init__()
-        if CONF.api_settings.default_provider_driver == constants.AMPHORAV1:
-            topic = cfg.CONF.oslo_messaging.topic
-            version = "1.0"
-        else:
-            topic = constants.TOPIC_AMPHORA_V2
-            version = "2.0"
+        topic = constants.TOPIC_AMPHORA_V2
+        version = "2.0"
         self.target = messaging.Target(
             namespace=constants.RPC_NAMESPACE_CONTROLLER_AGENT,
             topic=topic, version=version, fanout=False)
@@ -170,12 +166,8 @@ class AmphoraUpdateController(base.BaseController):
 
     def __init__(self, amp_id):
         super().__init__()
-        if CONF.api_settings.default_provider_driver == constants.AMPHORAV1:
-            topic = cfg.CONF.oslo_messaging.topic
-            version = "1.0"
-        else:
-            topic = constants.TOPIC_AMPHORA_V2
-            version = "2.0"
+        topic = constants.TOPIC_AMPHORA_V2
+        version = "2.0"
         self.target = messaging.Target(
             namespace=constants.RPC_NAMESPACE_CONTROLLER_AGENT,
             topic=topic, version=version, fanout=False)
diff --git a/octavia/cmd/octavia_worker.py b/octavia/cmd/octavia_worker.py
index ea18b96a08..4836dce66a 100644
--- a/octavia/cmd/octavia_worker.py
+++ b/octavia/cmd/octavia_worker.py
@@ -20,7 +20,6 @@ from oslo_config import cfg
 from oslo_reports import guru_meditation_report as gmr
 
 from octavia.common import service as octavia_service
-from octavia.controller.queue.v1 import consumer as consumer_v1
 from octavia.controller.queue.v2 import consumer as consumer_v2
 from octavia import version
 
@@ -33,8 +32,6 @@ def main():
     gmr.TextGuruMeditation.setup_autorun(version)
 
     sm = cotyledon.ServiceManager()
-    sm.add(consumer_v1.ConsumerService, workers=CONF.controller_worker.workers,
-           args=(CONF,))
     sm.add(consumer_v2.ConsumerService, workers=CONF.controller_worker.workers,
            args=(CONF,))
     oslo_config_glue.setup(sm, CONF, reload_method="mutate")
diff --git a/octavia/common/constants.py b/octavia/common/constants.py
index c525c36413..f638bbb494 100644
--- a/octavia/common/constants.py
+++ b/octavia/common/constants.py
@@ -796,8 +796,6 @@ RBAC_ROLES_DEPRECATED_REASON = (
 # PROVIDERS
 OCTAVIA = 'octavia'
 AMPHORAV2 = 'amphorav2'
-# Deprecated in Z, to be removed
-AMPHORAV1 = 'amphorav1'
 
 # systemctl commands
 DISABLE = 'disable'
diff --git a/octavia/controller/healthmanager/health_manager.py b/octavia/controller/healthmanager/health_manager.py
index 2f5462f422..c62c696a4d 100644
--- a/octavia/controller/healthmanager/health_manager.py
+++ b/octavia/controller/healthmanager/health_manager.py
@@ -23,7 +23,6 @@ from oslo_log import log as logging
 from oslo_utils import excutils
 
 from octavia.common import constants
-from octavia.controller.worker.v1 import controller_worker as cw1
 from octavia.controller.worker.v2 import controller_worker as cw2
 from octavia.db import api as db_api
 from octavia.db import repositories as repo
@@ -58,10 +57,7 @@ def update_stats_on_done(stats, fut):
 
 class HealthManager(object):
     def __init__(self, exit_event):
-        if CONF.api_settings.default_provider_driver == constants.AMPHORAV1:
-            self.cw = cw1.ControllerWorker()
-        else:
-            self.cw = cw2.ControllerWorker()
+        self.cw = cw2.ControllerWorker()
         self.threads = CONF.health_manager.failover_threads
         self.executor = futures.ThreadPoolExecutor(max_workers=self.threads)
         self.amp_repo = repo.AmphoraRepository()
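The octavia/api/v2/controllers/amphora.py hunks above collapse the old amphorav1/amphorav2 branch into an unconditional v2 RPC target. A minimal sketch of the resulting pattern follows; it is not part of the patch, the helper name build_v2_controller_target is invented for illustration, and only the constants, the oslo_messaging.Target arguments, and the rpc.get_client call visible in these hunks and in the deleted v1 driver are assumed:

    import oslo_messaging as messaging

    from octavia.common import constants
    from octavia.common import rpc


    def build_v2_controller_target():
        # With amphorav1 removed there is no branch on
        # CONF.api_settings.default_provider_driver any more: every caller
        # targets the amphora v2 topic with RPC API version "2.0".
        return messaging.Target(
            namespace=constants.RPC_NAMESPACE_CONTROLLER_AGENT,
            topic=constants.TOPIC_AMPHORA_V2,
            version="2.0", fanout=False)


    # A casting client is then obtained the same way the deleted v1 driver
    # built one for its own topic:
    client = rpc.get_client(build_v2_controller_target())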
diff --git a/octavia/controller/housekeeping/house_keeping.py b/octavia/controller/housekeeping/house_keeping.py
index ce94ce2c88..1a71b22a3f 100644
--- a/octavia/controller/housekeeping/house_keeping.py
+++ b/octavia/controller/housekeeping/house_keeping.py
@@ -19,8 +19,6 @@ from oslo_config import cfg
 from oslo_log import log as logging
 from sqlalchemy.orm import exc as sqlalchemy_exceptions
 
-from octavia.common import constants
-from octavia.controller.worker.v1 import controller_worker as cw1
 from octavia.controller.worker.v2 import controller_worker as cw2
 from octavia.db import api as db_api
 from octavia.db import repositories as repo
@@ -78,10 +76,7 @@ class DatabaseCleanup(object):
 class CertRotation(object):
     def __init__(self):
         self.threads = CONF.house_keeping.cert_rotate_threads
-        if CONF.api_settings.default_provider_driver == constants.AMPHORAV1:
-            self.cw = cw1.ControllerWorker()
-        else:
-            self.cw = cw2.ControllerWorker()
+        self.cw = cw2.ControllerWorker()
 
     def rotate(self):
         """Check the amphora db table for expiring auth certs."""
diff --git a/octavia/controller/queue/v1/__init__.py b/octavia/controller/queue/v1/__init__.py
deleted file mode 100644
index 94e731d201..0000000000
--- a/octavia/controller/queue/v1/__init__.py
+++ /dev/null
@@ -1,11 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
diff --git a/octavia/controller/queue/v1/consumer.py b/octavia/controller/queue/v1/consumer.py
deleted file mode 100644
index f21bbae67f..0000000000
--- a/octavia/controller/queue/v1/consumer.py
+++ /dev/null
@@ -1,64 +0,0 @@
-# Copyright 2014 Rackspace
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import cotyledon
-from oslo_log import log as logging
-import oslo_messaging as messaging
-from oslo_messaging.rpc import dispatcher
-
-from octavia.common import rpc
-from octavia.controller.queue.v1 import endpoints
-
-LOG = logging.getLogger(__name__)
-
-
-class ConsumerService(cotyledon.Service):
-
-    def __init__(self, worker_id, conf):
-        super().__init__(worker_id)
-        self.conf = conf
-        self.topic = conf.oslo_messaging.topic
-        self.server = conf.host
-        self.endpoints = []
-        self.access_policy = dispatcher.DefaultRPCAccessPolicy
-        self.message_listener = None
-
-    def run(self):
-        LOG.info('Starting consumer...')
-        target = messaging.Target(topic=self.topic, server=self.server,
-                                  fanout=False)
-        self.endpoints = [endpoints.Endpoints()]
-        self.message_listener = rpc.get_server(
-            target, self.endpoints,
-            executor='threading',
-            access_policy=self.access_policy
-        )
-        self.message_listener.start()
-
-    def terminate(self):
-        if self.message_listener:
-            LOG.info('Stopping consumer...')
-            self.message_listener.stop()
-
-            LOG.info('Consumer successfully stopped. Waiting for final '
-                     'messages to be processed...')
-            self.message_listener.wait()
-        if self.endpoints:
-            LOG.info('Shutting down endpoint worker executors...')
-            for e in self.endpoints:
-                try:
-                    e.worker.executor.shutdown()
-                except AttributeError:
-                    pass
-        super().terminate()
diff --git a/octavia/controller/queue/v1/endpoints.py b/octavia/controller/queue/v1/endpoints.py
deleted file mode 100644
index 3f0b00ad7b..0000000000
--- a/octavia/controller/queue/v1/endpoints.py
+++ /dev/null
@@ -1,160 +0,0 @@
-# Copyright 2014 Rackspace
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_config import cfg
-from oslo_log import log as logging
-import oslo_messaging as messaging
-from stevedore import driver as stevedore_driver
-
-from octavia.common import constants
-
-CONF = cfg.CONF
-
-LOG = logging.getLogger(__name__)
-
-
-class Endpoints(object):
-
-    # API version history:
-    #   1.0 - Initial version.
- target = messaging.Target( - namespace=constants.RPC_NAMESPACE_CONTROLLER_AGENT, - version='1.1') - - def __init__(self): - self.worker = stevedore_driver.DriverManager( - namespace='octavia.plugins', - name=CONF.octavia_plugins, - invoke_on_load=True - ).driver - - def create_load_balancer(self, context, load_balancer_id, - flavor=None, availability_zone=None): - LOG.info('Creating load balancer \'%s\'...', load_balancer_id) - self.worker.create_load_balancer(load_balancer_id, flavor, - availability_zone) - - def update_load_balancer(self, context, load_balancer_id, - load_balancer_updates): - LOG.info('Updating load balancer \'%s\'...', load_balancer_id) - self.worker.update_load_balancer(load_balancer_id, - load_balancer_updates) - - def delete_load_balancer(self, context, load_balancer_id, cascade=False): - LOG.info('Deleting load balancer \'%s\'...', load_balancer_id) - self.worker.delete_load_balancer(load_balancer_id, cascade) - - def failover_load_balancer(self, context, load_balancer_id): - LOG.info('Failing over amphora in load balancer \'%s\'...', - load_balancer_id) - self.worker.failover_loadbalancer(load_balancer_id) - - def failover_amphora(self, context, amphora_id): - LOG.info('Failing over amphora \'%s\'...', - amphora_id) - self.worker.failover_amphora(amphora_id) - - def create_listener(self, context, listener_id): - LOG.info('Creating listener \'%s\'...', listener_id) - self.worker.create_listener(listener_id) - - def update_listener(self, context, listener_id, listener_updates): - LOG.info('Updating listener \'%s\'...', listener_id) - self.worker.update_listener(listener_id, listener_updates) - - def delete_listener(self, context, listener_id): - LOG.info('Deleting listener \'%s\'...', listener_id) - self.worker.delete_listener(listener_id) - - def create_pool(self, context, pool_id): - LOG.info('Creating pool \'%s\'...', pool_id) - self.worker.create_pool(pool_id) - - def update_pool(self, context, pool_id, pool_updates): - LOG.info('Updating pool \'%s\'...', pool_id) - self.worker.update_pool(pool_id, pool_updates) - - def delete_pool(self, context, pool_id): - LOG.info('Deleting pool \'%s\'...', pool_id) - self.worker.delete_pool(pool_id) - - def create_health_monitor(self, context, health_monitor_id): - LOG.info('Creating health monitor \'%s\'...', health_monitor_id) - self.worker.create_health_monitor(health_monitor_id) - - def update_health_monitor(self, context, health_monitor_id, - health_monitor_updates): - LOG.info('Updating health monitor \'%s\'...', health_monitor_id) - self.worker.update_health_monitor(health_monitor_id, - health_monitor_updates) - - def delete_health_monitor(self, context, health_monitor_id): - LOG.info('Deleting health monitor \'%s\'...', health_monitor_id) - self.worker.delete_health_monitor(health_monitor_id) - - def create_member(self, context, member_id): - LOG.info('Creating member \'%s\'...', member_id) - self.worker.create_member(member_id) - - def update_member(self, context, member_id, member_updates): - LOG.info('Updating member \'%s\'...', member_id) - self.worker.update_member(member_id, member_updates) - - def batch_update_members(self, context, old_member_ids, new_member_ids, - updated_members): - updated_member_ids = [m.get('id') for m in updated_members] - LOG.info( - 'Batch updating members: old=\'%(old)s\', new=\'%(new)s\', ' - 'updated=\'%(updated)s\'...', - {'old': old_member_ids, 'new': new_member_ids, - 'updated': updated_member_ids}) - self.worker.batch_update_members( - old_member_ids, new_member_ids, 
updated_members) - - def delete_member(self, context, member_id): - LOG.info('Deleting member \'%s\'...', member_id) - self.worker.delete_member(member_id) - - def create_l7policy(self, context, l7policy_id): - LOG.info('Creating l7policy \'%s\'...', l7policy_id) - self.worker.create_l7policy(l7policy_id) - - def update_l7policy(self, context, l7policy_id, l7policy_updates): - LOG.info('Updating l7policy \'%s\'...', l7policy_id) - self.worker.update_l7policy(l7policy_id, l7policy_updates) - - def delete_l7policy(self, context, l7policy_id): - LOG.info('Deleting l7policy \'%s\'...', l7policy_id) - self.worker.delete_l7policy(l7policy_id) - - def create_l7rule(self, context, l7rule_id): - LOG.info('Creating l7rule \'%s\'...', l7rule_id) - self.worker.create_l7rule(l7rule_id) - - def update_l7rule(self, context, l7rule_id, l7rule_updates): - LOG.info('Updating l7rule \'%s\'...', l7rule_id) - self.worker.update_l7rule(l7rule_id, l7rule_updates) - - def delete_l7rule(self, context, l7rule_id): - LOG.info('Deleting l7rule \'%s\'...', l7rule_id) - self.worker.delete_l7rule(l7rule_id) - - def update_amphora_agent_config(self, context, amphora_id): - LOG.info('Updating amphora \'%s\' agent configuration...', - amphora_id) - self.worker.update_amphora_agent_config(amphora_id) - - def delete_amphora(self, context, amphora_id): - LOG.info('Deleting amphora \'%s\'...', amphora_id) - self.worker.delete_amphora(amphora_id) diff --git a/octavia/controller/worker/v1/__init__.py b/octavia/controller/worker/v1/__init__.py deleted file mode 100644 index 94e731d201..0000000000 --- a/octavia/controller/worker/v1/__init__.py +++ /dev/null @@ -1,11 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. diff --git a/octavia/controller/worker/v1/controller_worker.py b/octavia/controller/worker/v1/controller_worker.py deleted file mode 100644 index e14c41cd57..0000000000 --- a/octavia/controller/worker/v1/controller_worker.py +++ /dev/null @@ -1,1157 +0,0 @@ -# Copyright 2015 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import excutils -from sqlalchemy.orm import exc as db_exceptions -from taskflow.listeners import logging as tf_logging -import tenacity - -from octavia.common import base_taskflow -from octavia.common import constants -from octavia.common import exceptions -from octavia.common import utils -from octavia.controller.worker.v1.flows import amphora_flows -from octavia.controller.worker.v1.flows import health_monitor_flows -from octavia.controller.worker.v1.flows import l7policy_flows -from octavia.controller.worker.v1.flows import l7rule_flows -from octavia.controller.worker.v1.flows import listener_flows -from octavia.controller.worker.v1.flows import load_balancer_flows -from octavia.controller.worker.v1.flows import member_flows -from octavia.controller.worker.v1.flows import pool_flows -from octavia.db import api as db_apis -from octavia.db import repositories as repo - -CONF = cfg.CONF -LOG = logging.getLogger(__name__) - - -def _is_provisioning_status_pending_update(lb_obj): - return not lb_obj.provisioning_status == constants.PENDING_UPDATE - - -class ControllerWorker(base_taskflow.BaseTaskFlowEngine): - - def __init__(self): - - LOG.warning("The 'amphorav1' provider is deprecated and will be " - "removed in a future release. Use the 'amphora' driver " - "instead.") - - self._amphora_flows = amphora_flows.AmphoraFlows() - self._health_monitor_flows = health_monitor_flows.HealthMonitorFlows() - self._lb_flows = load_balancer_flows.LoadBalancerFlows() - self._listener_flows = listener_flows.ListenerFlows() - self._member_flows = member_flows.MemberFlows() - self._pool_flows = pool_flows.PoolFlows() - self._l7policy_flows = l7policy_flows.L7PolicyFlows() - self._l7rule_flows = l7rule_flows.L7RuleFlows() - - self._amphora_repo = repo.AmphoraRepository() - self._amphora_health_repo = repo.AmphoraHealthRepository() - self._health_mon_repo = repo.HealthMonitorRepository() - self._lb_repo = repo.LoadBalancerRepository() - self._listener_repo = repo.ListenerRepository() - self._member_repo = repo.MemberRepository() - self._pool_repo = repo.PoolRepository() - self._l7policy_repo = repo.L7PolicyRepository() - self._l7rule_repo = repo.L7RuleRepository() - self._flavor_repo = repo.FlavorRepository() - self._az_repo = repo.AvailabilityZoneRepository() - - super().__init__() - - @tenacity.retry( - retry=( - tenacity.retry_if_result(_is_provisioning_status_pending_update) | - tenacity.retry_if_exception_type()), - wait=tenacity.wait_incrementing( - CONF.haproxy_amphora.api_db_commit_retry_initial_delay, - CONF.haproxy_amphora.api_db_commit_retry_backoff, - CONF.haproxy_amphora.api_db_commit_retry_max), - stop=tenacity.stop_after_attempt( - CONF.haproxy_amphora.api_db_commit_retry_attempts)) - def _get_db_obj_until_pending_update(self, repo, id): - - return repo.get(db_apis.get_session(), id=id) - - def delete_amphora(self, amphora_id): - """Deletes an existing Amphora. 
- - :param amphora_id: ID of the amphora to delete - :returns: None - :raises AmphoraNotFound: The referenced Amphora was not found - """ - try: - amphora = self._amphora_repo.get(db_apis.get_session(), - id=amphora_id) - delete_amp_tf = self.taskflow_load( - self._amphora_flows.get_delete_amphora_flow(amphora)) - with tf_logging.DynamicLoggingListener(delete_amp_tf, log=LOG): - delete_amp_tf.run() - except Exception as e: - LOG.error('Failed to delete a amphora %s due to: %s', - amphora_id, str(e)) - return - LOG.info('Finished deleting amphora %s.', amphora_id) - - @tenacity.retry( - retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound), - wait=tenacity.wait_incrementing( - CONF.haproxy_amphora.api_db_commit_retry_initial_delay, - CONF.haproxy_amphora.api_db_commit_retry_backoff, - CONF.haproxy_amphora.api_db_commit_retry_max), - stop=tenacity.stop_after_attempt( - CONF.haproxy_amphora.api_db_commit_retry_attempts)) - def create_health_monitor(self, health_monitor_id): - """Creates a health monitor. - - :param pool_id: ID of the pool to create a health monitor on - :returns: None - :raises NoResultFound: Unable to find the object - """ - health_mon = self._health_mon_repo.get(db_apis.get_session(), - id=health_monitor_id) - if not health_mon: - LOG.warning('Failed to fetch %s %s from DB. Retrying for up to ' - '60 seconds.', 'health_monitor', health_monitor_id) - raise db_exceptions.NoResultFound - - pool = health_mon.pool - listeners = pool.listeners - pool.health_monitor = health_mon - load_balancer = pool.load_balancer - - create_hm_tf = self.taskflow_load( - self._health_monitor_flows.get_create_health_monitor_flow(), - store={constants.HEALTH_MON: health_mon, - constants.POOL: pool, - constants.LISTENERS: listeners, - constants.LOADBALANCER: load_balancer}) - with tf_logging.DynamicLoggingListener(create_hm_tf, - log=LOG): - create_hm_tf.run() - - def delete_health_monitor(self, health_monitor_id): - """Deletes a health monitor. - - :param pool_id: ID of the pool to delete its health monitor - :returns: None - :raises HMNotFound: The referenced health monitor was not found - """ - health_mon = self._health_mon_repo.get(db_apis.get_session(), - id=health_monitor_id) - - pool = health_mon.pool - listeners = pool.listeners - load_balancer = pool.load_balancer - - delete_hm_tf = self.taskflow_load( - self._health_monitor_flows.get_delete_health_monitor_flow(), - store={constants.HEALTH_MON: health_mon, - constants.POOL: pool, - constants.LISTENERS: listeners, - constants.LOADBALANCER: load_balancer}) - with tf_logging.DynamicLoggingListener(delete_hm_tf, - log=LOG): - delete_hm_tf.run() - - def update_health_monitor(self, health_monitor_id, health_monitor_updates): - """Updates a health monitor. - - :param pool_id: ID of the pool to have it's health monitor updated - :param health_monitor_updates: Dict containing updated health monitor - :returns: None - :raises HMNotFound: The referenced health monitor was not found - """ - health_mon = None - try: - health_mon = self._get_db_obj_until_pending_update( - self._health_mon_repo, health_monitor_id) - except tenacity.RetryError as e: - LOG.warning('Health monitor did not go into %s in 60 seconds. ' - 'This either due to an in-progress Octavia upgrade ' - 'or an overloaded and failing database. 
Assuming ' - 'an upgrade is in progress and continuing.', - constants.PENDING_UPDATE) - health_mon = e.last_attempt.result() - - pool = health_mon.pool - listeners = pool.listeners - pool.health_monitor = health_mon - load_balancer = pool.load_balancer - - update_hm_tf = self.taskflow_load( - self._health_monitor_flows.get_update_health_monitor_flow(), - store={constants.HEALTH_MON: health_mon, - constants.POOL: pool, - constants.LISTENERS: listeners, - constants.LOADBALANCER: load_balancer, - constants.UPDATE_DICT: health_monitor_updates}) - with tf_logging.DynamicLoggingListener(update_hm_tf, - log=LOG): - update_hm_tf.run() - - @tenacity.retry( - retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound), - wait=tenacity.wait_incrementing( - CONF.haproxy_amphora.api_db_commit_retry_initial_delay, - CONF.haproxy_amphora.api_db_commit_retry_backoff, - CONF.haproxy_amphora.api_db_commit_retry_max), - stop=tenacity.stop_after_attempt( - CONF.haproxy_amphora.api_db_commit_retry_attempts)) - def create_listener(self, listener_id): - """Creates a listener. - - :param listener_id: ID of the listener to create - :returns: None - :raises NoResultFound: Unable to find the object - """ - listener = self._listener_repo.get(db_apis.get_session(), - id=listener_id) - if not listener: - LOG.warning('Failed to fetch %s %s from DB. Retrying for up to ' - '60 seconds.', 'listener', listener_id) - raise db_exceptions.NoResultFound - - load_balancer = listener.load_balancer - listeners = load_balancer.listeners - - create_listener_tf = self.taskflow_load(self._listener_flows. - get_create_listener_flow(), - store={constants.LOADBALANCER: - load_balancer, - constants.LISTENERS: - listeners}) - with tf_logging.DynamicLoggingListener(create_listener_tf, - log=LOG): - create_listener_tf.run() - - def delete_listener(self, listener_id): - """Deletes a listener. - - :param listener_id: ID of the listener to delete - :returns: None - :raises ListenerNotFound: The referenced listener was not found - """ - listener = self._listener_repo.get(db_apis.get_session(), - id=listener_id) - load_balancer = listener.load_balancer - - delete_listener_tf = self.taskflow_load( - self._listener_flows.get_delete_listener_flow(), - store={constants.LOADBALANCER: load_balancer, - constants.LISTENER: listener}) - with tf_logging.DynamicLoggingListener(delete_listener_tf, - log=LOG): - delete_listener_tf.run() - - def update_listener(self, listener_id, listener_updates): - """Updates a listener. - - :param listener_id: ID of the listener to update - :param listener_updates: Dict containing updated listener attributes - :returns: None - :raises ListenerNotFound: The referenced listener was not found - """ - listener = None - try: - listener = self._get_db_obj_until_pending_update( - self._listener_repo, listener_id) - except tenacity.RetryError as e: - LOG.warning('Listener did not go into %s in 60 seconds. ' - 'This either due to an in-progress Octavia upgrade ' - 'or an overloaded and failing database. Assuming ' - 'an upgrade is in progress and continuing.', - constants.PENDING_UPDATE) - listener = e.last_attempt.result() - - load_balancer = listener.load_balancer - - update_listener_tf = self.taskflow_load(self._listener_flows. 
- get_update_listener_flow(), - store={constants.LISTENER: - listener, - constants.LOADBALANCER: - load_balancer, - constants.UPDATE_DICT: - listener_updates, - constants.LISTENERS: - [listener]}) - with tf_logging.DynamicLoggingListener(update_listener_tf, log=LOG): - update_listener_tf.run() - - @tenacity.retry( - retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound), - wait=tenacity.wait_incrementing( - CONF.haproxy_amphora.api_db_commit_retry_initial_delay, - CONF.haproxy_amphora.api_db_commit_retry_backoff, - CONF.haproxy_amphora.api_db_commit_retry_max), - stop=tenacity.stop_after_attempt( - CONF.haproxy_amphora.api_db_commit_retry_attempts)) - def create_load_balancer(self, load_balancer_id, flavor=None, - availability_zone=None): - """Creates a load balancer by allocating Amphorae. - - First tries to allocate an existing Amphora in READY state. - If none are available it will attempt to build one specifically - for this load balancer. - - :param load_balancer_id: ID of the load balancer to create - :returns: None - :raises NoResultFound: Unable to find the object - """ - lb = self._lb_repo.get(db_apis.get_session(), id=load_balancer_id) - if not lb: - LOG.warning('Failed to fetch %s %s from DB. Retrying for up to ' - '60 seconds.', 'load_balancer', load_balancer_id) - raise db_exceptions.NoResultFound - - # TODO(johnsom) convert this to octavia_lib constant flavor - # once octavia is transitioned to use octavia_lib - store = {constants.LOADBALANCER_ID: load_balancer_id, - constants.BUILD_TYPE_PRIORITY: - constants.LB_CREATE_NORMAL_PRIORITY, - constants.FLAVOR: flavor, - constants.AVAILABILITY_ZONE: availability_zone} - - topology = lb.topology - - if (not CONF.nova.enable_anti_affinity or - topology == constants.TOPOLOGY_SINGLE): - store[constants.SERVER_GROUP_ID] = None - - store[constants.UPDATE_DICT] = { - constants.TOPOLOGY: topology - } - - create_lb_flow = self._lb_flows.get_create_load_balancer_flow( - topology=topology, listeners=lb.listeners) - - create_lb_tf = self.taskflow_load(create_lb_flow, store=store) - with tf_logging.DynamicLoggingListener(create_lb_tf, log=LOG): - create_lb_tf.run() - - def delete_load_balancer(self, load_balancer_id, cascade=False): - """Deletes a load balancer by de-allocating Amphorae. - - :param load_balancer_id: ID of the load balancer to delete - :returns: None - :raises LBNotFound: The referenced load balancer was not found - """ - lb = self._lb_repo.get(db_apis.get_session(), - id=load_balancer_id) - - if cascade: - (flow, - store) = self._lb_flows.get_cascade_delete_load_balancer_flow(lb) - else: - (flow, store) = self._lb_flows.get_delete_load_balancer_flow(lb) - store.update({constants.LOADBALANCER: lb, - constants.SERVER_GROUP_ID: lb.server_group_id}) - delete_lb_tf = self.taskflow_load(flow, store=store) - - with tf_logging.DynamicLoggingListener(delete_lb_tf, - log=LOG): - delete_lb_tf.run() - - def update_load_balancer(self, load_balancer_id, load_balancer_updates): - """Updates a load balancer. - - :param load_balancer_id: ID of the load balancer to update - :param load_balancer_updates: Dict containing updated load balancer - :returns: None - :raises LBNotFound: The referenced load balancer was not found - """ - lb = None - try: - lb = self._get_db_obj_until_pending_update( - self._lb_repo, load_balancer_id) - except tenacity.RetryError as e: - LOG.warning('Load balancer did not go into %s in 60 seconds. ' - 'This either due to an in-progress Octavia upgrade ' - 'or an overloaded and failing database. 
Assuming ' - 'an upgrade is in progress and continuing.', - constants.PENDING_UPDATE) - lb = e.last_attempt.result() - - listeners, _ = self._listener_repo.get_all( - db_apis.get_session(), - load_balancer_id=load_balancer_id) - - update_lb_tf = self.taskflow_load( - self._lb_flows.get_update_load_balancer_flow(), - store={constants.LOADBALANCER: lb, - constants.LISTENERS: listeners, - constants.UPDATE_DICT: load_balancer_updates}) - - with tf_logging.DynamicLoggingListener(update_lb_tf, - log=LOG): - update_lb_tf.run() - - @tenacity.retry( - retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound), - wait=tenacity.wait_incrementing( - CONF.haproxy_amphora.api_db_commit_retry_initial_delay, - CONF.haproxy_amphora.api_db_commit_retry_backoff, - CONF.haproxy_amphora.api_db_commit_retry_max), - stop=tenacity.stop_after_attempt( - CONF.haproxy_amphora.api_db_commit_retry_attempts)) - def create_member(self, member_id): - """Creates a pool member. - - :param member_id: ID of the member to create - :returns: None - :raises NoSuitablePool: Unable to find the node pool - """ - member = self._member_repo.get(db_apis.get_session(), - id=member_id) - if not member: - LOG.warning('Failed to fetch %s %s from DB. Retrying for up to ' - '60 seconds.', 'member', member_id) - raise db_exceptions.NoResultFound - - pool = member.pool - listeners = pool.listeners - load_balancer = pool.load_balancer - - store = { - constants.MEMBER: member, - constants.LISTENERS: listeners, - constants.LOADBALANCER: load_balancer, - constants.LOADBALANCER_ID: load_balancer.id, - constants.POOL: pool} - if load_balancer.availability_zone: - store[constants.AVAILABILITY_ZONE] = ( - self._az_repo.get_availability_zone_metadata_dict( - db_apis.get_session(), load_balancer.availability_zone)) - else: - store[constants.AVAILABILITY_ZONE] = {} - - create_member_tf = self.taskflow_load( - self._member_flows.get_create_member_flow(), - store=store) - with tf_logging.DynamicLoggingListener(create_member_tf, - log=LOG): - create_member_tf.run() - - def delete_member(self, member_id): - """Deletes a pool member. 
- - :param member_id: ID of the member to delete - :returns: None - :raises MemberNotFound: The referenced member was not found - """ - member = self._member_repo.get(db_apis.get_session(), - id=member_id) - pool = member.pool - listeners = pool.listeners - load_balancer = pool.load_balancer - - store = { - constants.MEMBER: member, - constants.LISTENERS: listeners, - constants.LOADBALANCER: load_balancer, - constants.LOADBALANCER_ID: load_balancer.id, - constants.POOL: pool} - if load_balancer.availability_zone: - store[constants.AVAILABILITY_ZONE] = ( - self._az_repo.get_availability_zone_metadata_dict( - db_apis.get_session(), load_balancer.availability_zone)) - else: - store[constants.AVAILABILITY_ZONE] = {} - - delete_member_tf = self.taskflow_load( - self._member_flows.get_delete_member_flow(), - store=store - ) - with tf_logging.DynamicLoggingListener(delete_member_tf, - log=LOG): - delete_member_tf.run() - - @tenacity.retry( - retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound), - wait=tenacity.wait_incrementing( - CONF.haproxy_amphora.api_db_commit_retry_initial_delay, - CONF.haproxy_amphora.api_db_commit_retry_backoff, - CONF.haproxy_amphora.api_db_commit_retry_max), - stop=tenacity.stop_after_attempt( - CONF.haproxy_amphora.api_db_commit_retry_attempts)) - def batch_update_members(self, old_member_ids, new_member_ids, - updated_members): - new_members = [self._member_repo.get(db_apis.get_session(), id=mid) - for mid in new_member_ids] - # The API may not have committed all of the new member records yet. - # Make sure we retry looking them up. - if None in new_members or len(new_members) != len(new_member_ids): - LOG.warning('Failed to fetch one of the new members from DB. ' - 'Retrying for up to 60 seconds.') - raise db_exceptions.NoResultFound - old_members = [self._member_repo.get(db_apis.get_session(), id=mid) - for mid in old_member_ids] - updated_members = [ - (self._member_repo.get(db_apis.get_session(), id=m.get('id')), m) - for m in updated_members] - if old_members: - pool = old_members[0].pool - elif new_members: - pool = new_members[0].pool - else: - pool = updated_members[0][0].pool - listeners = pool.listeners - load_balancer = pool.load_balancer - - store = { - constants.LISTENERS: listeners, - constants.LOADBALANCER: load_balancer, - constants.LOADBALANCER_ID: load_balancer.id, - constants.POOL: pool} - if load_balancer.availability_zone: - store[constants.AVAILABILITY_ZONE] = ( - self._az_repo.get_availability_zone_metadata_dict( - db_apis.get_session(), load_balancer.availability_zone)) - else: - store[constants.AVAILABILITY_ZONE] = {} - - batch_update_members_tf = self.taskflow_load( - self._member_flows.get_batch_update_members_flow( - old_members, new_members, updated_members), - store=store) - with tf_logging.DynamicLoggingListener(batch_update_members_tf, - log=LOG): - batch_update_members_tf.run() - - def update_member(self, member_id, member_updates): - """Updates a pool member. - - :param member_id: ID of the member to update - :param member_updates: Dict containing updated member attributes - :returns: None - :raises MemberNotFound: The referenced member was not found - """ - try: - member = self._get_db_obj_until_pending_update( - self._member_repo, member_id) - except tenacity.RetryError as e: - LOG.warning('Member did not go into %s in 60 seconds. ' - 'This is either due to an in-progress Octavia upgrade ' - 'or an overloaded and failing database.
Assuming ' - 'an upgrade is in progress and continuing.', - constants.PENDING_UPDATE) - member = e.last_attempt.result() - - pool = member.pool - listeners = pool.listeners - load_balancer = pool.load_balancer - - store = { - constants.MEMBER: member, - constants.LISTENERS: listeners, - constants.LOADBALANCER: load_balancer, - constants.POOL: pool, - constants.UPDATE_DICT: member_updates} - if load_balancer.availability_zone: - store[constants.AVAILABILITY_ZONE] = ( - self._az_repo.get_availability_zone_metadata_dict( - db_apis.get_session(), load_balancer.availability_zone)) - else: - store[constants.AVAILABILITY_ZONE] = {} - - update_member_tf = self.taskflow_load( - self._member_flows.get_update_member_flow(), - store=store) - with tf_logging.DynamicLoggingListener(update_member_tf, - log=LOG): - update_member_tf.run() - - @tenacity.retry( - retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound), - wait=tenacity.wait_incrementing( - CONF.haproxy_amphora.api_db_commit_retry_initial_delay, - CONF.haproxy_amphora.api_db_commit_retry_backoff, - CONF.haproxy_amphora.api_db_commit_retry_max), - stop=tenacity.stop_after_attempt( - CONF.haproxy_amphora.api_db_commit_retry_attempts)) - def create_pool(self, pool_id): - """Creates a node pool. - - :param pool_id: ID of the pool to create - :returns: None - :raises NoResultFound: Unable to find the object - """ - pool = self._pool_repo.get(db_apis.get_session(), - id=pool_id) - if not pool: - LOG.warning('Failed to fetch %s %s from DB. Retrying for up to ' - '60 seconds.', 'pool', pool_id) - raise db_exceptions.NoResultFound - - listeners = pool.listeners - load_balancer = pool.load_balancer - - create_pool_tf = self.taskflow_load(self._pool_flows. - get_create_pool_flow(), - store={constants.POOL: pool, - constants.LISTENERS: - listeners, - constants.LOADBALANCER: - load_balancer}) - with tf_logging.DynamicLoggingListener(create_pool_tf, - log=LOG): - create_pool_tf.run() - - def delete_pool(self, pool_id): - """Deletes a node pool. - - :param pool_id: ID of the pool to delete - :returns: None - :raises PoolNotFound: The referenced pool was not found - """ - pool = self._pool_repo.get(db_apis.get_session(), - id=pool_id) - - load_balancer = pool.load_balancer - listeners = pool.listeners - - delete_pool_tf = self.taskflow_load( - self._pool_flows.get_delete_pool_flow(), - store={constants.POOL: pool, constants.LISTENERS: listeners, - constants.LOADBALANCER: load_balancer}) - with tf_logging.DynamicLoggingListener(delete_pool_tf, - log=LOG): - delete_pool_tf.run() - - def update_pool(self, pool_id, pool_updates): - """Updates a node pool. - - :param pool_id: ID of the pool to update - :param pool_updates: Dict containing updated pool attributes - :returns: None - :raises PoolNotFound: The referenced pool was not found - """ - pool = None - try: - pool = self._get_db_obj_until_pending_update( - self._pool_repo, pool_id) - except tenacity.RetryError as e: - LOG.warning('Pool did not go into %s in 60 seconds. ' - 'This is either due to an in-progress Octavia upgrade ' - 'or an overloaded and failing database. Assuming ' - 'an upgrade is in progress and continuing.', - constants.PENDING_UPDATE) - pool = e.last_attempt.result() - - listeners = pool.listeners - load_balancer = pool.load_balancer - - update_pool_tf = self.taskflow_load(self._pool_flows.
- get_update_pool_flow(), - store={constants.POOL: pool, - constants.LISTENERS: - listeners, - constants.LOADBALANCER: - load_balancer, - constants.UPDATE_DICT: - pool_updates}) - with tf_logging.DynamicLoggingListener(update_pool_tf, - log=LOG): - update_pool_tf.run() - - @tenacity.retry( - retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound), - wait=tenacity.wait_incrementing( - CONF.haproxy_amphora.api_db_commit_retry_initial_delay, - CONF.haproxy_amphora.api_db_commit_retry_backoff, - CONF.haproxy_amphora.api_db_commit_retry_max), - stop=tenacity.stop_after_attempt( - CONF.haproxy_amphora.api_db_commit_retry_attempts)) - def create_l7policy(self, l7policy_id): - """Creates an L7 Policy. - - :param l7policy_id: ID of the l7policy to create - :returns: None - :raises NoResultFound: Unable to find the object - """ - l7policy = self._l7policy_repo.get(db_apis.get_session(), - id=l7policy_id) - if not l7policy: - LOG.warning('Failed to fetch %s %s from DB. Retrying for up to ' - '60 seconds.', 'l7policy', l7policy_id) - raise db_exceptions.NoResultFound - - listeners = [l7policy.listener] - load_balancer = l7policy.listener.load_balancer - - create_l7policy_tf = self.taskflow_load( - self._l7policy_flows.get_create_l7policy_flow(), - store={constants.L7POLICY: l7policy, - constants.LISTENERS: listeners, - constants.LOADBALANCER: load_balancer}) - with tf_logging.DynamicLoggingListener(create_l7policy_tf, - log=LOG): - create_l7policy_tf.run() - - def delete_l7policy(self, l7policy_id): - """Deletes an L7 policy. - - :param l7policy_id: ID of the l7policy to delete - :returns: None - :raises L7PolicyNotFound: The referenced l7policy was not found - """ - l7policy = self._l7policy_repo.get(db_apis.get_session(), - id=l7policy_id) - - load_balancer = l7policy.listener.load_balancer - listeners = [l7policy.listener] - - delete_l7policy_tf = self.taskflow_load( - self._l7policy_flows.get_delete_l7policy_flow(), - store={constants.L7POLICY: l7policy, - constants.LISTENERS: listeners, - constants.LOADBALANCER: load_balancer}) - with tf_logging.DynamicLoggingListener(delete_l7policy_tf, - log=LOG): - delete_l7policy_tf.run() - - def update_l7policy(self, l7policy_id, l7policy_updates): - """Updates an L7 policy. - - :param l7policy_id: ID of the l7policy to update - :param l7policy_updates: Dict containing updated l7policy attributes - :returns: None - :raises L7PolicyNotFound: The referenced l7policy was not found - """ - l7policy = None - try: - l7policy = self._get_db_obj_until_pending_update( - self._l7policy_repo, l7policy_id) - except tenacity.RetryError as e: - LOG.warning('L7 policy did not go into %s in 60 seconds. ' - 'This is either due to an in-progress Octavia upgrade ' - 'or an overloaded and failing database.
Assuming ' - 'an upgrade is in progress and continuing.', - constants.PENDING_UPDATE) - l7policy = e.last_attempt.result() - - listeners = [l7policy.listener] - load_balancer = l7policy.listener.load_balancer - - update_l7policy_tf = self.taskflow_load( - self._l7policy_flows.get_update_l7policy_flow(), - store={constants.L7POLICY: l7policy, - constants.LISTENERS: listeners, - constants.LOADBALANCER: load_balancer, - constants.UPDATE_DICT: l7policy_updates}) - with tf_logging.DynamicLoggingListener(update_l7policy_tf, - log=LOG): - update_l7policy_tf.run() - - @tenacity.retry( - retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound), - wait=tenacity.wait_incrementing( - CONF.haproxy_amphora.api_db_commit_retry_initial_delay, - CONF.haproxy_amphora.api_db_commit_retry_backoff, - CONF.haproxy_amphora.api_db_commit_retry_max), - stop=tenacity.stop_after_attempt( - CONF.haproxy_amphora.api_db_commit_retry_attempts)) - def create_l7rule(self, l7rule_id): - """Creates an L7 Rule. - - :param l7rule_id: ID of the l7rule to create - :returns: None - :raises NoResultFound: Unable to find the object - """ - l7rule = self._l7rule_repo.get(db_apis.get_session(), - id=l7rule_id) - if not l7rule: - LOG.warning('Failed to fetch %s %s from DB. Retrying for up to ' - '60 seconds.', 'l7rule', l7rule_id) - raise db_exceptions.NoResultFound - - l7policy = l7rule.l7policy - listeners = [l7policy.listener] - load_balancer = l7policy.listener.load_balancer - - create_l7rule_tf = self.taskflow_load( - self._l7rule_flows.get_create_l7rule_flow(), - store={constants.L7RULE: l7rule, - constants.L7POLICY: l7policy, - constants.LISTENERS: listeners, - constants.LOADBALANCER: load_balancer}) - with tf_logging.DynamicLoggingListener(create_l7rule_tf, - log=LOG): - create_l7rule_tf.run() - - def delete_l7rule(self, l7rule_id): - """Deletes an L7 rule. - - :param l7rule_id: ID of the l7rule to delete - :returns: None - :raises L7RuleNotFound: The referenced l7rule was not found - """ - l7rule = self._l7rule_repo.get(db_apis.get_session(), - id=l7rule_id) - l7policy = l7rule.l7policy - load_balancer = l7policy.listener.load_balancer - listeners = [l7policy.listener] - - delete_l7rule_tf = self.taskflow_load( - self._l7rule_flows.get_delete_l7rule_flow(), - store={constants.L7RULE: l7rule, - constants.L7POLICY: l7policy, - constants.LISTENERS: listeners, - constants.LOADBALANCER: load_balancer}) - with tf_logging.DynamicLoggingListener(delete_l7rule_tf, - log=LOG): - delete_l7rule_tf.run() - - def update_l7rule(self, l7rule_id, l7rule_updates): - """Updates an L7 rule. - - :param l7rule_id: ID of the l7rule to update - :param l7rule_updates: Dict containing updated l7rule attributes - :returns: None - :raises L7RuleNotFound: The referenced l7rule was not found - """ - l7rule = None - try: - l7rule = self._get_db_obj_until_pending_update( - self._l7rule_repo, l7rule_id) - except tenacity.RetryError as e: - LOG.warning('L7 rule did not go into %s in 60 seconds. ' - 'This is either due to an in-progress Octavia upgrade ' - 'or an overloaded and failing database.
Assuming ' - 'an upgrade is in progress and continuing.', - constants.PENDING_UPDATE) - l7rule = e.last_attempt.result() - - l7policy = l7rule.l7policy - listeners = [l7policy.listener] - load_balancer = l7policy.listener.load_balancer - - update_l7rule_tf = self.taskflow_load( - self._l7rule_flows.get_update_l7rule_flow(), - store={constants.L7RULE: l7rule, - constants.L7POLICY: l7policy, - constants.LISTENERS: listeners, - constants.LOADBALANCER: load_balancer, - constants.UPDATE_DICT: l7rule_updates}) - with tf_logging.DynamicLoggingListener(update_l7rule_tf, - log=LOG): - update_l7rule_tf.run() - - def failover_amphora(self, amphora_id, reraise=False): - """Perform failover operations for an amphora. - - Note: This expects the load balancer to already be in - provisioning_status=PENDING_UPDATE state. - - :param amphora_id: ID for amphora to failover - :param reraise: If enabled reraise any caught exception - :returns: None - :raises octavia.common.exceptions.NotFound: The referenced amphora was - not found - """ - amphora = None - try: - amphora = self._amphora_repo.get(db_apis.get_session(), - id=amphora_id) - if amphora is None: - LOG.error('Amphora failover for amphora %s failed because ' - 'there is no record of this amphora in the ' - 'database. Check that the [house_keeping] ' - 'amphora_expiry_age configuration setting is not ' - 'too short. Skipping failover.', amphora_id) - raise exceptions.NotFound(resource=constants.AMPHORA, - id=amphora_id) - - if amphora.status == constants.DELETED: - LOG.warning('Amphora %s is marked DELETED in the database but ' - 'was submitted for failover. Deleting it from the ' - 'amphora health table to exclude it from health ' - 'checks and skipping the failover.', amphora.id) - self._amphora_health_repo.delete(db_apis.get_session(), - amphora_id=amphora.id) - return - - loadbalancer = None - if amphora.load_balancer_id: - loadbalancer = self._lb_repo.get(db_apis.get_session(), - id=amphora.load_balancer_id) - lb_amp_count = None - if loadbalancer: - if loadbalancer.topology == constants.TOPOLOGY_ACTIVE_STANDBY: - lb_amp_count = 2 - elif loadbalancer.topology == constants.TOPOLOGY_SINGLE: - lb_amp_count = 1 - - amp_failover_flow = self._amphora_flows.get_failover_amphora_flow( - amphora, lb_amp_count) - - az_metadata = {} - flavor = {} - lb_id = None - vip = None - server_group_id = None - if loadbalancer: - lb_id = loadbalancer.id - if loadbalancer.flavor_id: - flavor = self._flavor_repo.get_flavor_metadata_dict( - db_apis.get_session(), loadbalancer.flavor_id) - flavor[constants.LOADBALANCER_TOPOLOGY] = ( - loadbalancer.topology) - else: - flavor = {constants.LOADBALANCER_TOPOLOGY: - loadbalancer.topology} - if loadbalancer.availability_zone: - az_metadata = ( - self._az_repo.get_availability_zone_metadata_dict( - db_apis.get_session(), - loadbalancer.availability_zone)) - vip = loadbalancer.vip - server_group_id = loadbalancer.server_group_id - - stored_params = {constants.AVAILABILITY_ZONE: az_metadata, - constants.BUILD_TYPE_PRIORITY: - constants.LB_CREATE_FAILOVER_PRIORITY, - constants.FLAVOR: flavor, - constants.LOADBALANCER: loadbalancer, - constants.SERVER_GROUP_ID: server_group_id, - constants.LOADBALANCER_ID: lb_id, - constants.VIP: vip} - - failover_amphora_tf = self.taskflow_load(amp_failover_flow, - store=stored_params) - - with tf_logging.DynamicLoggingListener(failover_amphora_tf, - log=LOG): - failover_amphora_tf.run() - - LOG.info("Successfully completed the failover for an amphora: %s", - {"id": amphora_id, - "load_balancer_id": 
lb_id, - "lb_network_ip": amphora.lb_network_ip, - "compute_id": amphora.compute_id, - "role": amphora.role}) - - except Exception as e: - with excutils.save_and_reraise_exception(reraise=reraise): - LOG.exception("Amphora %s failover exception: %s", - amphora_id, str(e)) - self._amphora_repo.update(db_apis.get_session(), - amphora_id, status=constants.ERROR) - if amphora and amphora.load_balancer_id: - self._lb_repo.update( - db_apis.get_session(), amphora.load_balancer_id, - provisioning_status=constants.ERROR) - - @staticmethod - def _get_amphorae_for_failover(load_balancer): - """Returns an ordered list of amphora to failover. - - :param load_balancer: The load balancer being failed over. - :returns: An ordered list of amphora to failover, - first amp to failover is last in the list - :raises octavia.common.exceptions.InvalidTopology: LB has an unknown - topology. - """ - if load_balancer.topology == constants.TOPOLOGY_SINGLE: - # In SINGLE topology, amp failover order does not matter - return [a for a in load_balancer.amphorae - if a.status != constants.DELETED] - - if load_balancer.topology == constants.TOPOLOGY_ACTIVE_STANDBY: - # In Active/Standby we should prefer the standby amp - # for failover first in case the Active is still able to pass - # traffic. - # Note: The active amp can switch at any time and in less than a - # second, so this is "best effort". - amphora_driver = utils.get_amphora_driver() - timeout_dict = { - constants.CONN_MAX_RETRIES: - CONF.haproxy_amphora.failover_connection_max_retries, - constants.CONN_RETRY_INTERVAL: - CONF.haproxy_amphora.failover_connection_retry_interval} - amps = [] - selected_amp = None - for amp in load_balancer.amphorae: - if amp.status == constants.DELETED: - continue - if selected_amp is None: - try: - if amphora_driver.get_interface_from_ip( - amp, load_balancer.vip.ip_address, - timeout_dict): - # This is a potential ACTIVE, add it to the list - amps.append(amp) - else: - # This one doesn't have the VIP IP, so start - # failovers here. - selected_amp = amp - LOG.debug("Selected amphora %s as the initial " - "failover amphora.", amp.id) - except Exception: - # This amphora is broken, so start failovers here. - selected_amp = amp - else: - # We have already found a STANDBY, so add the rest to the - # list without querying them. - amps.append(amp) - # Put the selected amphora at the end of the list so it is - # first to failover. - if selected_amp: - amps.append(selected_amp) - return amps - - LOG.error('Unknown load balancer topology found: %s, aborting ' - 'failover.', load_balancer.topology) - raise exceptions.InvalidTopology(topology=load_balancer.topology) - - def failover_loadbalancer(self, load_balancer_id): - """Perform failover operations for a load balancer. - - Note: This expects the load balancer to already be in - provisioning_status=PENDING_UPDATE state. - - :param load_balancer_id: ID for load balancer to failover - :returns: None - :raises octavia.common.exceptions.NotFound: The load balancer was not - found. - """ - try: - lb = self._lb_repo.get(db_apis.get_session(), - id=load_balancer_id) - if lb is None: - raise exceptions.NotFound(resource=constants.LOADBALANCER, - id=load_balancer_id) - - # Get the ordered list of amphorae to failover for this LB. - amps = self._get_amphorae_for_failover(lb) - - if lb.topology == constants.TOPOLOGY_SINGLE: - if len(amps) != 1: - LOG.warning('%d amphorae found on load balancer %s where ' - 'one should exist.
Repairing.', len(amps), - load_balancer_id) - elif lb.topology == constants.TOPOLOGY_ACTIVE_STANDBY: - - if len(amps) != 2: - LOG.warning('%d amphorae found on load balancer %s where ' - 'two should exist. Repairing.', len(amps), - load_balancer_id) - else: - LOG.error('Unknown load balancer topology found: %s, aborting ' - 'failover!', lb.topology) - raise exceptions.InvalidTopology(topology=lb.topology) - - # Build our failover flow. - lb_failover_flow = self._lb_flows.get_failover_LB_flow(amps, lb) - - # We must provide a topology in the flavor definition - # here for the amphora to be created with the correct - # configuration. - if lb.flavor_id: - flavor = self._flavor_repo.get_flavor_metadata_dict( - db_apis.get_session(), lb.flavor_id) - flavor[constants.LOADBALANCER_TOPOLOGY] = lb.topology - else: - flavor = {constants.LOADBALANCER_TOPOLOGY: lb.topology} - - stored_params = {constants.LOADBALANCER: lb, - constants.BUILD_TYPE_PRIORITY: - constants.LB_CREATE_FAILOVER_PRIORITY, - constants.SERVER_GROUP_ID: lb.server_group_id, - constants.LOADBALANCER_ID: lb.id, - constants.FLAVOR: flavor} - - if lb.availability_zone: - stored_params[constants.AVAILABILITY_ZONE] = ( - self._az_repo.get_availability_zone_metadata_dict( - db_apis.get_session(), lb.availability_zone)) - else: - stored_params[constants.AVAILABILITY_ZONE] = {} - - failover_lb_tf = self.taskflow_load(lb_failover_flow, - store=stored_params) - - with tf_logging.DynamicLoggingListener(failover_lb_tf, log=LOG): - failover_lb_tf.run() - LOG.info('Failover of load balancer %s completed successfully.', - lb.id) - - except Exception as e: - with excutils.save_and_reraise_exception(reraise=False): - LOG.exception("LB %(lbid)s failover exception: %(exc)s", - {'lbid': load_balancer_id, 'exc': str(e)}) - self._lb_repo.update( - db_apis.get_session(), load_balancer_id, - provisioning_status=constants.ERROR) - - def amphora_cert_rotation(self, amphora_id): - """Perform cert rotation for an amphora. - - :param amphora_id: ID for amphora to rotate - :returns: None - :raises AmphoraNotFound: The referenced amphora was not found - """ - - amp = self._amphora_repo.get(db_apis.get_session(), - id=amphora_id) - LOG.info("Start amphora cert rotation, amphora's id is: %s", - amphora_id) - - certrotation_amphora_tf = self.taskflow_load( - self._amphora_flows.cert_rotate_amphora_flow(), - store={constants.AMPHORA: amp, - constants.AMPHORA_ID: amp.id}) - - with tf_logging.DynamicLoggingListener(certrotation_amphora_tf, - log=LOG): - certrotation_amphora_tf.run() - LOG.info("Finished amphora cert rotation, amphora's id was: %s", - amphora_id) - - def update_amphora_agent_config(self, amphora_id): - """Update the amphora agent configuration. - - Note: This will update the amphora agent configuration file and - update the running configuration for mutable configuration - items. - - :param amphora_id: ID of the amphora to update.
- :returns: None - """ - LOG.info("Start amphora agent configuration update, amphora's id " - "is: %s", amphora_id) - amp = self._amphora_repo.get(db_apis.get_session(), id=amphora_id) - lb = self._amphora_repo.get_lb_for_amphora(db_apis.get_session(), - amphora_id) - flavor = {} - if lb.flavor_id: - flavor = self._flavor_repo.get_flavor_metadata_dict( - db_apis.get_session(), lb.flavor_id) - - update_amphora_tf = self.taskflow_load( - self._amphora_flows.update_amphora_config_flow(), - store={constants.AMPHORA: amp, - constants.FLAVOR: flavor}) - - with tf_logging.DynamicLoggingListener(update_amphora_tf, - log=LOG): - update_amphora_tf.run() - LOG.info("Finished amphora agent configuration update, amphora's id " - "was: %s", amphora_id) diff --git a/octavia/controller/worker/v1/flows/__init__.py b/octavia/controller/worker/v1/flows/__init__.py deleted file mode 100644 index 94e731d201..0000000000 --- a/octavia/controller/worker/v1/flows/__init__.py +++ /dev/null @@ -1,11 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. diff --git a/octavia/controller/worker/v1/flows/amphora_flows.py b/octavia/controller/worker/v1/flows/amphora_flows.py deleted file mode 100644 index ac8e37655d..0000000000 --- a/octavia/controller/worker/v1/flows/amphora_flows.py +++ /dev/null @@ -1,610 +0,0 @@ -# Copyright 2015 Hewlett-Packard Development Company, L.P. -# Copyright 2020 Red Hat, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from oslo_config import cfg -from oslo_log import log as logging -from taskflow.patterns import linear_flow -from taskflow.patterns import unordered_flow - -from octavia.common import constants -from octavia.common import utils -from octavia.controller.worker.v1.tasks import amphora_driver_tasks -from octavia.controller.worker.v1.tasks import cert_task -from octavia.controller.worker.v1.tasks import compute_tasks -from octavia.controller.worker.v1.tasks import database_tasks -from octavia.controller.worker.v1.tasks import lifecycle_tasks -from octavia.controller.worker.v1.tasks import network_tasks -from octavia.controller.worker.v1.tasks import retry_tasks - -CONF = cfg.CONF -LOG = logging.getLogger(__name__) - - -class AmphoraFlows(object): - - def get_create_amphora_flow(self): - """Creates a flow to create an amphora. 
- - :returns: The flow for creating the amphora - """ - create_amphora_flow = linear_flow.Flow(constants.CREATE_AMPHORA_FLOW) - create_amphora_flow.add(database_tasks.CreateAmphoraInDB( - provides=constants.AMPHORA_ID)) - create_amphora_flow.add(lifecycle_tasks.AmphoraIDToErrorOnRevertTask( - requires=constants.AMPHORA_ID)) - create_amphora_flow.add(cert_task.GenerateServerPEMTask( - provides=constants.SERVER_PEM)) - create_amphora_flow.add( - database_tasks.UpdateAmphoraDBCertExpiration( - requires=(constants.AMPHORA_ID, constants.SERVER_PEM))) - create_amphora_flow.add(compute_tasks.CertComputeCreate( - requires=(constants.AMPHORA_ID, constants.SERVER_PEM, - constants.SERVER_GROUP_ID, constants.BUILD_TYPE_PRIORITY, - constants.FLAVOR, constants.AVAILABILITY_ZONE), - provides=constants.COMPUTE_ID)) - create_amphora_flow.add(database_tasks.MarkAmphoraBootingInDB( - requires=(constants.AMPHORA_ID, constants.COMPUTE_ID))) - create_amphora_flow.add(compute_tasks.ComputeActiveWait( - requires=(constants.COMPUTE_ID, constants.AMPHORA_ID), - provides=constants.COMPUTE_OBJ)) - create_amphora_flow.add(database_tasks.UpdateAmphoraInfo( - requires=(constants.AMPHORA_ID, constants.COMPUTE_OBJ), - provides=constants.AMPHORA)) - create_amphora_flow.add( - amphora_driver_tasks.AmphoraComputeConnectivityWait( - requires=constants.AMPHORA)) - create_amphora_flow.add(database_tasks.ReloadAmphora( - requires=constants.AMPHORA_ID, - provides=constants.AMPHORA)) - create_amphora_flow.add(amphora_driver_tasks.AmphoraFinalize( - requires=constants.AMPHORA)) - create_amphora_flow.add(database_tasks.MarkAmphoraReadyInDB( - requires=constants.AMPHORA)) - - return create_amphora_flow - - def _get_post_map_lb_subflow(self, prefix, role): - """Set amphora type after mapped to lb.""" - - sf_name = prefix + '-' + constants.POST_MAP_AMP_TO_LB_SUBFLOW - post_map_amp_to_lb = linear_flow.Flow( - sf_name) - - post_map_amp_to_lb.add(database_tasks.ReloadAmphora( - name=sf_name + '-' + constants.RELOAD_AMPHORA, - requires=constants.AMPHORA_ID, - provides=constants.AMPHORA)) - - post_map_amp_to_lb.add(amphora_driver_tasks.AmphoraConfigUpdate( - name=sf_name + '-' + constants.AMPHORA_CONFIG_UPDATE_TASK, - requires=(constants.AMPHORA, constants.FLAVOR))) - - if role == constants.ROLE_MASTER: - post_map_amp_to_lb.add(database_tasks.MarkAmphoraMasterInDB( - name=sf_name + '-' + constants.MARK_AMP_MASTER_INDB, - requires=constants.AMPHORA)) - elif role == constants.ROLE_BACKUP: - post_map_amp_to_lb.add(database_tasks.MarkAmphoraBackupInDB( - name=sf_name + '-' + constants.MARK_AMP_BACKUP_INDB, - requires=constants.AMPHORA)) - elif role == constants.ROLE_STANDALONE: - post_map_amp_to_lb.add(database_tasks.MarkAmphoraStandAloneInDB( - name=sf_name + '-' + constants.MARK_AMP_STANDALONE_INDB, - requires=constants.AMPHORA)) - - return post_map_amp_to_lb - - def _get_create_amp_for_lb_subflow(self, prefix, role): - """Create a new amphora for lb.""" - - sf_name = prefix + '-' + constants.CREATE_AMP_FOR_LB_SUBFLOW - create_amp_for_lb_subflow = linear_flow.Flow(sf_name) - create_amp_for_lb_subflow.add(database_tasks.CreateAmphoraInDB( - name=sf_name + '-' + constants.CREATE_AMPHORA_INDB, - requires=constants.LOADBALANCER_ID, - provides=constants.AMPHORA_ID)) - - create_amp_for_lb_subflow.add(cert_task.GenerateServerPEMTask( - name=sf_name + '-' + constants.GENERATE_SERVER_PEM, - provides=constants.SERVER_PEM)) - - create_amp_for_lb_subflow.add( - database_tasks.UpdateAmphoraDBCertExpiration( - name=sf_name + '-' + 
constants.UPDATE_CERT_EXPIRATION, - requires=(constants.AMPHORA_ID, constants.SERVER_PEM))) - - create_amp_for_lb_subflow.add(compute_tasks.CertComputeCreate( - name=sf_name + '-' + constants.CERT_COMPUTE_CREATE, - requires=(constants.AMPHORA_ID, constants.SERVER_PEM, - constants.BUILD_TYPE_PRIORITY, - constants.SERVER_GROUP_ID, - constants.FLAVOR, constants.AVAILABILITY_ZONE), - provides=constants.COMPUTE_ID)) - create_amp_for_lb_subflow.add(database_tasks.UpdateAmphoraComputeId( - name=sf_name + '-' + constants.UPDATE_AMPHORA_COMPUTEID, - requires=(constants.AMPHORA_ID, constants.COMPUTE_ID))) - create_amp_for_lb_subflow.add(database_tasks.MarkAmphoraBootingInDB( - name=sf_name + '-' + constants.MARK_AMPHORA_BOOTING_INDB, - requires=(constants.AMPHORA_ID, constants.COMPUTE_ID))) - create_amp_for_lb_subflow.add(compute_tasks.ComputeActiveWait( - name=sf_name + '-' + constants.COMPUTE_WAIT, - requires=(constants.COMPUTE_ID, constants.AMPHORA_ID, - constants.AVAILABILITY_ZONE), - provides=constants.COMPUTE_OBJ)) - create_amp_for_lb_subflow.add(database_tasks.UpdateAmphoraInfo( - name=sf_name + '-' + constants.UPDATE_AMPHORA_INFO, - requires=(constants.AMPHORA_ID, constants.COMPUTE_OBJ), - provides=constants.AMPHORA)) - create_amp_for_lb_subflow.add( - amphora_driver_tasks.AmphoraComputeConnectivityWait( - name=sf_name + '-' + constants.AMP_COMPUTE_CONNECTIVITY_WAIT, - requires=constants.AMPHORA)) - create_amp_for_lb_subflow.add(amphora_driver_tasks.AmphoraFinalize( - name=sf_name + '-' + constants.AMPHORA_FINALIZE, - requires=constants.AMPHORA)) - create_amp_for_lb_subflow.add( - database_tasks.MarkAmphoraAllocatedInDB( - name=sf_name + '-' + constants.MARK_AMPHORA_ALLOCATED_INDB, - requires=(constants.AMPHORA, constants.LOADBALANCER_ID))) - create_amp_for_lb_subflow.add(database_tasks.ReloadAmphora( - name=sf_name + '-' + constants.RELOAD_AMPHORA, - requires=constants.AMPHORA_ID, - provides=constants.AMPHORA)) - - if role == constants.ROLE_MASTER: - create_amp_for_lb_subflow.add(database_tasks.MarkAmphoraMasterInDB( - name=sf_name + '-' + constants.MARK_AMP_MASTER_INDB, - requires=constants.AMPHORA)) - elif role == constants.ROLE_BACKUP: - create_amp_for_lb_subflow.add(database_tasks.MarkAmphoraBackupInDB( - name=sf_name + '-' + constants.MARK_AMP_BACKUP_INDB, - requires=constants.AMPHORA)) - elif role == constants.ROLE_STANDALONE: - create_amp_for_lb_subflow.add( - database_tasks.MarkAmphoraStandAloneInDB( - name=sf_name + '-' + constants.MARK_AMP_STANDALONE_INDB, - requires=constants.AMPHORA)) - - return create_amp_for_lb_subflow - - def get_amphora_for_lb_subflow( - self, prefix, role=constants.ROLE_STANDALONE): - return self._get_create_amp_for_lb_subflow(prefix, role) - - def get_delete_amphora_flow( - self, amphora, - retry_attempts=CONF.controller_worker.amphora_delete_retries, - retry_interval=( - CONF.controller_worker.amphora_delete_retry_interval)): - """Creates a subflow to delete an amphora and its port. - - This flow is idempotent and safe to retry. - - :param amphora: An amphora object. - :param retry_attempts: The number of times the flow is retried. - :param retry_interval: The time to wait, in seconds, between retries. - :returns: The subflow for deleting the amphora. - :raises AmphoraNotFound: The referenced Amphora was not found.
- """ - - delete_amphora_flow = linear_flow.Flow( - name=constants.DELETE_AMPHORA_FLOW + '-' + amphora.id, - retry=retry_tasks.SleepingRetryTimesController( - name='retry-' + constants.DELETE_AMPHORA_FLOW + '-' + - amphora.id, - attempts=retry_attempts, interval=retry_interval)) - delete_amphora_flow.add(lifecycle_tasks.AmphoraToErrorOnRevertTask( - name=constants.AMPHORA_TO_ERROR_ON_REVERT + '-' + amphora.id, - inject={constants.AMPHORA: amphora})) - delete_amphora_flow.add( - database_tasks.MarkAmphoraPendingDeleteInDB( - name=constants.MARK_AMPHORA_PENDING_DELETE + '-' + amphora.id, - inject={constants.AMPHORA: amphora})) - delete_amphora_flow.add(database_tasks.MarkAmphoraHealthBusy( - name=constants.MARK_AMPHORA_HEALTH_BUSY + '-' + amphora.id, - inject={constants.AMPHORA: amphora})) - delete_amphora_flow.add(compute_tasks.ComputeDelete( - name=constants.DELETE_AMPHORA + '-' + amphora.id, - inject={constants.AMPHORA: amphora, - constants.PASSIVE_FAILURE: True})) - delete_amphora_flow.add(database_tasks.DisableAmphoraHealthMonitoring( - name=constants.DISABLE_AMP_HEALTH_MONITORING + '-' + amphora.id, - inject={constants.AMPHORA: amphora})) - delete_amphora_flow.add(database_tasks.MarkAmphoraDeletedInDB( - name=constants.MARK_AMPHORA_DELETED + '-' + amphora.id, - inject={constants.AMPHORA: amphora})) - if amphora.vrrp_port_id: - delete_amphora_flow.add(network_tasks.DeletePort( - name=(constants.DELETE_PORT + '-' + str(amphora.id) + '-' + - str(amphora.vrrp_port_id)), - inject={constants.PORT_ID: amphora.vrrp_port_id, - constants.PASSIVE_FAILURE: True})) - # TODO(johnsom) What about cleaning up any member ports? - # maybe we should get the list of attached ports prior to delete - # and call delete on them here. Fix this as part of - # https://storyboard.openstack.org/#!/story/2007077 - - return delete_amphora_flow - - def get_vrrp_subflow(self, prefix, timeout_dict=None, - create_vrrp_group=True): - sf_name = prefix + '-' + constants.GET_VRRP_SUBFLOW - vrrp_subflow = linear_flow.Flow(sf_name) - - # Optimization for failover flow. No reason to call this - # when configuring the secondary amphora. - if create_vrrp_group: - vrrp_subflow.add(database_tasks.CreateVRRPGroupForLB( - name=sf_name + '-' + constants.CREATE_VRRP_GROUP_FOR_LB, - requires=constants.LOADBALANCER_ID)) - - vrrp_subflow.add(network_tasks.GetAmphoraeNetworkConfigs( - name=sf_name + '-' + constants.GET_AMP_NETWORK_CONFIG, - requires=constants.LOADBALANCER_ID, - provides=constants.AMPHORAE_NETWORK_CONFIG)) - - # VRRP update needs to be run on all amphora to update - # their peer configurations. So parallelize this with an - # unordered subflow. 
- update_amps_subflow = unordered_flow.Flow('VRRP-update-subflow') - - # We have three tasks to run in order, per amphora - amp_0_subflow = linear_flow.Flow('VRRP-amp-0-update-subflow') - - amp_0_subflow.add(amphora_driver_tasks.AmphoraIndexUpdateVRRPInterface( - name=sf_name + '-0-' + constants.AMP_UPDATE_VRRP_INTF, - requires=constants.AMPHORAE, - inject={constants.AMPHORA_INDEX: 0, - constants.TIMEOUT_DICT: timeout_dict}, - provides=constants.AMP_VRRP_INT)) - - amp_0_subflow.add(amphora_driver_tasks.AmphoraIndexVRRPUpdate( - name=sf_name + '-0-' + constants.AMP_VRRP_UPDATE, - requires=(constants.LOADBALANCER_ID, - constants.AMPHORAE_NETWORK_CONFIG, constants.AMPHORAE, - constants.AMP_VRRP_INT), - inject={constants.AMPHORA_INDEX: 0, - constants.TIMEOUT_DICT: timeout_dict})) - - amp_0_subflow.add(amphora_driver_tasks.AmphoraIndexVRRPStart( - name=sf_name + '-0-' + constants.AMP_VRRP_START, - requires=constants.AMPHORAE, - inject={constants.AMPHORA_INDEX: 0, - constants.TIMEOUT_DICT: timeout_dict})) - - amp_1_subflow = linear_flow.Flow('VRRP-amp-1-update-subflow') - - amp_1_subflow.add(amphora_driver_tasks.AmphoraIndexUpdateVRRPInterface( - name=sf_name + '-1-' + constants.AMP_UPDATE_VRRP_INTF, - requires=constants.AMPHORAE, - inject={constants.AMPHORA_INDEX: 1, - constants.TIMEOUT_DICT: timeout_dict}, - provides=constants.AMP_VRRP_INT)) - - amp_1_subflow.add(amphora_driver_tasks.AmphoraIndexVRRPUpdate( - name=sf_name + '-1-' + constants.AMP_VRRP_UPDATE, - requires=(constants.LOADBALANCER_ID, - constants.AMPHORAE_NETWORK_CONFIG, constants.AMPHORAE, - constants.AMP_VRRP_INT), - inject={constants.AMPHORA_INDEX: 1, - constants.TIMEOUT_DICT: timeout_dict})) - amp_1_subflow.add(amphora_driver_tasks.AmphoraIndexVRRPStart( - name=sf_name + '-1-' + constants.AMP_VRRP_START, - requires=constants.AMPHORAE, - inject={constants.AMPHORA_INDEX: 1, - constants.TIMEOUT_DICT: timeout_dict})) - - update_amps_subflow.add(amp_0_subflow) - update_amps_subflow.add(amp_1_subflow) - - vrrp_subflow.add(update_amps_subflow) - - return vrrp_subflow - - def cert_rotate_amphora_flow(self): - """Implement rotation for amphora's cert. - - 1. Create a new certificate - 2. Upload the cert to amphora - 3. update the newly created certificate info to amphora - 4. update the cert_busy flag to be false after rotation - - :returns: The flow for updating an amphora - """ - rotated_amphora_flow = linear_flow.Flow( - constants.CERT_ROTATE_AMPHORA_FLOW) - - rotated_amphora_flow.add(lifecycle_tasks.AmphoraToErrorOnRevertTask( - requires=constants.AMPHORA)) - - # create a new certificate, the returned value is the newly created - # certificate - rotated_amphora_flow.add(cert_task.GenerateServerPEMTask( - provides=constants.SERVER_PEM)) - - # update it in amphora task - rotated_amphora_flow.add(amphora_driver_tasks.AmphoraCertUpload( - requires=(constants.AMPHORA, constants.SERVER_PEM))) - - # update the newly created certificate info to amphora - rotated_amphora_flow.add(database_tasks.UpdateAmphoraDBCertExpiration( - requires=(constants.AMPHORA_ID, constants.SERVER_PEM))) - - # update the cert_busy flag to be false after rotation - rotated_amphora_flow.add(database_tasks.UpdateAmphoraCertBusyToFalse( - requires=constants.AMPHORA)) - - return rotated_amphora_flow - - def update_amphora_config_flow(self): - """Creates a flow to update the amphora agent configuration. 
- - :returns: The flow for updating an amphora - """ - update_amphora_flow = linear_flow.Flow( - constants.UPDATE_AMPHORA_CONFIG_FLOW) - - update_amphora_flow.add(lifecycle_tasks.AmphoraToErrorOnRevertTask( - requires=constants.AMPHORA)) - - update_amphora_flow.add(amphora_driver_tasks.AmphoraConfigUpdate( - requires=(constants.AMPHORA, constants.FLAVOR))) - - return update_amphora_flow - - def get_amphora_for_lb_failover_subflow( - self, prefix, role=constants.ROLE_STANDALONE, - failed_amp_vrrp_port_id=None, is_vrrp_ipv6=False): - """Creates a new amphora that will be used in a failover flow. - - :requires: loadbalancer_id, flavor, vip, vip_sg_id, loadbalancer - :provides: amphora_id, amphora - :param prefix: The flow name prefix to use on the flow and tasks. - :param role: The role this amphora will have in the topology. - :param failed_amp_vrrp_port_id: The base port ID of the failed amp. - :param is_vrrp_ipv6: True if the base port IP is IPv6. - :return: A Taskflow sub-flow that will create the amphora. - """ - - sf_name = prefix + '-' + constants.CREATE_AMP_FOR_FAILOVER_SUBFLOW - - amp_for_failover_flow = linear_flow.Flow(sf_name) - - # Try to allocate or boot an amphora instance (unconfigured) - amp_for_failover_flow.add(self.get_amphora_for_lb_subflow( - prefix=prefix + '-' + constants.FAILOVER_LOADBALANCER_FLOW, - role=role)) - - # Create the VIP base (aka VRRP) port for the amphora. - amp_for_failover_flow.add(network_tasks.CreateVIPBasePort( - name=prefix + '-' + constants.CREATE_VIP_BASE_PORT, - requires=(constants.VIP, constants.VIP_SG_ID, - constants.AMPHORA_ID), - provides=constants.BASE_PORT)) - - # Attach the VIP base (aka VRRP) port to the amphora. - amp_for_failover_flow.add(compute_tasks.AttachPort( - name=prefix + '-' + constants.ATTACH_PORT, - requires=(constants.AMPHORA, constants.PORT), - rebind={constants.PORT: constants.BASE_PORT})) - - # Update the amphora database record with the VIP base port info. - amp_for_failover_flow.add(database_tasks.UpdateAmpFailoverDetails( - name=prefix + '-' + constants.UPDATE_AMP_FAILOVER_DETAILS, - requires=(constants.AMPHORA, constants.VIP, constants.BASE_PORT))) - - # Make sure the amphora in the flow storage is up to date - # or the vrrp_ip will be empty - amp_for_failover_flow.add(database_tasks.ReloadAmphora( - name=prefix + '-' + constants.RELOAD_AMPHORA, - requires=constants.AMPHORA_ID, provides=constants.AMPHORA)) - - # Update the amphora networking for the plugged VIP port - amp_for_failover_flow.add(network_tasks.GetAmphoraNetworkConfigsByID( - name=prefix + '-' + constants.GET_AMPHORA_NETWORK_CONFIGS_BY_ID, - requires=(constants.LOADBALANCER_ID, constants.AMPHORA_ID), - provides=constants.AMPHORAE_NETWORK_CONFIG)) - - # Disable the base (vrrp) port on the failed amphora - # This prevents a DAD failure when bringing up the new amphora. - # Keepalived will handle this for act/stdby. 
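As context for the is_vrrp_ipv6 flag used in the condition below: IPv6 duplicate address detection (DAD) fails if the failed amphora's base port is still up and answering for the address, which is why that port is administratively downed first. A stdlib-only sketch of the kind of address-family check that octavia.common.utils.is_ipv6 performs (illustrative, not the actual implementation):

import ipaddress


def is_ipv6(address: str) -> bool:
    # ip_address() parses both families and raises ValueError otherwise.
    return ipaddress.ip_address(address).version == 6


assert is_ipv6('2001:db8::10')
assert not is_ipv6('198.51.100.10')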
- if (role == constants.ROLE_STANDALONE and failed_amp_vrrp_port_id and - is_vrrp_ipv6): - amp_for_failover_flow.add(network_tasks.AdminDownPort( - name=prefix + '-' + constants.ADMIN_DOWN_PORT, - inject={constants.PORT_ID: failed_amp_vrrp_port_id})) - - amp_for_failover_flow.add(amphora_driver_tasks.AmphoraPostVIPPlug( - name=prefix + '-' + constants.AMPHORA_POST_VIP_PLUG, - requires=(constants.AMPHORA, constants.LOADBALANCER, - constants.AMPHORAE_NETWORK_CONFIG))) - - # Plug member ports - amp_for_failover_flow.add(network_tasks.CalculateAmphoraDelta( - name=prefix + '-' + constants.CALCULATE_AMPHORA_DELTA, - requires=(constants.LOADBALANCER, constants.AMPHORA, - constants.AVAILABILITY_ZONE), - provides=constants.DELTA)) - - amp_for_failover_flow.add(network_tasks.HandleNetworkDelta( - name=prefix + '-' + constants.HANDLE_NETWORK_DELTA, - requires=(constants.AMPHORA, constants.DELTA), - provides=constants.UPDATED_PORTS)) - - amp_for_failover_flow.add(amphora_driver_tasks.AmphoraePostNetworkPlug( - name=prefix + '-' + constants.AMPHORAE_POST_NETWORK_PLUG, - requires=(constants.LOADBALANCER, constants.UPDATED_PORTS))) - - return amp_for_failover_flow - - def get_failover_amphora_flow(self, failed_amphora, lb_amp_count): - """Get a Taskflow flow to failover an amphora. - - 1. Build a replacement amphora. - 2. Delete the old amphora. - 3. Update the amphorae listener configurations. - 4. Update the VRRP configurations if needed. - - :param failed_amphora: The amphora object to failover. - :param lb_amp_count: The number of amphora on this load balancer. - :returns: The flow that will provide the failover. - """ - failover_amp_flow = linear_flow.Flow( - constants.FAILOVER_AMPHORA_FLOW) - - # Revert amphora to status ERROR if this flow goes wrong - failover_amp_flow.add(lifecycle_tasks.AmphoraToErrorOnRevertTask( - requires=constants.AMPHORA, - inject={constants.AMPHORA: failed_amphora})) - - if failed_amphora.role in (constants.ROLE_MASTER, - constants.ROLE_BACKUP): - amp_role = 'master_or_backup' - elif failed_amphora.role == constants.ROLE_STANDALONE: - amp_role = 'standalone' - else: - amp_role = 'undefined' - LOG.info("Performing failover for amphora: %s", - {"id": failed_amphora.id, - "load_balancer_id": failed_amphora.load_balancer_id, - "lb_network_ip": failed_amphora.lb_network_ip, - "compute_id": failed_amphora.compute_id, - "role": amp_role}) - - failover_amp_flow.add(database_tasks.MarkAmphoraPendingDeleteInDB( - requires=constants.AMPHORA, - inject={constants.AMPHORA: failed_amphora})) - - failover_amp_flow.add(database_tasks.MarkAmphoraHealthBusy( - requires=constants.AMPHORA, - inject={constants.AMPHORA: failed_amphora})) - - failover_amp_flow.add(network_tasks.GetVIPSecurityGroupID( - requires=constants.LOADBALANCER_ID, - provides=constants.VIP_SG_ID)) - - is_vrrp_ipv6 = False - if failed_amphora.load_balancer_id: - if failed_amphora.vrrp_ip: - is_vrrp_ipv6 = utils.is_ipv6(failed_amphora.vrrp_ip) - - # Get a replacement amphora and plug all of the networking. - # - # Do this early as the compute services have been observed to be - # unreliable. The community decided the chance that deleting first - # would open resources for an instance is less likely than the - # compute service failing to boot an instance for other reasons. - - # TODO(johnsom) Move this back out to run for spares after - # delete amphora API is available. 
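The *ToErrorOnRevertTask tasks added at the top of this flow rely on taskflow's revert semantics: when any later task fails, the engine calls revert() on every task that already ran, which is how these flows mark objects ERROR on failure. A minimal sketch of that mechanism (hypothetical task names, not the Octavia tasks):

import taskflow.engines
from taskflow import task
from taskflow.patterns import linear_flow


class MarkErrorOnRevert(task.Task):
    def execute(self):
        print('flow started')

    def revert(self, **kwargs):
        # Runs only if a later task in the flow raises.
        print('a later step failed; marking the object ERROR')


class FailingStep(task.Task):
    def execute(self):
        raise RuntimeError('simulated compute failure')


flow = linear_flow.Flow('failover-sketch')
flow.add(MarkErrorOnRevert(), FailingStep())
try:
    taskflow.engines.run(flow)
except RuntimeError:
    pass  # revert() above has already fired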
- failover_amp_flow.add(self.get_amphora_for_lb_failover_subflow( - prefix=constants.FAILOVER_LOADBALANCER_FLOW, - role=failed_amphora.role, - failed_amp_vrrp_port_id=failed_amphora.vrrp_port_id, - is_vrrp_ipv6=is_vrrp_ipv6)) - - failover_amp_flow.add( - self.get_delete_amphora_flow( - failed_amphora, - retry_attempts=CONF.controller_worker.amphora_delete_retries, - retry_interval=( - CONF.controller_worker.amphora_delete_retry_interval))) - failover_amp_flow.add( - database_tasks.DisableAmphoraHealthMonitoring( - requires=constants.AMPHORA, - inject={constants.AMPHORA: failed_amphora})) - - if not failed_amphora.load_balancer_id: - # This is an unallocated amphora (bogus), we are done. - return failover_amp_flow - - failover_amp_flow.add(database_tasks.GetLoadBalancer( - requires=constants.LOADBALANCER_ID, - inject={constants.LOADBALANCER_ID: - failed_amphora.load_balancer_id}, - provides=constants.LOADBALANCER)) - - failover_amp_flow.add(database_tasks.GetAmphoraeFromLoadbalancer( - name=constants.GET_AMPHORAE_FROM_LB, - requires=constants.LOADBALANCER_ID, - inject={constants.LOADBALANCER_ID: - failed_amphora.load_balancer_id}, - provides=constants.AMPHORAE)) - - # Setup timeouts for our requests to the amphorae - timeout_dict = { - constants.CONN_MAX_RETRIES: - CONF.haproxy_amphora.active_connection_max_retries, - constants.CONN_RETRY_INTERVAL: - CONF.haproxy_amphora.active_connection_retry_interval} - - # Listeners update needs to be run on all amphora to update - # their peer configurations. So parallelize this with an - # unordered subflow. - update_amps_subflow = unordered_flow.Flow( - constants.UPDATE_AMPS_SUBFLOW) - - for amp_index in range(0, lb_amp_count): - update_amps_subflow.add( - amphora_driver_tasks.AmphoraIndexListenerUpdate( - name=str(amp_index) + '-' + constants.AMP_LISTENER_UPDATE, - requires=(constants.LOADBALANCER, constants.AMPHORAE), - inject={constants.AMPHORA_INDEX: amp_index, - constants.TIMEOUT_DICT: timeout_dict})) - - failover_amp_flow.add(update_amps_subflow) - - # Configure and enable keepalived in the amphora - if lb_amp_count == 2: - failover_amp_flow.add( - self.get_vrrp_subflow(constants.GET_VRRP_SUBFLOW, - timeout_dict, create_vrrp_group=False)) - - # Reload the listener. This needs to be done here because - # it will create the required haproxy check scripts for - # the VRRP deployed above. - # A "U" or newer amphora-agent will remove the need for this - # task here. - # TODO(johnsom) Remove this in the "W" cycle - reload_listener_subflow = unordered_flow.Flow( - constants.AMPHORA_LISTENER_RELOAD_SUBFLOW) - - for amp_index in range(0, lb_amp_count): - reload_listener_subflow.add( - amphora_driver_tasks.AmphoraIndexListenersReload( - name=(str(amp_index) + '-' + - constants.AMPHORA_RELOAD_LISTENER), - requires=(constants.LOADBALANCER, constants.AMPHORAE), - inject={constants.AMPHORA_INDEX: amp_index, - constants.TIMEOUT_DICT: timeout_dict})) - - failover_amp_flow.add(reload_listener_subflow) - - # Remove any extraneous ports - # Note: Nova sometimes fails to delete ports attached to an instance. - # For example, if you create an LB with a listener, then - # 'openstack server delete' the amphora, you will see the vrrp - # port attached to that instance will remain after the instance - # is deleted. 
- # TODO(johnsom) Fix this as part of - # https://storyboard.openstack.org/#!/story/2007077 - - # Mark LB ACTIVE - failover_amp_flow.add( - database_tasks.MarkLBActiveInDB(mark_subobjects=True, - requires=constants.LOADBALANCER)) - - return failover_amp_flow diff --git a/octavia/controller/worker/v1/flows/health_monitor_flows.py b/octavia/controller/worker/v1/flows/health_monitor_flows.py deleted file mode 100644 index e1d537b6a2..0000000000 --- a/octavia/controller/worker/v1/flows/health_monitor_flows.py +++ /dev/null @@ -1,105 +0,0 @@ -# Copyright 2015 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from taskflow.patterns import linear_flow - -from octavia.common import constants -from octavia.controller.worker.v1.tasks import amphora_driver_tasks -from octavia.controller.worker.v1.tasks import database_tasks -from octavia.controller.worker.v1.tasks import lifecycle_tasks -from octavia.controller.worker.v1.tasks import model_tasks - - -class HealthMonitorFlows(object): - - def get_create_health_monitor_flow(self): - """Create a flow to create a health monitor - - :returns: The flow for creating a health monitor - """ - create_hm_flow = linear_flow.Flow(constants.CREATE_HEALTH_MONITOR_FLOW) - create_hm_flow.add(lifecycle_tasks.HealthMonitorToErrorOnRevertTask( - requires=[constants.HEALTH_MON, - constants.LISTENERS, - constants.LOADBALANCER])) - create_hm_flow.add(database_tasks.MarkHealthMonitorPendingCreateInDB( - requires=constants.HEALTH_MON)) - create_hm_flow.add(amphora_driver_tasks.ListenersUpdate( - requires=constants.LOADBALANCER)) - create_hm_flow.add(database_tasks.MarkHealthMonitorActiveInDB( - requires=constants.HEALTH_MON)) - create_hm_flow.add(database_tasks.MarkPoolActiveInDB( - requires=constants.POOL)) - create_hm_flow.add(database_tasks.MarkLBAndListenersActiveInDB( - requires=[constants.LOADBALANCER, constants.LISTENERS])) - - return create_hm_flow - - def get_delete_health_monitor_flow(self): - """Create a flow to delete a health monitor - - :returns: The flow for deleting a health monitor - """ - delete_hm_flow = linear_flow.Flow(constants.DELETE_HEALTH_MONITOR_FLOW) - delete_hm_flow.add(lifecycle_tasks.HealthMonitorToErrorOnRevertTask( - requires=[constants.HEALTH_MON, - constants.LISTENERS, - constants.LOADBALANCER])) - delete_hm_flow.add(database_tasks.MarkHealthMonitorPendingDeleteInDB( - requires=constants.HEALTH_MON)) - delete_hm_flow.add(model_tasks. 
- DeleteModelObject(rebind={constants.OBJECT: - constants.HEALTH_MON})) - delete_hm_flow.add(amphora_driver_tasks.ListenersUpdate( - requires=constants.LOADBALANCER)) - delete_hm_flow.add(database_tasks.DeleteHealthMonitorInDB( - requires=constants.HEALTH_MON)) - delete_hm_flow.add(database_tasks.DecrementHealthMonitorQuota( - requires=constants.HEALTH_MON)) - delete_hm_flow.add( - database_tasks.UpdatePoolMembersOperatingStatusInDB( - requires=constants.POOL, - inject={constants.OPERATING_STATUS: constants.NO_MONITOR})) - delete_hm_flow.add(database_tasks.MarkPoolActiveInDB( - requires=constants.POOL)) - delete_hm_flow.add(database_tasks.MarkLBAndListenersActiveInDB( - requires=[constants.LOADBALANCER, constants.LISTENERS])) - - return delete_hm_flow - - def get_update_health_monitor_flow(self): - """Create a flow to update a health monitor - - :returns: The flow for updating a health monitor - """ - update_hm_flow = linear_flow.Flow(constants.UPDATE_HEALTH_MONITOR_FLOW) - update_hm_flow.add(lifecycle_tasks.HealthMonitorToErrorOnRevertTask( - requires=[constants.HEALTH_MON, - constants.LISTENERS, - constants.LOADBALANCER])) - update_hm_flow.add(database_tasks.MarkHealthMonitorPendingUpdateInDB( - requires=constants.HEALTH_MON)) - update_hm_flow.add(amphora_driver_tasks.ListenersUpdate( - requires=constants.LOADBALANCER)) - update_hm_flow.add(database_tasks.UpdateHealthMonInDB( - requires=[constants.HEALTH_MON, constants.UPDATE_DICT])) - update_hm_flow.add(database_tasks.MarkHealthMonitorActiveInDB( - requires=constants.HEALTH_MON)) - update_hm_flow.add(database_tasks.MarkPoolActiveInDB( - requires=constants.POOL)) - update_hm_flow.add(database_tasks.MarkLBAndListenersActiveInDB( - requires=[constants.LOADBALANCER, constants.LISTENERS])) - - return update_hm_flow diff --git a/octavia/controller/worker/v1/flows/l7policy_flows.py b/octavia/controller/worker/v1/flows/l7policy_flows.py deleted file mode 100644 index 3d71a5e827..0000000000 --- a/octavia/controller/worker/v1/flows/l7policy_flows.py +++ /dev/null @@ -1,94 +0,0 @@ -# Copyright 2016 Blue Box, an IBM Company -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -from taskflow.patterns import linear_flow - -from octavia.common import constants -from octavia.controller.worker.v1.tasks import amphora_driver_tasks -from octavia.controller.worker.v1.tasks import database_tasks -from octavia.controller.worker.v1.tasks import lifecycle_tasks -from octavia.controller.worker.v1.tasks import model_tasks - - -class L7PolicyFlows(object): - - def get_create_l7policy_flow(self): - """Create a flow to create an L7 policy - - :returns: The flow for creating an L7 policy - """ - create_l7policy_flow = linear_flow.Flow(constants.CREATE_L7POLICY_FLOW) - create_l7policy_flow.add(lifecycle_tasks.L7PolicyToErrorOnRevertTask( - requires=[constants.L7POLICY, - constants.LISTENERS, - constants.LOADBALANCER])) - create_l7policy_flow.add(database_tasks.MarkL7PolicyPendingCreateInDB( - requires=constants.L7POLICY)) - create_l7policy_flow.add(amphora_driver_tasks.ListenersUpdate( - requires=constants.LOADBALANCER)) - create_l7policy_flow.add(database_tasks.MarkL7PolicyActiveInDB( - requires=constants.L7POLICY)) - create_l7policy_flow.add(database_tasks.MarkLBAndListenersActiveInDB( - requires=[constants.LOADBALANCER, constants.LISTENERS])) - - return create_l7policy_flow - - def get_delete_l7policy_flow(self): - """Create a flow to delete an L7 policy - - :returns: The flow for deleting an L7 policy - """ - delete_l7policy_flow = linear_flow.Flow(constants.DELETE_L7POLICY_FLOW) - delete_l7policy_flow.add(lifecycle_tasks.L7PolicyToErrorOnRevertTask( - requires=[constants.L7POLICY, - constants.LISTENERS, - constants.LOADBALANCER])) - delete_l7policy_flow.add(database_tasks.MarkL7PolicyPendingDeleteInDB( - requires=constants.L7POLICY)) - delete_l7policy_flow.add(model_tasks.DeleteModelObject( - rebind={constants.OBJECT: constants.L7POLICY})) - delete_l7policy_flow.add(amphora_driver_tasks.ListenersUpdate( - requires=constants.LOADBALANCER)) - delete_l7policy_flow.add(database_tasks.DeleteL7PolicyInDB( - requires=constants.L7POLICY)) - delete_l7policy_flow.add(database_tasks.DecrementL7policyQuota( - requires=constants.L7POLICY)) - delete_l7policy_flow.add(database_tasks.MarkLBAndListenersActiveInDB( - requires=[constants.LOADBALANCER, constants.LISTENERS])) - - return delete_l7policy_flow - - def get_update_l7policy_flow(self): - """Create a flow to update an L7 policy - - :returns: The flow for updating an L7 policy - """ - update_l7policy_flow = linear_flow.Flow(constants.UPDATE_L7POLICY_FLOW) - update_l7policy_flow.add(lifecycle_tasks.L7PolicyToErrorOnRevertTask( - requires=[constants.L7POLICY, - constants.LISTENERS, - constants.LOADBALANCER])) - update_l7policy_flow.add(database_tasks.MarkL7PolicyPendingUpdateInDB( - requires=constants.L7POLICY)) - update_l7policy_flow.add(amphora_driver_tasks.ListenersUpdate( - requires=constants.LOADBALANCER)) - update_l7policy_flow.add(database_tasks.UpdateL7PolicyInDB( - requires=[constants.L7POLICY, constants.UPDATE_DICT])) - update_l7policy_flow.add(database_tasks.MarkL7PolicyActiveInDB( - requires=constants.L7POLICY)) - update_l7policy_flow.add(database_tasks.MarkLBAndListenersActiveInDB( - requires=[constants.LOADBALANCER, constants.LISTENERS])) - - return update_l7policy_flow diff --git a/octavia/controller/worker/v1/flows/l7rule_flows.py b/octavia/controller/worker/v1/flows/l7rule_flows.py deleted file mode 100644 index c33e9e99bf..0000000000 --- a/octavia/controller/worker/v1/flows/l7rule_flows.py +++ /dev/null @@ -1,100 +0,0 @@ -# Copyright 2016 Blue Box, an IBM Company -# -# Licensed under the Apache License, Version 2.0 
(the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from taskflow.patterns import linear_flow - -from octavia.common import constants -from octavia.controller.worker.v1.tasks import amphora_driver_tasks -from octavia.controller.worker.v1.tasks import database_tasks -from octavia.controller.worker.v1.tasks import lifecycle_tasks -from octavia.controller.worker.v1.tasks import model_tasks - - -class L7RuleFlows(object): - - def get_create_l7rule_flow(self): - """Create a flow to create an L7 rule - - :returns: The flow for creating an L7 rule - """ - create_l7rule_flow = linear_flow.Flow(constants.CREATE_L7RULE_FLOW) - create_l7rule_flow.add(lifecycle_tasks.L7RuleToErrorOnRevertTask( - requires=[constants.L7RULE, - constants.LISTENERS, - constants.LOADBALANCER])) - create_l7rule_flow.add(database_tasks.MarkL7RulePendingCreateInDB( - requires=constants.L7RULE)) - create_l7rule_flow.add(amphora_driver_tasks.ListenersUpdate( - requires=constants.LOADBALANCER)) - create_l7rule_flow.add(database_tasks.MarkL7RuleActiveInDB( - requires=constants.L7RULE)) - create_l7rule_flow.add(database_tasks.MarkL7PolicyActiveInDB( - requires=constants.L7POLICY)) - create_l7rule_flow.add(database_tasks.MarkLBAndListenersActiveInDB( - requires=[constants.LOADBALANCER, constants.LISTENERS])) - - return create_l7rule_flow - - def get_delete_l7rule_flow(self): - """Create a flow to delete an L7 rule - - :returns: The flow for deleting an L7 rule - """ - delete_l7rule_flow = linear_flow.Flow(constants.DELETE_L7RULE_FLOW) - delete_l7rule_flow.add(lifecycle_tasks.L7RuleToErrorOnRevertTask( - requires=[constants.L7RULE, - constants.LISTENERS, - constants.LOADBALANCER])) - delete_l7rule_flow.add(database_tasks.MarkL7RulePendingDeleteInDB( - requires=constants.L7RULE)) - delete_l7rule_flow.add(model_tasks.DeleteModelObject( - rebind={constants.OBJECT: constants.L7RULE})) - delete_l7rule_flow.add(amphora_driver_tasks.ListenersUpdate( - requires=constants.LOADBALANCER)) - delete_l7rule_flow.add(database_tasks.DeleteL7RuleInDB( - requires=constants.L7RULE)) - delete_l7rule_flow.add(database_tasks.DecrementL7ruleQuota( - requires=constants.L7RULE)) - delete_l7rule_flow.add(database_tasks.MarkL7PolicyActiveInDB( - requires=constants.L7POLICY)) - delete_l7rule_flow.add(database_tasks.MarkLBAndListenersActiveInDB( - requires=[constants.LOADBALANCER, constants.LISTENERS])) - - return delete_l7rule_flow - - def get_update_l7rule_flow(self): - """Create a flow to update an L7 rule - - :returns: The flow for updating an L7 rule - """ - update_l7rule_flow = linear_flow.Flow(constants.UPDATE_L7RULE_FLOW) - update_l7rule_flow.add(lifecycle_tasks.L7RuleToErrorOnRevertTask( - requires=[constants.L7RULE, - constants.LISTENERS, - constants.LOADBALANCER])) - update_l7rule_flow.add(database_tasks.MarkL7RulePendingUpdateInDB( - requires=constants.L7RULE)) - update_l7rule_flow.add(amphora_driver_tasks.ListenersUpdate( - requires=constants.LOADBALANCER)) - update_l7rule_flow.add(database_tasks.UpdateL7RuleInDB( - requires=[constants.L7RULE, constants.UPDATE_DICT])) - 
update_l7rule_flow.add(database_tasks.MarkL7RuleActiveInDB( - requires=constants.L7RULE)) - update_l7rule_flow.add(database_tasks.MarkL7PolicyActiveInDB( - requires=constants.L7POLICY)) - update_l7rule_flow.add(database_tasks.MarkLBAndListenersActiveInDB( - requires=[constants.LOADBALANCER, constants.LISTENERS])) - - return update_l7rule_flow diff --git a/octavia/controller/worker/v1/flows/listener_flows.py b/octavia/controller/worker/v1/flows/listener_flows.py deleted file mode 100644 index a6be82504e..0000000000 --- a/octavia/controller/worker/v1/flows/listener_flows.py +++ /dev/null @@ -1,128 +0,0 @@ -# Copyright 2015 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from taskflow.patterns import linear_flow - -from octavia.common import constants -from octavia.controller.worker.v1.tasks import amphora_driver_tasks -from octavia.controller.worker.v1.tasks import database_tasks -from octavia.controller.worker.v1.tasks import lifecycle_tasks -from octavia.controller.worker.v1.tasks import network_tasks - - -class ListenerFlows(object): - - def get_create_listener_flow(self): - """Create a flow to create a listener - - :returns: The flow for creating a listener - """ - create_listener_flow = linear_flow.Flow(constants.CREATE_LISTENER_FLOW) - create_listener_flow.add(lifecycle_tasks.ListenersToErrorOnRevertTask( - requires=[constants.LOADBALANCER, constants.LISTENERS])) - create_listener_flow.add(amphora_driver_tasks.ListenersUpdate( - requires=constants.LOADBALANCER)) - create_listener_flow.add(network_tasks.UpdateVIP( - requires=constants.LOADBALANCER)) - create_listener_flow.add(database_tasks. 
- MarkLBAndListenersActiveInDB( - requires=[constants.LOADBALANCER, - constants.LISTENERS])) - return create_listener_flow - - def get_create_all_listeners_flow(self): - """Create a flow to create all listeners - - :returns: The flow for creating all listeners - """ - create_all_listeners_flow = linear_flow.Flow( - constants.CREATE_LISTENERS_FLOW) - create_all_listeners_flow.add( - database_tasks.GetListenersFromLoadbalancer( - requires=constants.LOADBALANCER, - provides=constants.LISTENERS)) - create_all_listeners_flow.add(database_tasks.ReloadLoadBalancer( - requires=constants.LOADBALANCER_ID, - provides=constants.LOADBALANCER)) - create_all_listeners_flow.add(amphora_driver_tasks.ListenersUpdate( - requires=constants.LOADBALANCER)) - create_all_listeners_flow.add(network_tasks.UpdateVIP( - requires=constants.LOADBALANCER)) - return create_all_listeners_flow - - def get_delete_listener_flow(self): - """Create a flow to delete a listener - - :returns: The flow for deleting a listener - """ - delete_listener_flow = linear_flow.Flow(constants.DELETE_LISTENER_FLOW) - delete_listener_flow.add(lifecycle_tasks.ListenerToErrorOnRevertTask( - requires=constants.LISTENER)) - delete_listener_flow.add(amphora_driver_tasks.ListenerDelete( - requires=constants.LISTENER)) - delete_listener_flow.add(network_tasks.UpdateVIPForDelete( - requires=constants.LOADBALANCER)) - delete_listener_flow.add(database_tasks.DeleteListenerInDB( - requires=constants.LISTENER)) - delete_listener_flow.add(database_tasks.DecrementListenerQuota( - requires=constants.LISTENER)) - delete_listener_flow.add(database_tasks.MarkLBActiveInDB( - requires=constants.LOADBALANCER)) - - return delete_listener_flow - - def get_delete_listener_internal_flow(self, listener_name): - """Create a flow to delete a listener and l7policies internally - - (will skip deletion on the amp and marking LB active) - - :returns: The flow for deleting a listener - """ - delete_listener_flow = linear_flow.Flow(constants.DELETE_LISTENER_FLOW) - # Should cascade delete all L7 policies - delete_listener_flow.add(network_tasks.UpdateVIPForDelete( - name='delete_update_vip_' + listener_name, - requires=constants.LOADBALANCER)) - delete_listener_flow.add(database_tasks.DeleteListenerInDB( - name='delete_listener_in_db_' + listener_name, - requires=constants.LISTENER, - rebind={constants.LISTENER: listener_name})) - delete_listener_flow.add(database_tasks.DecrementListenerQuota( - name='decrement_listener_quota_' + listener_name, - requires=constants.LISTENER, - rebind={constants.LISTENER: listener_name})) - - return delete_listener_flow - - def get_update_listener_flow(self): - """Create a flow to update a listener - - :returns: The flow for updating a listener - """ - update_listener_flow = linear_flow.Flow(constants.UPDATE_LISTENER_FLOW) - update_listener_flow.add(lifecycle_tasks.ListenersToErrorOnRevertTask( - requires=[constants.LOADBALANCER, constants.LISTENERS])) - update_listener_flow.add(amphora_driver_tasks.ListenersUpdate( - requires=constants.LOADBALANCER)) - update_listener_flow.add(network_tasks.UpdateVIP( - requires=constants.LOADBALANCER)) - update_listener_flow.add(database_tasks.UpdateListenerInDB( - requires=[constants.LISTENER, constants.UPDATE_DICT])) - update_listener_flow.add(database_tasks. 
- MarkLBAndListenersActiveInDB( - requires=[constants.LOADBALANCER, - constants.LISTENERS])) - - return update_listener_flow diff --git a/octavia/controller/worker/v1/flows/load_balancer_flows.py b/octavia/controller/worker/v1/flows/load_balancer_flows.py deleted file mode 100644 index 4fb87e8972..0000000000 --- a/octavia/controller/worker/v1/flows/load_balancer_flows.py +++ /dev/null @@ -1,686 +0,0 @@ -# Copyright 2015 Hewlett-Packard Development Company, L.P. -# Copyright 2020 Red Hat, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from oslo_config import cfg -from oslo_log import log as logging -from taskflow.patterns import linear_flow -from taskflow.patterns import unordered_flow - -from octavia.common import constants -from octavia.common import exceptions -from octavia.common import utils -from octavia.controller.worker.v1.flows import amphora_flows -from octavia.controller.worker.v1.flows import listener_flows -from octavia.controller.worker.v1.flows import member_flows -from octavia.controller.worker.v1.flows import pool_flows -from octavia.controller.worker.v1.tasks import amphora_driver_tasks -from octavia.controller.worker.v1.tasks import compute_tasks -from octavia.controller.worker.v1.tasks import database_tasks -from octavia.controller.worker.v1.tasks import lifecycle_tasks -from octavia.controller.worker.v1.tasks import network_tasks - -CONF = cfg.CONF -LOG = logging.getLogger(__name__) - - -class LoadBalancerFlows(object): - - def __init__(self): - self.amp_flows = amphora_flows.AmphoraFlows() - self.listener_flows = listener_flows.ListenerFlows() - self.pool_flows = pool_flows.PoolFlows() - self.member_flows = member_flows.MemberFlows() - - def get_create_load_balancer_flow(self, topology, listeners=None): - """Creates a conditional graph flow that allocates a loadbalancer. - - :raises InvalidTopology: Invalid topology specified - :return: The graph flow for creating a loadbalancer. 
- """ - f_name = constants.CREATE_LOADBALANCER_FLOW - lb_create_flow = linear_flow.Flow(f_name) - - lb_create_flow.add(lifecycle_tasks.LoadBalancerIDToErrorOnRevertTask( - requires=constants.LOADBALANCER_ID)) - - # allocate VIP - lb_create_flow.add(database_tasks.ReloadLoadBalancer( - name=constants.RELOAD_LB_BEFOR_ALLOCATE_VIP, - requires=constants.LOADBALANCER_ID, - provides=constants.LOADBALANCER - )) - lb_create_flow.add(network_tasks.AllocateVIP( - requires=constants.LOADBALANCER, - provides=constants.VIP)) - lb_create_flow.add(database_tasks.UpdateVIPAfterAllocation( - requires=(constants.LOADBALANCER_ID, constants.VIP), - provides=constants.LOADBALANCER)) - lb_create_flow.add(network_tasks.UpdateVIPSecurityGroup( - requires=constants.LOADBALANCER_ID)) - lb_create_flow.add(network_tasks.GetSubnetFromVIP( - requires=constants.LOADBALANCER, - provides=constants.SUBNET)) - - if topology == constants.TOPOLOGY_ACTIVE_STANDBY: - lb_create_flow.add(*self._create_active_standby_topology()) - elif topology == constants.TOPOLOGY_SINGLE: - lb_create_flow.add(*self._create_single_topology()) - else: - LOG.error("Unknown topology: %s. Unable to build load balancer.", - topology) - raise exceptions.InvalidTopology(topology=topology) - - post_amp_prefix = constants.POST_LB_AMP_ASSOCIATION_SUBFLOW - lb_create_flow.add( - self.get_post_lb_amp_association_flow(post_amp_prefix, topology)) - - if listeners: - lb_create_flow.add(*self._create_listeners_flow()) - - lb_create_flow.add( - database_tasks.MarkLBActiveInDB( - mark_subobjects=True, - requires=constants.LOADBALANCER - ) - ) - return lb_create_flow - - def _create_single_topology(self): - sf_name = (constants.ROLE_STANDALONE + '-' + - constants.AMP_PLUG_NET_SUBFLOW) - amp_for_lb_net_flow = linear_flow.Flow(sf_name) - amp_for_lb_flow = self.amp_flows.get_amphora_for_lb_subflow( - prefix=constants.ROLE_STANDALONE, - role=constants.ROLE_STANDALONE) - amp_for_lb_net_flow.add(amp_for_lb_flow) - amp_for_lb_net_flow.add(*self._get_amp_net_subflow(sf_name)) - return amp_for_lb_net_flow - - def _create_active_standby_topology( - self, lf_name=constants.CREATE_LOADBALANCER_FLOW): - # When we boot up amphora for an active/standby topology, - # we should leverage the Nova anti-affinity capabilities - # to place the amphora on different hosts, also we need to check - # if anti-affinity-flag is enabled or not: - anti_affinity = CONF.nova.enable_anti_affinity - flows = [] - if anti_affinity: - # we need to create a server group first - flows.append( - compute_tasks.NovaServerGroupCreate( - name=lf_name + '-' + - constants.CREATE_SERVER_GROUP_FLOW, - requires=(constants.LOADBALANCER_ID), - provides=constants.SERVER_GROUP_ID)) - - # update server group id in lb table - flows.append( - database_tasks.UpdateLBServerGroupInDB( - name=lf_name + '-' + - constants.UPDATE_LB_SERVERGROUPID_FLOW, - requires=(constants.LOADBALANCER_ID, - constants.SERVER_GROUP_ID))) - - f_name = constants.CREATE_LOADBALANCER_FLOW - amps_flow = unordered_flow.Flow(f_name) - - master_sf_name = (constants.ROLE_MASTER + '-' + - constants.AMP_PLUG_NET_SUBFLOW) - master_amp_sf = linear_flow.Flow(master_sf_name) - master_amp_sf.add(self.amp_flows.get_amphora_for_lb_subflow( - prefix=constants.ROLE_MASTER, role=constants.ROLE_MASTER)) - master_amp_sf.add(*self._get_amp_net_subflow(master_sf_name)) - - backup_sf_name = (constants.ROLE_BACKUP + '-' + - constants.AMP_PLUG_NET_SUBFLOW) - backup_amp_sf = linear_flow.Flow(backup_sf_name) - backup_amp_sf.add(self.amp_flows.get_amphora_for_lb_subflow( - 
prefix=constants.ROLE_BACKUP, role=constants.ROLE_BACKUP)) - backup_amp_sf.add(*self._get_amp_net_subflow(backup_sf_name)) - - amps_flow.add(master_amp_sf, backup_amp_sf) - - return flows + [amps_flow] - - def _get_amp_net_subflow(self, sf_name): - flows = [] - flows.append(network_tasks.PlugVIPAmpphora( - name=sf_name + '-' + constants.PLUG_VIP_AMPHORA, - requires=(constants.LOADBALANCER, constants.AMPHORA, - constants.SUBNET), - provides=constants.AMP_DATA)) - - flows.append(network_tasks.ApplyQosAmphora( - name=sf_name + '-' + constants.APPLY_QOS_AMP, - requires=(constants.LOADBALANCER, constants.AMP_DATA, - constants.UPDATE_DICT))) - flows.append(database_tasks.UpdateAmphoraVIPData( - name=sf_name + '-' + constants.UPDATE_AMPHORA_VIP_DATA, - requires=constants.AMP_DATA)) - flows.append(database_tasks.ReloadAmphora( - name=sf_name + '-' + constants.RELOAD_AMP_AFTER_PLUG_VIP, - requires=constants.AMPHORA_ID, - provides=constants.AMPHORA)) - flows.append(database_tasks.ReloadLoadBalancer( - name=sf_name + '-' + constants.RELOAD_LB_AFTER_PLUG_VIP, - requires=constants.LOADBALANCER_ID, - provides=constants.LOADBALANCER)) - flows.append(network_tasks.GetAmphoraNetworkConfigs( - name=sf_name + '-' + constants.GET_AMP_NETWORK_CONFIG, - requires=(constants.LOADBALANCER, constants.AMPHORA), - provides=constants.AMPHORA_NETWORK_CONFIG)) - flows.append(amphora_driver_tasks.AmphoraPostVIPPlug( - name=sf_name + '-' + constants.AMP_POST_VIP_PLUG, - rebind={constants.AMPHORAE_NETWORK_CONFIG: - constants.AMPHORA_NETWORK_CONFIG}, - requires=(constants.LOADBALANCER, - constants.AMPHORAE_NETWORK_CONFIG))) - return flows - - def _create_listeners_flow(self): - flows = [] - flows.append( - database_tasks.ReloadLoadBalancer( - name=constants.RELOAD_LB_AFTER_AMP_ASSOC_FULL_GRAPH, - requires=constants.LOADBALANCER_ID, - provides=constants.LOADBALANCER - ) - ) - flows.append( - network_tasks.CalculateDelta( - requires=(constants.LOADBALANCER, constants.AVAILABILITY_ZONE), - provides=constants.DELTAS - ) - ) - flows.append( - network_tasks.HandleNetworkDeltas( - requires=constants.DELTAS, provides=constants.UPDATED_PORTS - ) - ) - flows.append( - network_tasks.GetAmphoraeNetworkConfigs( - requires=constants.LOADBALANCER_ID, - provides=constants.AMPHORAE_NETWORK_CONFIG - ) - ) - flows.append( - amphora_driver_tasks.AmphoraePostNetworkPlug( - requires=(constants.LOADBALANCER, constants.UPDATED_PORTS, - constants.AMPHORAE_NETWORK_CONFIG) - ) - ) - flows.append( - self.listener_flows.get_create_all_listeners_flow() - ) - return flows - - def get_post_lb_amp_association_flow(self, prefix, topology): - """Reload the loadbalancer and create networking subflows for - - created/allocated amphorae. 
- :return: Post amphorae association subflow - """ - sf_name = prefix + '-' + constants.POST_LB_AMP_ASSOCIATION_SUBFLOW - post_create_LB_flow = linear_flow.Flow(sf_name) - post_create_LB_flow.add( - database_tasks.ReloadLoadBalancer( - name=sf_name + '-' + constants.RELOAD_LB_AFTER_AMP_ASSOC, - requires=constants.LOADBALANCER_ID, - provides=constants.LOADBALANCER)) - - if topology == constants.TOPOLOGY_ACTIVE_STANDBY: - post_create_LB_flow.add(database_tasks.GetAmphoraeFromLoadbalancer( - requires=constants.LOADBALANCER_ID, - provides=constants.AMPHORAE)) - - vrrp_subflow = self.amp_flows.get_vrrp_subflow(prefix) - post_create_LB_flow.add(vrrp_subflow) - - post_create_LB_flow.add(database_tasks.UpdateLoadbalancerInDB( - requires=[constants.LOADBALANCER, constants.UPDATE_DICT])) - return post_create_LB_flow - - def _get_delete_listeners_flow(self, lb): - """Sets up an internal delete flow - - Because task flow doesn't support loops we store each listener - we want to delete in the store part and then rebind - - :param lb: load balancer - :return: (flow, store) -- flow for the deletion and store with all - the listeners stored properly - """ - listeners_delete_flow = unordered_flow.Flow('listener_delete_flow') - store = {} - for listener in lb.listeners: - listener_name = 'listener_' + listener.id - store[listener_name] = listener - listeners_delete_flow.add( - self.listener_flows.get_delete_listener_internal_flow( - listener_name)) - return (listeners_delete_flow, store) - - def get_delete_load_balancer_flow(self, lb): - """Creates a flow to delete a load balancer. - - :returns: The flow for deleting a load balancer - """ - return self._get_delete_load_balancer_flow(lb, False) - - def _get_delete_pools_flow(self, lb): - """Sets up an internal delete flow - - Because task flow doesn't support loops we store each pool - we want to delete in the store part and then rebind - - :param lb: load balancer - :return: (flow, store) -- flow for the deletion and store with all - the listeners stored properly - """ - pools_delete_flow = unordered_flow.Flow('pool_delete_flow') - store = {} - for pool in lb.pools: - pool_name = 'pool' + pool.id - store[pool_name] = pool - pools_delete_flow.add( - self.pool_flows.get_delete_pool_flow_internal( - pool_name)) - return (pools_delete_flow, store) - - def _get_delete_load_balancer_flow(self, lb, cascade): - store = {} - delete_LB_flow = linear_flow.Flow(constants.DELETE_LOADBALANCER_FLOW) - delete_LB_flow.add(lifecycle_tasks.LoadBalancerToErrorOnRevertTask( - requires=constants.LOADBALANCER)) - delete_LB_flow.add(compute_tasks.NovaServerGroupDelete( - requires=constants.SERVER_GROUP_ID)) - delete_LB_flow.add(database_tasks.MarkLBAmphoraeHealthBusy( - requires=constants.LOADBALANCER)) - if cascade: - (listeners_delete, store) = self._get_delete_listeners_flow(lb) - (pools_delete, pool_store) = self._get_delete_pools_flow(lb) - store.update(pool_store) - delete_LB_flow.add(pools_delete) - delete_LB_flow.add(listeners_delete) - delete_LB_flow.add(network_tasks.UnplugVIP( - requires=constants.LOADBALANCER)) - delete_LB_flow.add(network_tasks.DeallocateVIP( - requires=constants.LOADBALANCER)) - delete_LB_flow.add(compute_tasks.DeleteAmphoraeOnLoadBalancer( - requires=constants.LOADBALANCER)) - delete_LB_flow.add(database_tasks.MarkLBAmphoraeDeletedInDB( - requires=constants.LOADBALANCER)) - delete_LB_flow.add(database_tasks.DisableLBAmphoraeHealthMonitoring( - requires=constants.LOADBALANCER)) - delete_LB_flow.add(database_tasks.MarkLBDeletedInDB( - 
requires=constants.LOADBALANCER)) - delete_LB_flow.add(database_tasks.DecrementLoadBalancerQuota( - requires=constants.LOADBALANCER)) - return (delete_LB_flow, store) - - def get_cascade_delete_load_balancer_flow(self, lb): - """Creates a flow to delete a load balancer. - - :returns: The flow for deleting a load balancer - """ - return self._get_delete_load_balancer_flow(lb, True) - - def get_update_load_balancer_flow(self): - """Creates a flow to update a load balancer. - - :returns: The flow for update a load balancer - """ - update_LB_flow = linear_flow.Flow(constants.UPDATE_LOADBALANCER_FLOW) - update_LB_flow.add(lifecycle_tasks.LoadBalancerToErrorOnRevertTask( - requires=constants.LOADBALANCER)) - update_LB_flow.add(network_tasks.ApplyQos( - requires=(constants.LOADBALANCER, constants.UPDATE_DICT))) - update_LB_flow.add(amphora_driver_tasks.ListenersUpdate( - requires=constants.LOADBALANCER)) - update_LB_flow.add(database_tasks.UpdateLoadbalancerInDB( - requires=[constants.LOADBALANCER, constants.UPDATE_DICT])) - update_LB_flow.add(database_tasks.MarkLBActiveInDB( - requires=constants.LOADBALANCER)) - - return update_LB_flow - - def get_failover_LB_flow(self, amps, lb): - """Failover a load balancer. - - 1. Validate the VIP port is correct and present. - 2. Build a replacement amphora. - 3. Delete the failed amphora. - 4. Configure the replacement amphora listeners. - 5. Configure VRRP for the listeners. - 6. Build the second replacement amphora. - 7. Delete the second failed amphora. - 8. Delete any extraneous amphora. - 9. Configure the listeners on the new amphorae. - 10. Configure the VRRP on the new amphorae. - 11. Reload the listener configurations to pick up VRRP changes. - 12. Mark the load balancer back to ACTIVE. - - :returns: The flow that will provide the failover. - """ - # Pick one amphora to be failed over if any exist. 
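
The "store and rebind" pattern used by the internal delete subflows above deserves spelling out: TaskFlow has no loop construct, so one task instance is added per object, each with a unique name and a rebind that points its input at a per-object store key. A toy sketch of that pattern (names here are illustrative, not Octavia code):

from taskflow import engines, task
from taskflow.patterns import unordered_flow


class DeleteListener(task.Task):
    def execute(self, listener):
        print('deleting', listener)


flow = unordered_flow.Flow('listener_delete_flow')
store = {}
for listener in ('listener-a', 'listener-b'):
    key = 'listener_' + listener
    store[key] = listener
    # Unique name per task instance; rebind maps the task's
    # 'listener' argument onto this object's store key.
    flow.add(DeleteListener(name='delete_' + key,
                            rebind={'listener': key}))

engines.run(flow, store=store)
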
- failed_amp = None - if amps: - failed_amp = amps.pop() - - failover_LB_flow = linear_flow.Flow( - constants.FAILOVER_LOADBALANCER_FLOW) - - # Revert LB to provisioning_status ERROR if this flow goes wrong - failover_LB_flow.add(lifecycle_tasks.LoadBalancerToErrorOnRevertTask( - requires=constants.LOADBALANCER)) - - # Setup timeouts for our requests to the amphorae - timeout_dict = { - constants.CONN_MAX_RETRIES: - CONF.haproxy_amphora.active_connection_max_retries, - constants.CONN_RETRY_INTERVAL: - CONF.haproxy_amphora.active_connection_retry_interval} - - if failed_amp: - if failed_amp.role in (constants.ROLE_MASTER, - constants.ROLE_BACKUP): - amp_role = 'master_or_backup' - elif failed_amp.role == constants.ROLE_STANDALONE: - amp_role = 'standalone' - else: - amp_role = 'undefined' - LOG.info("Performing failover for amphora: %s", - {"id": failed_amp.id, - "load_balancer_id": lb.id, - "lb_network_ip": failed_amp.lb_network_ip, - "compute_id": failed_amp.compute_id, - "role": amp_role}) - - failover_LB_flow.add(database_tasks.MarkAmphoraPendingDeleteInDB( - requires=constants.AMPHORA, - inject={constants.AMPHORA: failed_amp})) - - failover_LB_flow.add(database_tasks.MarkAmphoraHealthBusy( - requires=constants.AMPHORA, - inject={constants.AMPHORA: failed_amp})) - - # Check that the VIP port exists and is ok - failover_LB_flow.add( - network_tasks.AllocateVIPforFailover( - requires=constants.LOADBALANCER, provides=constants.VIP)) - - # Update the database with the VIP information - failover_LB_flow.add(database_tasks.UpdateVIPAfterAllocation( - requires=(constants.LOADBALANCER_ID, constants.VIP), - provides=constants.LOADBALANCER)) - - # Make sure the SG has the correct rules and re-apply to the - # VIP port. It is not used on the VIP port, but will help lock - # the SG as in use. - failover_LB_flow.add(network_tasks.UpdateVIPSecurityGroup( - requires=constants.LOADBALANCER_ID, provides=constants.VIP_SG_ID)) - - new_amp_role = constants.ROLE_STANDALONE - if lb.topology == constants.TOPOLOGY_ACTIVE_STANDBY: - new_amp_role = constants.ROLE_BACKUP - - # Get a replacement amphora and plug all of the networking. - # - # Do this early as the compute services have been observed to be - # unreliable. The community decided the chance that deleting first - # would open resources for an instance is less likely than the compute - # service failing to boot an instance for other reasons. 
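
Several failover tasks below pin their input to one specific object with inject, which overrides the flow-level store for that single task. A minimal sketch of the mechanism, again with made-up names:

from taskflow import engines, task
from taskflow.patterns import linear_flow


class MarkPendingDelete(task.Task):
    def execute(self, amphora):
        print('marking', amphora, 'PENDING_DELETE')


flow = linear_flow.Flow('toy-failover-flow')
# inject fixes this task's 'amphora' argument to the failed amphora,
# regardless of what (if anything) the shared store holds for it.
flow.add(MarkPendingDelete(inject={'amphora': 'failed-amp-id'}))
engines.run(flow, store={})
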
- if failed_amp: - failed_vrrp_is_ipv6 = False - if failed_amp.vrrp_ip: - failed_vrrp_is_ipv6 = utils.is_ipv6(failed_amp.vrrp_ip) - failover_LB_flow.add( - self.amp_flows.get_amphora_for_lb_failover_subflow( - prefix=constants.FAILOVER_LOADBALANCER_FLOW, - role=new_amp_role, - failed_amp_vrrp_port_id=failed_amp.vrrp_port_id, - is_vrrp_ipv6=failed_vrrp_is_ipv6)) - else: - failover_LB_flow.add( - self.amp_flows.get_amphora_for_lb_failover_subflow( - prefix=constants.FAILOVER_LOADBALANCER_FLOW, - role=new_amp_role)) - - if lb.topology == constants.TOPOLOGY_ACTIVE_STANDBY: - failover_LB_flow.add(database_tasks.MarkAmphoraBackupInDB( - name=constants.MARK_AMP_BACKUP_INDB, - requires=constants.AMPHORA)) - - # Delete the failed amp - if failed_amp: - failover_LB_flow.add( - self.amp_flows.get_delete_amphora_flow(failed_amp)) - - # Update the data stored in the flow from the database - failover_LB_flow.add(database_tasks.ReloadLoadBalancer( - requires=constants.LOADBALANCER_ID, - provides=constants.LOADBALANCER)) - - # Configure the listener(s) - # We will run update on this amphora again later if this is - # an active/standby load balancer because we want this amp - # functional as soon as possible. It must run again to update - # the configurations for the new peers. - failover_LB_flow.add(amphora_driver_tasks.AmpListenersUpdate( - name=constants.AMP_LISTENER_UPDATE, - requires=(constants.LOADBALANCER, constants.AMPHORA), - inject={constants.TIMEOUT_DICT: timeout_dict})) - - # Bring up the new "backup" amphora VIP now to reduce the outage - # on the final failover. This dropped the outage from 8-9 seconds - # to less than one in my lab. - # This does mean some steps have to be repeated later to reconfigure - # for the second amphora as a peer. - if lb.topology == constants.TOPOLOGY_ACTIVE_STANDBY: - - failover_LB_flow.add(database_tasks.CreateVRRPGroupForLB( - name=new_amp_role + '-' + constants.CREATE_VRRP_GROUP_FOR_LB, - requires=constants.LOADBALANCER_ID)) - - failover_LB_flow.add(network_tasks.GetAmphoraNetworkConfigsByID( - name=(new_amp_role + '-' + - constants.GET_AMPHORA_NETWORK_CONFIGS_BY_ID), - requires=(constants.LOADBALANCER_ID, constants.AMPHORA_ID), - provides=constants.FIRST_AMP_NETWORK_CONFIGS)) - - failover_LB_flow.add( - amphora_driver_tasks.AmphoraUpdateVRRPInterface( - name=new_amp_role + '-' + constants.AMP_UPDATE_VRRP_INTF, - requires=constants.AMPHORA, - inject={constants.TIMEOUT_DICT: timeout_dict}, - provides=constants.FIRST_AMP_VRRP_INTERFACE)) - - failover_LB_flow.add(amphora_driver_tasks.AmphoraVRRPUpdate( - name=new_amp_role + '-' + constants.AMP_VRRP_UPDATE, - requires=(constants.LOADBALANCER_ID, constants.AMPHORA), - rebind={constants.AMPHORAE_NETWORK_CONFIG: - constants.FIRST_AMP_NETWORK_CONFIGS, - constants.AMP_VRRP_INT: - constants.FIRST_AMP_VRRP_INTERFACE}, - inject={constants.TIMEOUT_DICT: timeout_dict})) - - failover_LB_flow.add(amphora_driver_tasks.AmphoraVRRPStart( - name=new_amp_role + '-' + constants.AMP_VRRP_START, - requires=constants.AMPHORA, - inject={constants.TIMEOUT_DICT: timeout_dict})) - - # Start the listener. This needs to be done here because - # it will create the required haproxy check scripts for - # the VRRP deployed above. - # A "V" or newer amphora-agent will remove the need for this - # task here. 
- # TODO(johnsom) Remove this in the "X" cycle - failover_LB_flow.add(amphora_driver_tasks.ListenersStart( - name=new_amp_role + '-' + constants.AMP_LISTENER_START, - requires=(constants.LOADBALANCER, constants.AMPHORA))) - - # #### Work on standby amphora if needed ##### - - new_amp_role = constants.ROLE_MASTER - failed_amp = None - if amps: - failed_amp = amps.pop() - - if failed_amp: - if failed_amp.role in (constants.ROLE_MASTER, - constants.ROLE_BACKUP): - amp_role = 'master_or_backup' - elif failed_amp.role == constants.ROLE_STANDALONE: - amp_role = 'standalone' - else: - amp_role = 'undefined' - LOG.info("Performing failover for amphora: %s", - {"id": failed_amp.id, - "load_balancer_id": lb.id, - "lb_network_ip": failed_amp.lb_network_ip, - "compute_id": failed_amp.compute_id, - "role": amp_role}) - - failover_LB_flow.add( - database_tasks.MarkAmphoraPendingDeleteInDB( - name=(new_amp_role + '-' + - constants.MARK_AMPHORA_PENDING_DELETE), - requires=constants.AMPHORA, - inject={constants.AMPHORA: failed_amp})) - - failover_LB_flow.add(database_tasks.MarkAmphoraHealthBusy( - name=(new_amp_role + '-' + - constants.MARK_AMPHORA_HEALTH_BUSY), - requires=constants.AMPHORA, - inject={constants.AMPHORA: failed_amp})) - - # Get a replacement amphora and plug all of the networking. - # - # Do this early as the compute services have been observed to be - # unreliable. The community decided the chance that deleting first - # would open resources for an instance is less likely than the - # compute service failing to boot an instance for other reasons. - failover_LB_flow.add( - self.amp_flows.get_amphora_for_lb_failover_subflow( - prefix=(new_amp_role + '-' + - constants.FAILOVER_LOADBALANCER_FLOW), - role=new_amp_role)) - - failover_LB_flow.add(database_tasks.MarkAmphoraMasterInDB( - name=constants.MARK_AMP_MASTER_INDB, - requires=constants.AMPHORA)) - - # Delete the failed amp - if failed_amp: - failover_LB_flow.add( - self.amp_flows.get_delete_amphora_flow( - failed_amp)) - failover_LB_flow.add( - database_tasks.DisableAmphoraHealthMonitoring( - name=(new_amp_role + '-' + - constants.DISABLE_AMP_HEALTH_MONITORING), - requires=constants.AMPHORA, - inject={constants.AMPHORA: failed_amp})) - - # Remove any extraneous amphora - # Note: This runs in all topology situations. - # It should run before the act/stdby final listener update so - # that we don't bother attempting to update dead amphorae. - delete_extra_amps_flow = unordered_flow.Flow( - constants.DELETE_EXTRA_AMPHORAE_FLOW) - for amp in amps: - LOG.debug('Found extraneous amphora %s on load balancer %s. ' - 'Deleting.', amp.id, lb.id) - delete_extra_amps_flow.add( - self.amp_flows.get_delete_amphora_flow(amp)) - - failover_LB_flow.add(delete_extra_amps_flow) - - if lb.topology == constants.TOPOLOGY_ACTIVE_STANDBY: - # Update the data stored in the flow from the database - failover_LB_flow.add(database_tasks.ReloadLoadBalancer( - name=new_amp_role + '-' + constants.RELOAD_LB_AFTER_AMP_ASSOC, - requires=constants.LOADBALANCER_ID, - provides=constants.LOADBALANCER)) - - failover_LB_flow.add(database_tasks.GetAmphoraeFromLoadbalancer( - name=new_amp_role + '-' + constants.GET_AMPHORAE_FROM_LB, - requires=constants.LOADBALANCER_ID, - provides=constants.AMPHORAE)) - - # Listeners update needs to be run on all amphora to update - # their peer configurations. So parallelize this with an - # unordered subflow. - update_amps_subflow = unordered_flow.Flow( - constants.UPDATE_AMPS_SUBFLOW) - - # Setup parallel flows for each amp. 
We don't know the new amp - # details at flow creation time, so setup a subflow for each - # amp on the LB, they let the task index into a list of amps - # to find the amphora it should work on. - update_amps_subflow.add( - amphora_driver_tasks.AmphoraIndexListenerUpdate( - name=(constants.AMPHORA + '-0-' + - constants.AMP_LISTENER_UPDATE), - requires=(constants.LOADBALANCER, constants.AMPHORAE), - inject={constants.AMPHORA_INDEX: 0, - constants.TIMEOUT_DICT: timeout_dict})) - update_amps_subflow.add( - amphora_driver_tasks.AmphoraIndexListenerUpdate( - name=(constants.AMPHORA + '-1-' + - constants.AMP_LISTENER_UPDATE), - requires=(constants.LOADBALANCER, constants.AMPHORAE), - inject={constants.AMPHORA_INDEX: 1, - constants.TIMEOUT_DICT: timeout_dict})) - - failover_LB_flow.add(update_amps_subflow) - - # Configure and enable keepalived in the amphora - failover_LB_flow.add(self.amp_flows.get_vrrp_subflow( - new_amp_role + '-' + constants.GET_VRRP_SUBFLOW, - timeout_dict, create_vrrp_group=False)) - - # #### End of standby #### - - # Reload the listener. This needs to be done here because - # it will create the required haproxy check scripts for - # the VRRP deployed above. - # A "V" or newer amphora-agent will remove the need for this - # task here. - # TODO(johnsom) Remove this in the "X" cycle - failover_LB_flow.add( - amphora_driver_tasks.AmphoraIndexListenersReload( - name=(new_amp_role + '-' + - constants.AMPHORA_RELOAD_LISTENER), - requires=(constants.LOADBALANCER, constants.AMPHORAE), - inject={constants.AMPHORA_INDEX: 1, - constants.TIMEOUT_DICT: timeout_dict})) - - # Remove any extraneous ports - # Note: Nova sometimes fails to delete ports attached to an instance. - # For example, if you create an LB with a listener, then - # 'openstack server delete' the amphora, you will see the vrrp - # port attached to that instance will remain after the instance - # is deleted. - # TODO(johnsom) Fix this as part of - # https://storyboard.openstack.org/#!/story/2007077 - - # Mark LB ACTIVE - failover_LB_flow.add( - database_tasks.MarkLBActiveInDB(mark_subobjects=True, - requires=constants.LOADBALANCER)) - - return failover_LB_flow diff --git a/octavia/controller/worker/v1/flows/member_flows.py b/octavia/controller/worker/v1/flows/member_flows.py deleted file mode 100644 index c879f5fa0d..0000000000 --- a/octavia/controller/worker/v1/flows/member_flows.py +++ /dev/null @@ -1,230 +0,0 @@ -# Copyright 2015 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -from taskflow.patterns import linear_flow -from taskflow.patterns import unordered_flow - -from octavia.common import constants -from octavia.controller.worker.v1.tasks import amphora_driver_tasks -from octavia.controller.worker.v1.tasks import database_tasks -from octavia.controller.worker.v1.tasks import lifecycle_tasks -from octavia.controller.worker.v1.tasks import model_tasks -from octavia.controller.worker.v1.tasks import network_tasks - - -class MemberFlows(object): - - def get_create_member_flow(self): - """Create a flow to create a member - - :returns: The flow for creating a member - """ - create_member_flow = linear_flow.Flow(constants.CREATE_MEMBER_FLOW) - create_member_flow.add(lifecycle_tasks.MemberToErrorOnRevertTask( - requires=[constants.MEMBER, - constants.LISTENERS, - constants.LOADBALANCER, - constants.POOL])) - create_member_flow.add(database_tasks.MarkMemberPendingCreateInDB( - requires=constants.MEMBER)) - create_member_flow.add(network_tasks.CalculateDelta( - requires=(constants.LOADBALANCER, constants.AVAILABILITY_ZONE), - provides=constants.DELTAS)) - create_member_flow.add(network_tasks.HandleNetworkDeltas( - requires=(constants.DELTAS, constants.LOADBALANCER), - provides=constants.UPDATED_PORTS)) - create_member_flow.add(network_tasks.GetAmphoraeNetworkConfigs( - requires=constants.LOADBALANCER_ID, - provides=constants.AMPHORAE_NETWORK_CONFIG)) - create_member_flow.add(amphora_driver_tasks.AmphoraePostNetworkPlug( - requires=(constants.LOADBALANCER, constants.UPDATED_PORTS, - constants.AMPHORAE_NETWORK_CONFIG))) - create_member_flow.add(amphora_driver_tasks.ListenersUpdate( - requires=constants.LOADBALANCER)) - create_member_flow.add(database_tasks.MarkMemberActiveInDB( - requires=constants.MEMBER)) - create_member_flow.add(database_tasks.MarkPoolActiveInDB( - requires=constants.POOL)) - create_member_flow.add(database_tasks. - MarkLBAndListenersActiveInDB( - requires=(constants.LOADBALANCER, - constants.LISTENERS))) - - return create_member_flow - - def get_delete_member_flow(self): - """Create a flow to delete a member - - :returns: The flow for deleting a member - """ - delete_member_flow = linear_flow.Flow(constants.DELETE_MEMBER_FLOW) - delete_member_flow.add(lifecycle_tasks.MemberToErrorOnRevertTask( - requires=[constants.MEMBER, - constants.LISTENERS, - constants.LOADBALANCER, - constants.POOL])) - delete_member_flow.add(database_tasks.MarkMemberPendingDeleteInDB( - requires=constants.MEMBER)) - delete_member_flow.add(network_tasks.CalculateDelta( - requires=(constants.LOADBALANCER, constants.AVAILABILITY_ZONE), - provides=constants.DELTAS)) - delete_member_flow.add(network_tasks.HandleNetworkDeltas( - requires=(constants.DELTAS, constants.LOADBALANCER), - provides=constants.UPDATED_PORTS)) - delete_member_flow.add(network_tasks.GetAmphoraeNetworkConfigs( - requires=constants.LOADBALANCER_ID, - provides=constants.AMPHORAE_NETWORK_CONFIG)) - delete_member_flow.add(amphora_driver_tasks.AmphoraePostNetworkPlug( - requires=(constants.LOADBALANCER, constants.UPDATED_PORTS, - constants.AMPHORAE_NETWORK_CONFIG))) - delete_member_flow.add(model_tasks. 
- DeleteModelObject(rebind={constants.OBJECT: - constants.MEMBER})) - delete_member_flow.add(database_tasks.DeleteMemberInDB( - requires=constants.MEMBER)) - delete_member_flow.add(amphora_driver_tasks.ListenersUpdate( - requires=constants.LOADBALANCER)) - delete_member_flow.add(database_tasks.DecrementMemberQuota( - requires=constants.MEMBER)) - delete_member_flow.add(database_tasks.MarkPoolActiveInDB( - requires=constants.POOL)) - delete_member_flow.add(database_tasks. - MarkLBAndListenersActiveInDB( - requires=[constants.LOADBALANCER, - constants.LISTENERS])) - - return delete_member_flow - - def get_update_member_flow(self): - """Create a flow to update a member - - :returns: The flow for updating a member - """ - update_member_flow = linear_flow.Flow(constants.UPDATE_MEMBER_FLOW) - update_member_flow.add(lifecycle_tasks.MemberToErrorOnRevertTask( - requires=[constants.MEMBER, - constants.LISTENERS, - constants.LOADBALANCER, - constants.POOL])) - update_member_flow.add(database_tasks.MarkMemberPendingUpdateInDB( - requires=constants.MEMBER)) - update_member_flow.add(amphora_driver_tasks.ListenersUpdate( - requires=constants.LOADBALANCER)) - update_member_flow.add(database_tasks.UpdateMemberInDB( - requires=[constants.MEMBER, constants.UPDATE_DICT])) - update_member_flow.add(database_tasks.MarkMemberActiveInDB( - requires=constants.MEMBER)) - update_member_flow.add(database_tasks.MarkPoolActiveInDB( - requires=constants.POOL)) - update_member_flow.add(database_tasks. - MarkLBAndListenersActiveInDB( - requires=[constants.LOADBALANCER, - constants.LISTENERS])) - - return update_member_flow - - def get_batch_update_members_flow(self, old_members, new_members, - updated_members): - """Create a flow to batch update members - - :returns: The flow for batch updating members - """ - batch_update_members_flow = linear_flow.Flow( - constants.BATCH_UPDATE_MEMBERS_FLOW) - unordered_members_flow = unordered_flow.Flow( - constants.UNORDERED_MEMBER_UPDATES_FLOW) - unordered_members_active_flow = unordered_flow.Flow( - constants.UNORDERED_MEMBER_ACTIVE_FLOW) - - # Delete old members - unordered_members_flow.add( - lifecycle_tasks.MembersToErrorOnRevertTask( - inject={constants.MEMBERS: old_members}, - name='{flow}-deleted'.format( - flow=constants.MEMBER_TO_ERROR_ON_REVERT_FLOW))) - for m in old_members: - unordered_members_flow.add( - model_tasks.DeleteModelObject( - inject={constants.OBJECT: m}, - name='{flow}-{id}'.format( - id=m.id, flow=constants.DELETE_MODEL_OBJECT_FLOW))) - unordered_members_flow.add(database_tasks.DeleteMemberInDB( - inject={constants.MEMBER: m}, - name='{flow}-{id}'.format( - id=m.id, flow=constants.DELETE_MEMBER_INDB))) - unordered_members_flow.add(database_tasks.DecrementMemberQuota( - inject={constants.MEMBER: m}, - name='{flow}-{id}'.format( - id=m.id, flow=constants.DECREMENT_MEMBER_QUOTA_FLOW))) - - # Create new members - unordered_members_flow.add( - lifecycle_tasks.MembersToErrorOnRevertTask( - inject={constants.MEMBERS: new_members}, - name='{flow}-created'.format( - flow=constants.MEMBER_TO_ERROR_ON_REVERT_FLOW))) - for m in new_members: - unordered_members_active_flow.add( - database_tasks.MarkMemberActiveInDB( - inject={constants.MEMBER: m}, - name='{flow}-{id}'.format( - id=m.id, flow=constants.MARK_MEMBER_ACTIVE_INDB))) - - # Update existing members - unordered_members_flow.add( - lifecycle_tasks.MembersToErrorOnRevertTask( - # updated_members is a list of (obj, dict), only pass `obj` - inject={constants.MEMBERS: [m[0] for m in updated_members]}, - 
name='{flow}-updated'.format( - flow=constants.MEMBER_TO_ERROR_ON_REVERT_FLOW))) - for m, um in updated_members: - um.pop('id', None) - unordered_members_active_flow.add( - database_tasks.MarkMemberActiveInDB( - inject={constants.MEMBER: m}, - name='{flow}-{id}'.format( - id=m.id, flow=constants.MARK_MEMBER_ACTIVE_INDB))) - - batch_update_members_flow.add(unordered_members_flow) - - # Done, do real updates - batch_update_members_flow.add(network_tasks.CalculateDelta( - requires=(constants.LOADBALANCER, constants.AVAILABILITY_ZONE), - provides=constants.DELTAS)) - batch_update_members_flow.add(network_tasks.HandleNetworkDeltas( - requires=(constants.DELTAS, constants.LOADBALANCER), - provides=constants.UPDATED_PORTS)) - batch_update_members_flow.add(network_tasks.GetAmphoraeNetworkConfigs( - requires=constants.LOADBALANCER_ID, - provides=constants.AMPHORAE_NETWORK_CONFIG)) - batch_update_members_flow.add( - amphora_driver_tasks.AmphoraePostNetworkPlug( - requires=(constants.LOADBALANCER, constants.UPDATED_PORTS, - constants.AMPHORAE_NETWORK_CONFIG))) - - # Update the Listener (this makes the changes active on the Amp) - batch_update_members_flow.add(amphora_driver_tasks.ListenersUpdate( - requires=constants.LOADBALANCER)) - - # Mark all the members ACTIVE here, then pool then LB/Listeners - batch_update_members_flow.add(unordered_members_active_flow) - batch_update_members_flow.add(database_tasks.MarkPoolActiveInDB( - requires=constants.POOL)) - batch_update_members_flow.add( - database_tasks.MarkLBAndListenersActiveInDB( - requires=(constants.LOADBALANCER, - constants.LISTENERS))) - - return batch_update_members_flow diff --git a/octavia/controller/worker/v1/flows/pool_flows.py b/octavia/controller/worker/v1/flows/pool_flows.py deleted file mode 100644 index 78c67ebdfd..0000000000 --- a/octavia/controller/worker/v1/flows/pool_flows.py +++ /dev/null @@ -1,127 +0,0 @@ -# Copyright 2015 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -from taskflow.patterns import linear_flow - -from octavia.common import constants -from octavia.controller.worker.v1.tasks import amphora_driver_tasks -from octavia.controller.worker.v1.tasks import database_tasks -from octavia.controller.worker.v1.tasks import lifecycle_tasks -from octavia.controller.worker.v1.tasks import model_tasks - - -class PoolFlows(object): - - def get_create_pool_flow(self): - """Create a flow to create a pool - - :returns: The flow for creating a pool - """ - create_pool_flow = linear_flow.Flow(constants.CREATE_POOL_FLOW) - create_pool_flow.add(lifecycle_tasks.PoolToErrorOnRevertTask( - requires=[constants.POOL, - constants.LISTENERS, - constants.LOADBALANCER])) - create_pool_flow.add(database_tasks.MarkPoolPendingCreateInDB( - requires=constants.POOL)) - create_pool_flow.add(amphora_driver_tasks.ListenersUpdate( - requires=constants.LOADBALANCER)) - create_pool_flow.add(database_tasks.MarkPoolActiveInDB( - requires=constants.POOL)) - create_pool_flow.add(database_tasks.MarkLBAndListenersActiveInDB( - requires=[constants.LOADBALANCER, constants.LISTENERS])) - - return create_pool_flow - - def get_delete_pool_flow(self): - """Create a flow to delete a pool - - :returns: The flow for deleting a pool - """ - delete_pool_flow = linear_flow.Flow(constants.DELETE_POOL_FLOW) - delete_pool_flow.add(lifecycle_tasks.PoolToErrorOnRevertTask( - requires=[constants.POOL, - constants.LISTENERS, - constants.LOADBALANCER])) - delete_pool_flow.add(database_tasks.MarkPoolPendingDeleteInDB( - requires=constants.POOL)) - delete_pool_flow.add(database_tasks.CountPoolChildrenForQuota( - requires=constants.POOL, provides=constants.POOL_CHILD_COUNT)) - delete_pool_flow.add(model_tasks.DeleteModelObject( - rebind={constants.OBJECT: constants.POOL})) - delete_pool_flow.add(amphora_driver_tasks.ListenersUpdate( - requires=constants.LOADBALANCER)) - delete_pool_flow.add(database_tasks.DeletePoolInDB( - requires=constants.POOL)) - delete_pool_flow.add(database_tasks.DecrementPoolQuota( - requires=[constants.POOL, constants.POOL_CHILD_COUNT])) - delete_pool_flow.add(database_tasks.MarkLBAndListenersActiveInDB( - requires=[constants.LOADBALANCER, constants.LISTENERS])) - - return delete_pool_flow - - def get_delete_pool_flow_internal(self, name): - """Create a flow to delete a pool, etc. 
- - :returns: The flow for deleting a pool - """ - delete_pool_flow = linear_flow.Flow(constants.DELETE_POOL_FLOW) - # health monitor should cascade - # members should cascade - delete_pool_flow.add(database_tasks.MarkPoolPendingDeleteInDB( - name='mark_pool_pending_delete_in_db_' + name, - requires=constants.POOL, - rebind={constants.POOL: name})) - delete_pool_flow.add(database_tasks.CountPoolChildrenForQuota( - name='count_pool_children_for_quota_' + name, - requires=constants.POOL, - provides=constants.POOL_CHILD_COUNT, - rebind={constants.POOL: name})) - delete_pool_flow.add(model_tasks.DeleteModelObject( - name='delete_model_object_' + name, - rebind={constants.OBJECT: name})) - delete_pool_flow.add(database_tasks.DeletePoolInDB( - name='delete_pool_in_db_' + name, - requires=constants.POOL, - rebind={constants.POOL: name})) - delete_pool_flow.add(database_tasks.DecrementPoolQuota( - name='decrement_pool_quota_' + name, - requires=[constants.POOL, constants.POOL_CHILD_COUNT], - rebind={constants.POOL: name})) - - return delete_pool_flow - - def get_update_pool_flow(self): - """Create a flow to update a pool - - :returns: The flow for updating a pool - """ - update_pool_flow = linear_flow.Flow(constants.UPDATE_POOL_FLOW) - update_pool_flow.add(lifecycle_tasks.PoolToErrorOnRevertTask( - requires=[constants.POOL, - constants.LISTENERS, - constants.LOADBALANCER])) - update_pool_flow.add(database_tasks.MarkPoolPendingUpdateInDB( - requires=constants.POOL)) - update_pool_flow.add(amphora_driver_tasks.ListenersUpdate( - requires=constants.LOADBALANCER)) - update_pool_flow.add(database_tasks.UpdatePoolInDB( - requires=[constants.POOL, constants.UPDATE_DICT])) - update_pool_flow.add(database_tasks.MarkPoolActiveInDB( - requires=constants.POOL)) - update_pool_flow.add(database_tasks.MarkLBAndListenersActiveInDB( - requires=[constants.LOADBALANCER, constants.LISTENERS])) - - return update_pool_flow diff --git a/octavia/controller/worker/v1/tasks/__init__.py b/octavia/controller/worker/v1/tasks/__init__.py deleted file mode 100644 index 94e731d201..0000000000 --- a/octavia/controller/worker/v1/tasks/__init__.py +++ /dev/null @@ -1,11 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. diff --git a/octavia/controller/worker/v1/tasks/amphora_driver_tasks.py b/octavia/controller/worker/v1/tasks/amphora_driver_tasks.py deleted file mode 100644 index beb3497588..0000000000 --- a/octavia/controller/worker/v1/tasks/amphora_driver_tasks.py +++ /dev/null @@ -1,453 +0,0 @@ -# Copyright 2015 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from cryptography import fernet -from oslo_config import cfg -from oslo_log import log as logging -from stevedore import driver as stevedore_driver -from taskflow import task -from taskflow.types import failure - -from octavia.amphorae.backends.agent import agent_jinja_cfg -from octavia.amphorae.driver_exceptions import exceptions as driver_except -from octavia.common import constants -from octavia.common import utils -from octavia.controller.worker import task_utils as task_utilities -from octavia.db import api as db_apis -from octavia.db import repositories as repo - -CONF = cfg.CONF -LOG = logging.getLogger(__name__) - - -class BaseAmphoraTask(task.Task): - """Base task to load drivers common to the tasks.""" - - def __init__(self, **kwargs): - super().__init__(**kwargs) - self.amphora_driver = stevedore_driver.DriverManager( - namespace='octavia.amphora.drivers', - name=CONF.controller_worker.amphora_driver, - invoke_on_load=True - ).driver - self.amphora_repo = repo.AmphoraRepository() - self.listener_repo = repo.ListenerRepository() - self.loadbalancer_repo = repo.LoadBalancerRepository() - self.task_utils = task_utilities.TaskUtils() - - -class AmpListenersUpdate(BaseAmphoraTask): - """Task to update the listeners on one amphora.""" - - def execute(self, loadbalancer, amphora, timeout_dict=None): - # Note, we don't want this to cause a revert as it may be used - # in a failover flow with both amps failing. Skip it and let - # health manager fix it. - try: - # Make sure we have a fresh load balancer object - loadbalancer = self.loadbalancer_repo.get(db_apis.get_session(), - id=loadbalancer.id) - self.amphora_driver.update_amphora_listeners( - loadbalancer, amphora, timeout_dict) - except Exception as e: - LOG.error('Failed to update listeners on amphora %s. Skipping ' - 'this amphora as it is failing to update due to: %s', - amphora.id, str(e)) - self.amphora_repo.update(db_apis.get_session(), amphora.id, - status=constants.ERROR) - - -class AmphoraIndexListenerUpdate(BaseAmphoraTask): - """Task to update the listeners on one amphora.""" - - def execute(self, loadbalancer, amphora_index, amphorae, - timeout_dict=None): - # Note, we don't want this to cause a revert as it may be used - # in a failover flow with both amps failing. Skip it and let - # health manager fix it. - try: - # Make sure we have a fresh load balancer object - loadbalancer = self.loadbalancer_repo.get(db_apis.get_session(), - id=loadbalancer.id) - self.amphora_driver.update_amphora_listeners( - loadbalancer, amphorae[amphora_index], timeout_dict) - except Exception as e: - amphora_id = amphorae[amphora_index].id - LOG.error('Failed to update listeners on amphora %s. 
Skipping ' - 'this amphora as it is failing to update due to: %s', - amphora_id, str(e)) - self.amphora_repo.update(db_apis.get_session(), amphora_id, - status=constants.ERROR) - - -class ListenersUpdate(BaseAmphoraTask): - """Task to update amphora with all specified listeners' configurations.""" - - def execute(self, loadbalancer): - """Execute updates per listener for an amphora.""" - self.amphora_driver.update(loadbalancer) - - def revert(self, loadbalancer, *args, **kwargs): - """Handle failed listeners updates.""" - - LOG.warning("Reverting listeners updates.") - - for listener in loadbalancer.listeners: - self.task_utils.mark_listener_prov_status_error(listener.id) - - -class ListenersStart(BaseAmphoraTask): - """Task to start all listeners on the vip.""" - - def execute(self, loadbalancer, amphora=None): - """Execute listener start routines for listeners on an amphora.""" - if loadbalancer.listeners: - self.amphora_driver.start(loadbalancer, amphora) - LOG.debug("Started the listeners on the vip") - - def revert(self, loadbalancer, *args, **kwargs): - """Handle failed listeners starts.""" - - LOG.warning("Reverting listeners starts.") - for listener in loadbalancer.listeners: - self.task_utils.mark_listener_prov_status_error(listener.id) - - -class AmphoraIndexListenersReload(BaseAmphoraTask): - """Task to reload all listeners on an amphora.""" - - def execute(self, loadbalancer, amphora_index, amphorae, - timeout_dict=None): - """Execute listener reload routines for listeners on an amphora.""" - if loadbalancer.listeners: - try: - self.amphora_driver.reload( - loadbalancer, amphorae[amphora_index], timeout_dict) - except Exception as e: - amphora_id = amphorae[amphora_index].id - LOG.warning('Failed to reload listeners on amphora %s. ' - 'Skipping this amphora as it is failing to ' - 'reload due to: %s', amphora_id, str(e)) - self.amphora_repo.update(db_apis.get_session(), amphora_id, - status=constants.ERROR) - - -class ListenerDelete(BaseAmphoraTask): - """Task to delete the listener on the vip.""" - - def execute(self, listener): - """Execute listener delete routines for an amphora.""" - # TODO(rm_work): This is only relevant because of UDP listeners now. 
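
Most tasks in this module pair execute() with a revert() that runs only when a later task in the flow fails; this is also how the *ToErrorOnRevert lifecycle tasks in the flows above mark objects ERROR on rollback. A self-contained toy sketch of that mechanism (nothing Octavia-specific assumed):

from taskflow import engines, task
from taskflow.patterns import linear_flow


class MarkBusy(task.Task):
    def execute(self):
        print('marked busy')

    def revert(self, *args, **kwargs):
        # Runs only when a later task in the flow raises.
        print('rolling back: marked ERROR')


class Explode(task.Task):
    def execute(self):
        raise RuntimeError('simulated driver failure')


flow = linear_flow.Flow('toy-revert-flow')
flow.add(MarkBusy(), Explode())

try:
    engines.run(flow)
except RuntimeError:
    # The engine reverts completed tasks, then re-raises the failure.
    print('flow failed; reverts already ran')
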
- self.amphora_driver.delete(listener) - LOG.debug("Deleted the listener on the vip") - - def revert(self, listener, *args, **kwargs): - """Handle a failed listener delete.""" - - LOG.warning("Reverting listener delete.") - - self.task_utils.mark_listener_prov_status_error(listener.id) - - -class AmphoraGetInfo(BaseAmphoraTask): - """Task to get information on an amphora.""" - - def execute(self, amphora): - """Execute get_info routine for an amphora.""" - self.amphora_driver.get_info(amphora) - - -class AmphoraGetDiagnostics(BaseAmphoraTask): - """Task to get diagnostics on the amphora and the loadbalancers.""" - - def execute(self, amphora): - """Execute get_diagnostic routine for an amphora.""" - self.amphora_driver.get_diagnostics(amphora) - - -class AmphoraFinalize(BaseAmphoraTask): - """Task to finalize the amphora before any listeners are configured.""" - - def execute(self, amphora): - """Execute finalize_amphora routine.""" - self.amphora_driver.finalize_amphora(amphora) - LOG.debug("Finalized the amphora.") - - def revert(self, result, amphora, *args, **kwargs): - """Handle a failed amphora finalize.""" - if isinstance(result, failure.Failure): - return - LOG.warning("Reverting amphora finalize.") - self.task_utils.mark_amphora_status_error(amphora.id) - - -class AmphoraPostNetworkPlug(BaseAmphoraTask): - """Task to notify the amphora post network plug.""" - - def execute(self, amphora, ports, amphora_network_config): - """Execute post_network_plug routine.""" - for port in ports: - self.amphora_driver.post_network_plug( - amphora, port, amphora_network_config) - LOG.debug("post_network_plug called on compute instance " - "%(compute_id)s for port %(port_id)s", - {"compute_id": amphora.compute_id, "port_id": port.id}) - - def revert(self, result, amphora, *args, **kwargs): - """Handle a failed post network plug.""" - if isinstance(result, failure.Failure): - return - LOG.warning("Reverting post network plug.") - self.task_utils.mark_amphora_status_error(amphora.id) - - -class AmphoraePostNetworkPlug(BaseAmphoraTask): - """Task to notify the amphorae post network plug.""" - - def execute(self, loadbalancer, updated_ports, amphorae_network_config): - """Execute post_network_plug routine.""" - amp_post_plug = AmphoraPostNetworkPlug() - # We need to make sure we have the fresh list of amphora - amphorae = self.amphora_repo.get_all( - db_apis.get_session(), load_balancer_id=loadbalancer.id, - status=constants.AMPHORA_ALLOCATED)[0] - for amphora in amphorae: - if amphora.id in updated_ports: - amp_post_plug.execute(amphora, updated_ports[amphora.id], - amphorae_network_config[amphora.id]) - - def revert(self, result, loadbalancer, updated_ports, *args, **kwargs): - """Handle a failed post network plug.""" - if isinstance(result, failure.Failure): - return - LOG.warning("Reverting post network plug.") - - amphorae = self.amphora_repo.get_all( - db_apis.get_session(), load_balancer_id=loadbalancer.id, - status=constants.AMPHORA_ALLOCATED)[0] - for amphora in amphorae: - self.task_utils.mark_amphora_status_error(amphora.id) - - -class AmphoraPostVIPPlug(BaseAmphoraTask): - """Task to notify the amphora post VIP plug.""" - - def execute(self, amphora, loadbalancer, amphorae_network_config): - """Execute post_vip_routine.""" - self.amphora_driver.post_vip_plug( - amphora, loadbalancer, amphorae_network_config) - LOG.debug("Notified amphora of vip plug") - - def revert(self, result, amphora, loadbalancer, *args, **kwargs): - """Handle a failed amphora vip plug notification.""" - if 
isinstance(result, failure.Failure): - return - LOG.warning("Reverting post vip plug.") - self.task_utils.mark_amphora_status_error(amphora.id) - - -class AmphoraePostVIPPlug(BaseAmphoraTask): - """Task to notify the amphorae post VIP plug.""" - - def execute(self, loadbalancer, amphorae_network_config): - """Execute post_vip_plug across the amphorae.""" - amp_post_vip_plug = AmphoraPostVIPPlug() - for amphora in loadbalancer.amphorae: - amp_post_vip_plug.execute(amphora, - loadbalancer, - amphorae_network_config) - - -class AmphoraCertUpload(BaseAmphoraTask): - """Upload a certificate to the amphora.""" - - def execute(self, amphora, server_pem): - """Execute cert_update_amphora routine.""" - LOG.debug("Upload cert in amphora REST driver") - key = utils.get_compatible_server_certs_key_passphrase() - fer = fernet.Fernet(key) - self.amphora_driver.upload_cert_amp(amphora, fer.decrypt(server_pem)) - - -class AmphoraUpdateVRRPInterface(BaseAmphoraTask): - """Task to get and update the VRRP interface device name from amphora.""" - - def execute(self, amphora, timeout_dict=None): - try: - interface = self.amphora_driver.get_interface_from_ip( - amphora, amphora.vrrp_ip, timeout_dict=timeout_dict) - except Exception as e: - # This can occur when an active/standby LB has no listener - LOG.error('Failed to get amphora VRRP interface on amphora ' - '%s. Skipping this amphora as it is failing due to: ' - '%s', amphora.id, str(e)) - self.amphora_repo.update(db_apis.get_session(), amphora.id, - status=constants.ERROR) - return None - - self.amphora_repo.update(db_apis.get_session(), amphora.id, - vrrp_interface=interface) - return interface - - -class AmphoraIndexUpdateVRRPInterface(BaseAmphoraTask): - """Task to get and update the VRRP interface device name from amphora.""" - - def execute(self, amphora_index, amphorae, timeout_dict=None): - amphora_id = amphorae[amphora_index].id - try: - interface = self.amphora_driver.get_interface_from_ip( - amphorae[amphora_index], amphorae[amphora_index].vrrp_ip, - timeout_dict=timeout_dict) - except Exception as e: - # This can occur when an active/standby LB has no listener - LOG.error('Failed to get amphora VRRP interface on amphora ' - '%s. Skipping this amphora as it is failing due to: ' - '%s', amphora_id, str(e)) - self.amphora_repo.update(db_apis.get_session(), amphora_id, - status=constants.ERROR) - return None - - self.amphora_repo.update(db_apis.get_session(), amphora_id, - vrrp_interface=interface) - return interface - - -class AmphoraVRRPUpdate(BaseAmphoraTask): - """Task to update the VRRP configuration of an amphora.""" - - def execute(self, loadbalancer_id, amphorae_network_config, amphora, - amp_vrrp_int, timeout_dict=None): - """Execute update_vrrp_conf.""" - loadbalancer = self.loadbalancer_repo.get(db_apis.get_session(), - id=loadbalancer_id) - # Note, we don't want this to cause a revert as it may be used - # in a failover flow with both amps failing. Skip it and let - # health manager fix it. - amphora.vrrp_interface = amp_vrrp_int - try: - self.amphora_driver.update_vrrp_conf( - loadbalancer, amphorae_network_config, amphora, timeout_dict) - except Exception as e: - LOG.error('Failed to update VRRP configuration amphora %s. 
' - 'Skipping this amphora as it is failing to update due ' - 'to: %s', amphora.id, str(e)) - self.amphora_repo.update(db_apis.get_session(), amphora.id, - status=constants.ERROR) - - LOG.debug("Uploaded VRRP configuration of amphora %s.", amphora.id) - - -class AmphoraIndexVRRPUpdate(BaseAmphoraTask): - """Task to update the VRRP configuration of an amphora.""" - - def execute(self, loadbalancer_id, amphorae_network_config, amphora_index, - amphorae, amp_vrrp_int, timeout_dict=None): - """Execute update_vrrp_conf.""" - loadbalancer = self.loadbalancer_repo.get(db_apis.get_session(), - id=loadbalancer_id) - # Note, we don't want this to cause a revert as it may be used - # in a failover flow with both amps failing. Skip it and let - # health manager fix it. - amphora_id = amphorae[amphora_index].id - amphorae[amphora_index].vrrp_interface = amp_vrrp_int - try: - self.amphora_driver.update_vrrp_conf( - loadbalancer, amphorae_network_config, amphorae[amphora_index], - timeout_dict) - except Exception as e: - LOG.error('Failed to update VRRP configuration amphora %s. ' - 'Skipping this amphora as it is failing to update due ' - 'to: %s', amphora_id, str(e)) - self.amphora_repo.update(db_apis.get_session(), amphora_id, - status=constants.ERROR) - return - LOG.debug("Uploaded VRRP configuration of amphora %s.", amphora_id) - - -class AmphoraVRRPStart(BaseAmphoraTask): - """Task to start keepalived on an amphora. - - This will reload keepalived if it is already running. - """ - - def execute(self, amphora, timeout_dict=None): - self.amphora_driver.start_vrrp_service(amphora, timeout_dict) - LOG.debug("Started VRRP on amphora %s.", amphora.id) - - -class AmphoraIndexVRRPStart(BaseAmphoraTask): - """Task to start keepalived on an amphora. - - This will reload keepalived if it is already running. - """ - - def execute(self, amphora_index, amphorae, timeout_dict=None): - amphora_id = amphorae[amphora_index].id - try: - self.amphora_driver.start_vrrp_service(amphorae[amphora_index], - timeout_dict) - except Exception as e: - LOG.error('Failed to start VRRP on amphora %s. ' - 'Skipping this amphora as it is failing to start due ' - 'to: %s', amphora_id, str(e)) - self.amphora_repo.update(db_apis.get_session(), amphora_id, - status=constants.ERROR) - return - LOG.debug("Started VRRP on amphora %s.", amphorae[amphora_index].id) - - -class AmphoraComputeConnectivityWait(BaseAmphoraTask): - """Task to wait for the compute instance to be up.""" - - def execute(self, amphora): - """Execute get_info routine for an amphora until it responds.""" - try: - amp_info = self.amphora_driver.get_info(amphora) - LOG.debug('Successfully connected to amphora %s: %s', - amphora.id, amp_info) - except driver_except.TimeOutException: - LOG.error("Amphora compute instance failed to become reachable. 
" - "This either means the compute driver failed to fully " - "boot the instance inside the timeout interval or the " - "instance is not reachable via the lb-mgmt-net.") - self.amphora_repo.update(db_apis.get_session(), amphora.id, - status=constants.ERROR) - raise - - -class AmphoraConfigUpdate(BaseAmphoraTask): - """Task to push a new amphora agent configuration to the amphora.""" - - def execute(self, amphora, flavor): - # Extract any flavor based settings - if flavor: - topology = flavor.get(constants.LOADBALANCER_TOPOLOGY, - CONF.controller_worker.loadbalancer_topology) - else: - topology = CONF.controller_worker.loadbalancer_topology - - # Build the amphora agent config - agent_cfg_tmpl = agent_jinja_cfg.AgentJinjaTemplater() - agent_config = agent_cfg_tmpl.build_agent_config(amphora.id, topology) - - # Push the new configuration to the amphora - try: - self.amphora_driver.update_amphora_agent_config(amphora, - agent_config) - except driver_except.AmpDriverNotImplementedError: - LOG.error('Amphora %s does not support agent configuration ' - 'update. Please update the amphora image for this ' - 'amphora. Skipping.', amphora.id) diff --git a/octavia/controller/worker/v1/tasks/cert_task.py b/octavia/controller/worker/v1/tasks/cert_task.py deleted file mode 100644 index a4ad873875..0000000000 --- a/octavia/controller/worker/v1/tasks/cert_task.py +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright 2015 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from cryptography import fernet -from oslo_config import cfg -from stevedore import driver as stevedore_driver -from taskflow import task - -from octavia.common import utils - -CONF = cfg.CONF - - -class BaseCertTask(task.Task): - """Base task to load drivers common to the tasks.""" - - def __init__(self, **kwargs): - super().__init__(**kwargs) - self.cert_generator = stevedore_driver.DriverManager( - namespace='octavia.cert_generator', - name=CONF.certificates.cert_generator, - invoke_on_load=True, - ).driver - - -class GenerateServerPEMTask(BaseCertTask): - """Create the server certs for the agent comm - - Use the amphora_id for the CN - """ - - def execute(self, amphora_id): - cert = self.cert_generator.generate_cert_key_pair( - cn=amphora_id, - validity=CONF.certificates.cert_validity_time) - key = utils.get_compatible_server_certs_key_passphrase() - fer = fernet.Fernet(key) - - return fer.encrypt(cert.certificate + cert.private_key) diff --git a/octavia/controller/worker/v1/tasks/compute_tasks.py b/octavia/controller/worker/v1/tasks/compute_tasks.py deleted file mode 100644 index 390ea91202..0000000000 --- a/octavia/controller/worker/v1/tasks/compute_tasks.py +++ /dev/null @@ -1,335 +0,0 @@ -# Copyright 2015 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -import time - -from cryptography import fernet -from oslo_config import cfg -from oslo_log import log as logging -from stevedore import driver as stevedore_driver -from taskflow import task -from taskflow.types import failure -import tenacity - -from octavia.amphorae.backends.agent import agent_jinja_cfg -from octavia.common import constants -from octavia.common import exceptions -from octavia.common.jinja.logging import logging_jinja_cfg -from octavia.common.jinja import user_data_jinja_cfg -from octavia.common import utils -from octavia.controller.worker import amphora_rate_limit - -CONF = cfg.CONF -LOG = logging.getLogger(__name__) - - -class BaseComputeTask(task.Task): - """Base task to load drivers common to the tasks.""" - - def __init__(self, **kwargs): - super().__init__(**kwargs) - self.compute = stevedore_driver.DriverManager( - namespace='octavia.compute.drivers', - name=CONF.controller_worker.compute_driver, - invoke_on_load=True - ).driver - self.rate_limit = amphora_rate_limit.AmphoraBuildRateLimit() - - -class ComputeCreate(BaseComputeTask): - """Create the compute instance for a new amphora.""" - - def execute(self, amphora_id, server_group_id, config_drive_files=None, - build_type_priority=constants.LB_CREATE_NORMAL_PRIORITY, - ports=None, flavor=None, availability_zone=None): - """Create an amphora - - :returns: an amphora - """ - ports = ports or [] - network_ids = CONF.controller_worker.amp_boot_network_list[:] - config_drive_files = config_drive_files or {} - user_data = None - LOG.debug("Compute create execute for amphora with id %s", amphora_id) - - user_data_config_drive = CONF.controller_worker.user_data_config_drive - key_name = CONF.controller_worker.amp_ssh_key_name - - # Apply an Octavia flavor customizations - if flavor: - topology = flavor.get(constants.LOADBALANCER_TOPOLOGY, - CONF.controller_worker.loadbalancer_topology) - amp_compute_flavor = flavor.get( - constants.COMPUTE_FLAVOR, CONF.controller_worker.amp_flavor_id) - amp_image_tag = flavor.get( - constants.AMP_IMAGE_TAG, CONF.controller_worker.amp_image_tag) - else: - topology = CONF.controller_worker.loadbalancer_topology - amp_compute_flavor = CONF.controller_worker.amp_flavor_id - amp_image_tag = CONF.controller_worker.amp_image_tag - - if availability_zone: - amp_availability_zone = availability_zone.get( - constants.COMPUTE_ZONE) - amp_network = availability_zone.get(constants.MANAGEMENT_NETWORK) - if amp_network: - network_ids = [amp_network] - else: - amp_availability_zone = None - try: - if CONF.haproxy_amphora.build_rate_limit != -1: - self.rate_limit.add_to_build_request_queue( - amphora_id, build_type_priority) - - agent_cfg = agent_jinja_cfg.AgentJinjaTemplater() - config_drive_files['/etc/octavia/amphora-agent.conf'] = ( - agent_cfg.build_agent_config(amphora_id, topology)) - - logging_cfg = logging_jinja_cfg.LoggingJinjaTemplater( - CONF.amphora_agent.logging_template_override) - config_drive_files['/etc/rsyslog.d/10-rsyslog.conf'] = ( - logging_cfg.build_logging_config()) - - udtemplater = user_data_jinja_cfg.UserDataJinjaCfg() - user_data = 
udtemplater.build_user_data_config( - config_drive_files if user_data_config_drive else {}) - if user_data_config_drive: - config_drive_files = None - - compute_id = self.compute.build( - name="amphora-" + amphora_id, - amphora_flavor=amp_compute_flavor, - image_tag=amp_image_tag, - image_owner=CONF.controller_worker.amp_image_owner_id, - key_name=key_name, - sec_groups=CONF.controller_worker.amp_secgroup_list, - network_ids=network_ids, - port_ids=[port.id for port in ports], - config_drive_files=config_drive_files, - user_data=user_data, - server_group_id=server_group_id, - availability_zone=amp_availability_zone) - - LOG.info("Server created with id: %s for amphora id: %s", - compute_id, amphora_id) - return compute_id - - except Exception: - LOG.exception("Compute create for amphora id: %s failed", - amphora_id) - raise - - def revert(self, result, amphora_id, *args, **kwargs): - """This method will revert the creation of the - - amphora. So it will just delete it in this flow - """ - if isinstance(result, failure.Failure): - return - compute_id = result - LOG.warning("Reverting compute create for amphora with id " - "%(amp)s and compute id: %(comp)s", - {'amp': amphora_id, 'comp': compute_id}) - try: - self.compute.delete(compute_id) - except Exception: - LOG.exception("Reverting compute create failed") - - -class CertComputeCreate(ComputeCreate): - def execute(self, amphora_id, server_pem, server_group_id, - build_type_priority=constants.LB_CREATE_NORMAL_PRIORITY, - ports=None, flavor=None, availability_zone=None): - """Create an amphora - - :returns: an amphora - """ - - # load client certificate - with open(CONF.controller_worker.client_ca, - 'r', encoding='utf-8') as client_ca: - ca = client_ca.read() - - key = utils.get_compatible_server_certs_key_passphrase() - fer = fernet.Fernet(key) - config_drive_files = { - '/etc/octavia/certs/server.pem': fer.decrypt( - server_pem).decode('utf-8'), - '/etc/octavia/certs/client_ca.pem': ca} - return super().execute( - amphora_id, config_drive_files=config_drive_files, - build_type_priority=build_type_priority, - server_group_id=server_group_id, ports=ports, flavor=flavor, - availability_zone=availability_zone) - - -class DeleteAmphoraeOnLoadBalancer(BaseComputeTask): - """Delete the amphorae on a load balancer. - - Iterate through amphorae, deleting them - """ - - def execute(self, loadbalancer): - for amp in loadbalancer.amphorae: - # The compute driver will already handle NotFound - try: - self.compute.delete(amp.compute_id) - except Exception: - LOG.exception("Compute delete for amphora id: %s failed", - amp.id) - raise - - -class ComputeDelete(BaseComputeTask): - - @tenacity.retry(retry=tenacity.retry_if_exception_type(), - stop=tenacity.stop_after_attempt(CONF.compute.max_retries), - wait=tenacity.wait_exponential( - multiplier=CONF.compute.retry_backoff, - min=CONF.compute.retry_interval, - max=CONF.compute.retry_max), reraise=True) - def execute(self, amphora, passive_failure=False): - if self.execute.retry.statistics.get(constants.ATTEMPT_NUMBER, 1) == 1: - LOG.debug('Compute delete execute for amphora with ID %s and ' - 'compute ID: %s', amphora.id, amphora.compute_id) - else: - LOG.warning('Retrying compute delete of %s attempt %s of %s.', - amphora.compute_id, - self.execute.retry.statistics[ - constants.ATTEMPT_NUMBER], - self.execute.retry.stop.max_attempt_number) - # Let the Taskflow engine know we are working and alive - # Don't use get with a default for 'attempt_number', we need to fail - # if that number is missing. 
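As background for the retry bookkeeping in this task, a small self-contained sketch of the tenacity behavior it relies on; flaky_delete and its error are made up, but the .retry.statistics and stop.max_attempt_number access pattern mirrors the surrounding code (assuming a tenacity release compatible with the one Octavia uses).

    import tenacity

    @tenacity.retry(stop=tenacity.stop_after_attempt(3),
                    wait=tenacity.wait_fixed(0.1), reraise=True)
    def flaky_delete():
        # tenacity attaches the retry controller to the wrapped function;
        # statistics['attempt_number'] is populated before each attempt,
        # so an indexed lookup fails loudly if it ever goes missing,
        # exactly as the comment above intends.
        stats = flaky_delete.retry.statistics
        print('attempt %s of %s' % (stats['attempt_number'],
                                    flaky_delete.retry.stop.max_attempt_number))
        raise RuntimeError('compute still present')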
- self.update_progress( - self.execute.retry.statistics[constants.ATTEMPT_NUMBER] / - self.execute.retry.stop.max_attempt_number) - - try: - self.compute.delete(amphora.compute_id) - except Exception: - if (self.execute.retry.statistics[constants.ATTEMPT_NUMBER] != - self.execute.retry.stop.max_attempt_number): - LOG.warning('Compute delete for amphora id: %s failed. ' - 'Retrying.', amphora.id) - raise - if passive_failure: - LOG.exception('Compute delete for compute ID: %s on amphora ' - 'ID: %s failed. This resource will be abandoned ' - 'and should manually be cleaned up once the ' - 'compute service is functional.', - amphora.compute_id, amphora.id) - else: - LOG.exception('Compute delete for compute ID: %s on amphora ' - 'ID: %s failed. The compute service has failed. ' - 'Aborting and reverting.', amphora.compute_id, - amphora.id) - raise - - -class ComputeActiveWait(BaseComputeTask): - """Wait for the compute driver to mark the amphora active.""" - - def execute(self, compute_id, amphora_id, availability_zone): - """Wait for the compute driver to mark the amphora active - - :raises: Generic exception if the amphora is not active - :returns: An amphora object - """ - if availability_zone: - amp_network = availability_zone.get(constants.MANAGEMENT_NETWORK) - else: - amp_network = None - for i in range(CONF.controller_worker.amp_active_retries): - amp, fault = self.compute.get_amphora(compute_id, amp_network) - if amp.status == constants.ACTIVE: - if CONF.haproxy_amphora.build_rate_limit != -1: - self.rate_limit.remove_from_build_req_queue(amphora_id) - return amp - if amp.status == constants.ERROR: - raise exceptions.ComputeBuildException(fault=fault) - time.sleep(CONF.controller_worker.amp_active_wait_sec) - - raise exceptions.ComputeWaitTimeoutException(id=compute_id) - - -class NovaServerGroupCreate(BaseComputeTask): - def execute(self, loadbalancer_id): - """Create a server group by nova client api - - :param loadbalancer_id: will be used for server group's name - :param policy: will used for server group's policy - :raises: Generic exception if the server group is not created - :returns: server group's id - """ - - name = 'octavia-lb-' + loadbalancer_id - server_group = self.compute.create_server_group( - name, CONF.nova.anti_affinity_policy) - LOG.debug("Server Group created with id: %s for load balancer id: " - "%s", server_group.id, loadbalancer_id) - return server_group.id - - def revert(self, result, *args, **kwargs): - """This method will revert the creation of the - - :param result: here it refers to server group id - """ - server_group_id = result - LOG.warning("Reverting server group create with id:%s", - server_group_id) - try: - self.compute.delete_server_group(server_group_id) - except Exception as e: - LOG.error("Failed to delete server group. Resources may " - "still be in use for server group: %(sg)s due to " - "error: %(except)s", - {'sg': server_group_id, 'except': str(e)}) - - -class NovaServerGroupDelete(BaseComputeTask): - def execute(self, server_group_id): - if server_group_id is not None: - self.compute.delete_server_group(server_group_id) - else: - return - - -class AttachPort(BaseComputeTask): - def execute(self, amphora, port): - """Attach a port to an amphora instance. - - :param amphora: The amphora to attach the port to. - :param port: The port to attach to the amphora. 
- :returns: None - """ - LOG.debug('Attaching port: %s to compute: %s', - port.id, amphora.compute_id) - self.compute.attach_network_or_port(amphora.compute_id, - port_id=port.id) - - def revert(self, amphora, port, *args, **kwargs): - """Revert our port attach. - - :param amphora: The amphora to detach the port from. - :param port: The port to attach to the amphora. - """ - LOG.warning('Reverting port: %s attach to compute: %s', - port.id, amphora.compute_id) - try: - self.compute.detach_port(amphora.compute_id, port.id) - except Exception as e: - LOG.error('Failed to detach port %s from compute %s for revert ' - 'due to %s.', port.id, amphora.compute_id, str(e)) diff --git a/octavia/controller/worker/v1/tasks/database_tasks.py b/octavia/controller/worker/v1/tasks/database_tasks.py deleted file mode 100644 index 410a328f25..0000000000 --- a/octavia/controller/worker/v1/tasks/database_tasks.py +++ /dev/null @@ -1,2764 +0,0 @@ -# Copyright 2015 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from cryptography import fernet -from oslo_config import cfg -from oslo_db import exception as odb_exceptions -from oslo_log import log as logging -from oslo_utils import excutils -from oslo_utils import uuidutils -import sqlalchemy -from sqlalchemy.orm import exc -from taskflow import task -from taskflow.types import failure - -from octavia.common import constants -from octavia.common import data_models -from octavia.common.tls_utils import cert_parser -from octavia.common import utils -from octavia.controller.worker import task_utils as task_utilities -from octavia.db import api as db_apis -from octavia.db import repositories as repo - -CONF = cfg.CONF -LOG = logging.getLogger(__name__) - - -class BaseDatabaseTask(task.Task): - """Base task to load drivers common to the tasks.""" - - def __init__(self, **kwargs): - self.repos = repo.Repositories() - self.amphora_repo = repo.AmphoraRepository() - self.health_mon_repo = repo.HealthMonitorRepository() - self.listener_repo = repo.ListenerRepository() - self.loadbalancer_repo = repo.LoadBalancerRepository() - self.vip_repo = repo.VipRepository() - self.member_repo = repo.MemberRepository() - self.pool_repo = repo.PoolRepository() - self.amp_health_repo = repo.AmphoraHealthRepository() - self.l7policy_repo = repo.L7PolicyRepository() - self.l7rule_repo = repo.L7RuleRepository() - self.task_utils = task_utilities.TaskUtils() - super().__init__(**kwargs) - - def _delete_from_amp_health(self, amphora_id): - """Delete the amphora_health record for an amphora. 
- - :param amphora_id: The amphora id to delete - """ - LOG.debug('Disabling health monitoring on amphora: %s', amphora_id) - try: - self.amp_health_repo.delete(db_apis.get_session(), - amphora_id=amphora_id) - except (sqlalchemy.orm.exc.NoResultFound, - sqlalchemy.orm.exc.UnmappedInstanceError): - LOG.debug('No existing amphora health record to delete ' - 'for amphora: %s, skipping.', amphora_id) - - def _mark_amp_health_busy(self, amphora_id): - """Mark the amphora_health record busy for an amphora. - - :param amphora_id: The amphora id to mark busy - """ - LOG.debug('Marking health monitoring busy on amphora: %s', amphora_id) - try: - self.amp_health_repo.update(db_apis.get_session(), - amphora_id=amphora_id, - busy=True) - except (sqlalchemy.orm.exc.NoResultFound, - sqlalchemy.orm.exc.UnmappedInstanceError): - LOG.debug('No existing amphora health record to mark busy ' - 'for amphora: %s, skipping.', amphora_id) - - -class CreateAmphoraInDB(BaseDatabaseTask): - """Task to create an initial amphora in the Database.""" - - def execute(self, *args, loadbalancer_id=None, **kwargs): - """Creates a pending create amphora record in the database. - - :returns: The id of the created amphora - """ - - amphora = self.amphora_repo.create(db_apis.get_session(), - id=uuidutils.generate_uuid(), - load_balancer_id=loadbalancer_id, - status=constants.PENDING_CREATE, - cert_busy=False) - if loadbalancer_id: - LOG.info("Created Amphora %s in DB for load balancer %s", - amphora.id, loadbalancer_id) - else: - LOG.info("Created Amphora %s in DB", amphora.id) - return amphora.id - - def revert(self, result, *args, **kwargs): - """Revert by storing the amphora in error state in the DB - - In a future version we might change the status to DELETED - if deleting the amphora was successful - - :param result: Id of created amphora. - :returns: None - """ - - if isinstance(result, failure.Failure): - # This task's execute failed, so nothing needed to be done to - # revert - return - - # At this point the revert is being called because another task - # executed after this failed so we will need to do something and - # result is the amphora's id - - LOG.warning("Reverting create amphora in DB for amp id %s ", result) - - # Delete the amphora for now. May want to just update status later - try: - self.amphora_repo.delete(db_apis.get_session(), id=result) - except Exception as e: - LOG.error("Failed to delete amphora %(amp)s " - "in the database due to: " - "%(except)s", {'amp': result, 'except': str(e)}) - - -class MarkLBAmphoraeDeletedInDB(BaseDatabaseTask): - """Task to mark a list of amphora deleted in the Database.""" - - def execute(self, loadbalancer): - """Update load balancer's amphorae statuses to DELETED in the database. - - :param loadbalancer: The load balancer which amphorae should be - marked DELETED. - :returns: None - """ - for amp in loadbalancer.amphorae: - LOG.debug("Marking amphora %s DELETED ", amp.id) - self.amphora_repo.update(db_apis.get_session(), - id=amp.id, status=constants.DELETED) - - -class DeleteHealthMonitorInDB(BaseDatabaseTask): - """Delete the health monitor in the DB. 
- - Since sqlalchemy will likely retry by itself always revert if it fails - """ - - def execute(self, health_mon): - """Delete the health monitor in DB - - :param health_mon: The health monitor which should be deleted - :returns: None - """ - - LOG.debug("DB delete health monitor: %s ", health_mon.id) - try: - self.health_mon_repo.delete(db_apis.get_session(), - id=health_mon.id) - except exc.NoResultFound: - # ignore if the HealthMonitor was not found - pass - - def revert(self, health_mon, *args, **kwargs): - """Mark the health monitor ERROR since the mark active couldn't happen - - :param health_mon: The health monitor which couldn't be deleted - :returns: None - """ - - LOG.warning("Reverting mark health monitor delete in DB " - "for health monitor with id %s", health_mon.id) - self.health_mon_repo.update(db_apis.get_session(), id=health_mon.id, - provisioning_status=constants.ERROR) - - -class DeleteHealthMonitorInDBByPool(DeleteHealthMonitorInDB): - """Delete the health monitor in the DB. - - Since sqlalchemy will likely retry by itself always revert if it fails - """ - - def execute(self, pool): - """Delete the health monitor in the DB. - - :param pool: A pool which health monitor should be deleted. - :returns: None - """ - super().execute( - pool.health_monitor) - - def revert(self, pool, *args, **kwargs): - """Mark the health monitor ERROR since the mark active couldn't happen - - :param pool: A pool which health monitor couldn't be deleted - :returns: None - """ - super().revert( - pool.health_monitor, *args, **kwargs) - - -class DeleteMemberInDB(BaseDatabaseTask): - """Delete the member in the DB. - - Since sqlalchemy will likely retry by itself always revert if it fails - """ - - def execute(self, member): - """Delete the member in the DB - - :param member: The member to be deleted - :returns: None - """ - - LOG.debug("DB delete member for id: %s ", member.id) - self.member_repo.delete(db_apis.get_session(), id=member.id) - - def revert(self, member, *args, **kwargs): - """Mark the member ERROR since the delete couldn't happen - - :param member: Member that failed to get deleted - :returns: None - """ - - LOG.warning("Reverting delete in DB for member id %s", member.id) - try: - self.member_repo.update(db_apis.get_session(), member.id, - provisioning_status=constants.ERROR) - except Exception as e: - LOG.error("Failed to update member %(mem)s " - "provisioning_status to ERROR due to: %(except)s", - {'mem': member.id, 'except': str(e)}) - - -class DeleteListenerInDB(BaseDatabaseTask): - """Delete the listener in the DB.""" - - def execute(self, listener): - """Delete the listener in DB - - :param listener: The listener to delete - :returns: None - """ - LOG.debug("Delete in DB for listener id: %s", listener.id) - self.listener_repo.delete(db_apis.get_session(), id=listener.id) - - def revert(self, listener, *args, **kwargs): - """Mark the listener ERROR since the listener didn't delete - - :param listener: Listener that failed to get deleted - :returns: None - """ - - LOG.warning("Reverting mark listener delete in DB for listener id %s", - listener.id) - - -class DeletePoolInDB(BaseDatabaseTask): - """Delete the pool in the DB. 
- - Since sqlalchemy will likely retry by itself always revert if it fails - """ - - def execute(self, pool): - """Delete the pool in DB - - :param pool: The pool to be deleted - :returns: None - """ - - LOG.debug("Delete in DB for pool id: %s ", pool.id) - self.pool_repo.delete(db_apis.get_session(), id=pool.id) - - def revert(self, pool, *args, **kwargs): - """Mark the pool ERROR since the delete couldn't happen - - :param pool: Pool that failed to get deleted - :returns: None - """ - - LOG.warning("Reverting delete in DB for pool id %s", pool.id) - try: - self.pool_repo.update(db_apis.get_session(), pool.id, - provisioning_status=constants.ERROR) - except Exception as e: - LOG.error("Failed to update pool %(pool)s " - "provisioning_status to ERROR due to: %(except)s", - {'pool': pool.id, 'except': str(e)}) - - -class DeleteL7PolicyInDB(BaseDatabaseTask): - """Delete the L7 policy in the DB. - - Since sqlalchemy will likely retry by itself always revert if it fails - """ - - def execute(self, l7policy): - """Delete the l7policy in DB - - :param l7policy: The l7policy to be deleted - :returns: None - """ - - LOG.debug("Delete in DB for l7policy id: %s ", l7policy.id) - self.l7policy_repo.delete(db_apis.get_session(), id=l7policy.id) - - def revert(self, l7policy, *args, **kwargs): - """Mark the l7policy ERROR since the delete couldn't happen - - :param l7policy: L7 policy that failed to get deleted - :returns: None - """ - - LOG.warning("Reverting delete in DB for l7policy id %s", l7policy.id) - try: - self.l7policy_repo.update(db_apis.get_session(), l7policy.id, - provisioning_status=constants.ERROR) - except Exception as e: - LOG.error("Failed to update l7policy %(l7policy)s " - "provisioning_status to ERROR due to: %(except)s", - {'l7policy': l7policy.id, 'except': str(e)}) - - -class DeleteL7RuleInDB(BaseDatabaseTask): - """Delete the L7 rule in the DB. - - Since sqlalchemy will likely retry by itself always revert if it fails - """ - - def execute(self, l7rule): - """Delete the l7rule in DB - - :param l7rule: The l7rule to be deleted - :returns: None - """ - - LOG.debug("Delete in DB for l7rule id: %s ", l7rule.id) - self.l7rule_repo.delete(db_apis.get_session(), id=l7rule.id) - - def revert(self, l7rule, *args, **kwargs): - """Mark the l7rule ERROR since the delete couldn't happen - - :param l7rule: L7 rule that failed to get deleted - :returns: None - """ - - LOG.warning("Reverting delete in DB for l7rule id %s", l7rule.id) - try: - self.l7rule_repo.update(db_apis.get_session(), l7rule.id, - provisioning_status=constants.ERROR) - except Exception as e: - LOG.error("Failed to update l7rule %(l7rule)s " - "provisioning_status to ERROR due to: %(except)s", - {'l7rule': l7rule.id, 'except': str(e)}) - - -class ReloadAmphora(BaseDatabaseTask): - """Get an amphora object from the database.""" - - def execute(self, amphora_id): - """Get an amphora object from the database. - - :param amphora_id: The amphora ID to lookup - :returns: The amphora object - """ - - LOG.debug("Get amphora from DB for amphora id: %s ", amphora_id) - return self.amphora_repo.get(db_apis.get_session(), id=amphora_id) - - -class ReloadLoadBalancer(BaseDatabaseTask): - """Get a load balancer object from the database.""" - - def execute(self, loadbalancer_id, *args, **kwargs): - """Get a load balancer object from the database. 
- - :param loadbalancer_id: The load balancer ID to lookup - :returns: The load balancer object - """ - - LOG.debug("Get load balancer from DB for load balancer id: %s ", - loadbalancer_id) - return self.loadbalancer_repo.get(db_apis.get_session(), - id=loadbalancer_id) - - -class UpdateVIPAfterAllocation(BaseDatabaseTask): - """Update a VIP associated with a given load balancer.""" - - def execute(self, loadbalancer_id, vip): - """Update a VIP associated with a given load balancer. - - :param loadbalancer_id: Id of a load balancer which VIP should be - updated. - :param vip: data_models.Vip object with update data. - :returns: The load balancer object. - """ - self.repos.vip.update(db_apis.get_session(), loadbalancer_id, - port_id=vip.port_id, subnet_id=vip.subnet_id, - ip_address=vip.ip_address) - lb = self.repos.load_balancer.get(db_apis.get_session(), - id=loadbalancer_id) - LOG.info("Updated vip with port id %s, subnet id %s, ip address %s " - "for load balancer %s", - vip.port_id, - vip.subnet_id, - vip.ip_address, - loadbalancer_id) - return lb - - -class UpdateAmphoraeVIPData(BaseDatabaseTask): - """Update amphorae VIP data.""" - - def execute(self, amps_data): - """Update amphorae VIP data. - - :param amps_data: Amphorae update dicts. - :returns: None - """ - for amp_data in amps_data: - self.repos.amphora.update(db_apis.get_session(), amp_data.id, - vrrp_ip=amp_data.vrrp_ip, - ha_ip=amp_data.ha_ip, - vrrp_port_id=amp_data.vrrp_port_id, - ha_port_id=amp_data.ha_port_id, - vrrp_id=1) - - -class UpdateAmphoraVIPData(BaseDatabaseTask): - """Update amphorae VIP data.""" - - def execute(self, amp_data): - """Update amphorae VIP data. - - :param amps_data: Amphorae update dicts. - :returns: None - """ - self.repos.amphora.update(db_apis.get_session(), amp_data.id, - vrrp_ip=amp_data.vrrp_ip, - ha_ip=amp_data.ha_ip, - vrrp_port_id=amp_data.vrrp_port_id, - ha_port_id=amp_data.ha_port_id, - vrrp_id=1) - - -class UpdateAmpFailoverDetails(BaseDatabaseTask): - """Update amphora failover details in the database.""" - - def execute(self, amphora, vip, base_port): - """Update amphora failover details in the database. - - :param amphora: The amphora to update - :param vip: The VIP object associated with this amphora. - :param base_port: The base port object associated with the amphora. - :returns: None - """ - # role and vrrp_priority will be updated later. - self.repos.amphora.update(db_apis.get_session(), amphora.id, - vrrp_ip=base_port.fixed_ips[0].ip_address, - ha_ip=vip.ip_address, - vrrp_port_id=base_port.id, - ha_port_id=vip.port_id, - vrrp_id=1) - - -class AssociateFailoverAmphoraWithLBID(BaseDatabaseTask): - """Associate failover amphora with loadbalancer in the database.""" - - def execute(self, amphora_id, loadbalancer_id): - """Associate failover amphora with loadbalancer in the database. - - :param amphora_id: Id of an amphora to update - :param loadbalancer_id: Id of a load balancer to be associated with - a given amphora. - :returns: None - """ - self.repos.amphora.associate(db_apis.get_session(), - load_balancer_id=loadbalancer_id, - amphora_id=amphora_id) - - def revert(self, amphora_id, *args, **kwargs): - """Remove amphora-load balancer association. - - :param amphora_id: Id of an amphora that couldn't be associated - with a load balancer. 
- :returns: None - """ - try: - self.repos.amphora.update(db_apis.get_session(), amphora_id, - loadbalancer_id=None) - except Exception as e: - LOG.error("Failed to update amphora %(amp)s " - "load balancer id to None due to: " - "%(except)s", {'amp': amphora_id, 'except': str(e)}) - - -class _MarkAmphoraRoleAndPriorityInDB(BaseDatabaseTask): - """Alter the amphora role and priority in DB.""" - - def _execute(self, amphora, amp_role, vrrp_priority): - """Alter the amphora role and priority in DB. - - :param amphora: Amphora to update. - :param amp_role: Amphora role to be set. - :param vrrp_priority: VRRP priority to set. - :returns: None - """ - LOG.debug("Mark %(role)s in DB for amphora: %(amp)s", - {'role': amp_role, 'amp': amphora.id}) - self.amphora_repo.update(db_apis.get_session(), amphora.id, - role=amp_role, - vrrp_priority=vrrp_priority) - - def _revert(self, result, amphora, *args, **kwargs): - """Removes role and vrrp_priority association. - - :param result: Result of the association. - :param amphora: Amphora which role/vrrp_priority association - failed. - :returns: None - """ - - if isinstance(result, failure.Failure): - return - - LOG.warning("Reverting amphora role in DB for amp id %(amp)s", - {'amp': amphora.id}) - try: - self.amphora_repo.update(db_apis.get_session(), amphora.id, - role=None, - vrrp_priority=None) - except Exception as e: - LOG.error("Failed to update amphora %(amp)s " - "role and vrrp_priority to None due to: " - "%(except)s", {'amp': amphora.id, 'except': str(e)}) - - -class MarkAmphoraMasterInDB(_MarkAmphoraRoleAndPriorityInDB): - """Alter the amphora role to: MASTER.""" - - def execute(self, amphora): - """Mark amphora as MASTER in db. - - :param amphora: Amphora to update role. - :returns: None - """ - amp_role = constants.ROLE_MASTER - self._execute(amphora, amp_role, constants.ROLE_MASTER_PRIORITY) - - def revert(self, result, amphora, *args, **kwargs): - """Removes amphora role association. - - :param amphora: Amphora to update role. - :returns: None - """ - self._revert(result, amphora, *args, **kwargs) - - -class MarkAmphoraBackupInDB(_MarkAmphoraRoleAndPriorityInDB): - """Alter the amphora role to: Backup.""" - - def execute(self, amphora): - """Mark amphora as BACKUP in db. - - :param amphora: Amphora to update role. - :returns: None - """ - amp_role = constants.ROLE_BACKUP - self._execute(amphora, amp_role, constants.ROLE_BACKUP_PRIORITY) - - def revert(self, result, amphora, *args, **kwargs): - """Removes amphora role association. - - :param amphora: Amphora to update role. - :returns: None - """ - self._revert(result, amphora, *args, **kwargs) - - -class MarkAmphoraStandAloneInDB(_MarkAmphoraRoleAndPriorityInDB): - """Alter the amphora role to: Standalone.""" - - def execute(self, amphora): - """Mark amphora as STANDALONE in db. - - :param amphora: Amphora to update role. - :returns: None - """ - amp_role = constants.ROLE_STANDALONE - self._execute(amphora, amp_role, None) - - def revert(self, result, amphora, *args, **kwargs): - """Removes amphora role association. - - :param amphora: Amphora to update role. - :returns: None - """ - self._revert(result, amphora, *args, **kwargs) - - -class MarkAmphoraAllocatedInDB(BaseDatabaseTask): - """Will mark an amphora as allocated to a load balancer in the database. - - Assume sqlalchemy made sure the DB got - retried sufficiently - so just abort - """ - - def execute(self, amphora, loadbalancer_id): - """Mark amphora as allocated to a load balancer in DB. - - :param amphora: Amphora to be updated. 
- :param loadbalancer_id: Id of a load balancer to which an amphora - should be allocated. - :returns: None - """ - - LOG.info('Mark ALLOCATED in DB for amphora: %(amp)s with ' - 'compute id: %(comp)s for load balancer: %(lb)s', - { - 'amp': amphora.id, - 'comp': amphora.compute_id, - 'lb': loadbalancer_id - }) - self.amphora_repo.update(db_apis.get_session(), amphora.id, - status=constants.AMPHORA_ALLOCATED, - compute_id=amphora.compute_id, - lb_network_ip=amphora.lb_network_ip, - load_balancer_id=loadbalancer_id) - - def revert(self, result, amphora, loadbalancer_id, *args, **kwargs): - """Mark the amphora as broken and ready to be cleaned up. - - :param result: Execute task result - :param amphora: Amphora that was updated. - :param loadbalancer_id: Id of a load balancer to which an amphora - failed to be allocated. - :returns: None - """ - - if isinstance(result, failure.Failure): - return - - LOG.warning("Reverting mark amphora ready in DB for amp " - "id %(amp)s and compute id %(comp)s", - {'amp': amphora.id, 'comp': amphora.compute_id}) - self.task_utils.mark_amphora_status_error(amphora.id) - - -class MarkAmphoraBootingInDB(BaseDatabaseTask): - """Mark the amphora as booting in the database.""" - - def execute(self, amphora_id, compute_id): - """Mark amphora booting in DB. - - :param amphora_id: Id of the amphora to update - :param compute_id: Id of a compute on which an amphora resides - :returns: None - """ - - LOG.debug("Mark BOOTING in DB for amphora: %(amp)s with " - "compute id %(id)s", {'amp': amphora_id, 'id': compute_id}) - self.amphora_repo.update(db_apis.get_session(), amphora_id, - status=constants.AMPHORA_BOOTING, - compute_id=compute_id) - - def revert(self, result, amphora_id, compute_id, *args, **kwargs): - """Mark the amphora as broken and ready to be cleaned up. - - :param result: Execute task result - :param amphora_id: Id of the amphora that failed to update - :param compute_id: Id of a compute on which an amphora resides - :returns: None - """ - - if isinstance(result, failure.Failure): - return - - LOG.warning("Reverting mark amphora booting in DB for amp " - "id %(amp)s and compute id %(comp)s", - {'amp': amphora_id, 'comp': compute_id}) - try: - self.amphora_repo.update(db_apis.get_session(), amphora_id, - status=constants.ERROR, - compute_id=compute_id) - except Exception as e: - LOG.error("Failed to update amphora %(amp)s " - "status to ERROR due to: " - "%(except)s", {'amp': amphora_id, 'except': str(e)}) - - -class MarkAmphoraDeletedInDB(BaseDatabaseTask): - """Mark the amphora deleted in the DB. - - Since sqlalchemy will likely retry by itself always revert if it fails - """ - - def execute(self, amphora): - """Mark the amphora as deleted in DB. - - :param amphora: Amphora to be updated. - :returns: None - """ - - LOG.debug("Mark DELETED in DB for amphora: %(amp)s with " - "compute id %(comp)s", - {'amp': amphora.id, 'comp': amphora.compute_id}) - self.amphora_repo.update(db_apis.get_session(), amphora.id, - status=constants.DELETED) - - def revert(self, amphora, *args, **kwargs): - """Mark the amphora as broken and ready to be cleaned up. - - :param amphora: Amphora that was updated. - :returns: None - """ - - LOG.warning("Reverting mark amphora deleted in DB " - "for amp id %(amp)s and compute id %(comp)s", - {'amp': amphora.id, 'comp': amphora.compute_id}) - self.task_utils.mark_amphora_status_error(amphora.id) - - -class MarkAmphoraPendingDeleteInDB(BaseDatabaseTask): - """Mark the amphora pending delete in the DB. 
- - Since sqlalchemy will likely retry by itself always revert if it fails - """ - - def execute(self, amphora): - """Mark the amphora as pending delete in DB. - - :param amphora: Amphora to be updated. - :returns: None - """ - - LOG.debug("Mark PENDING DELETE in DB for amphora: %(amp)s " - "with compute id %(id)s", - {'amp': amphora.id, 'id': amphora.compute_id}) - self.amphora_repo.update(db_apis.get_session(), amphora.id, - status=constants.PENDING_DELETE) - - def revert(self, amphora, *args, **kwargs): - """Mark the amphora as broken and ready to be cleaned up. - - :param amphora: Amphora that was updated. - :returns: None - """ - - LOG.warning("Reverting mark amphora pending delete in DB " - "for amp id %(amp)s and compute id %(comp)s", - {'amp': amphora.id, 'comp': amphora.compute_id}) - self.task_utils.mark_amphora_status_error(amphora.id) - - -class MarkAmphoraPendingUpdateInDB(BaseDatabaseTask): - """Mark the amphora pending update in the DB. - - Since sqlalchemy will likely retry by itself always revert if it fails - """ - - def execute(self, amphora): - """Mark the amphora as pending update in DB. - - :param amphora: Amphora to be updated. - :returns: None - """ - - LOG.debug("Mark PENDING UPDATE in DB for amphora: %(amp)s " - "with compute id %(id)s", - {'amp': amphora.id, 'id': amphora.compute_id}) - self.amphora_repo.update(db_apis.get_session(), amphora.id, - status=constants.PENDING_UPDATE) - - def revert(self, amphora, *args, **kwargs): - """Mark the amphora as broken and ready to be cleaned up. - - :param amphora: Amphora that was updated. - :returns: None - """ - - LOG.warning("Reverting mark amphora pending update in DB " - "for amp id %(amp)s and compute id %(comp)s", - {'amp': amphora.id, 'comp': amphora.compute_id}) - self.task_utils.mark_amphora_status_error(amphora.id) - - -class MarkAmphoraReadyInDB(BaseDatabaseTask): - """This task will mark an amphora as ready in the database. - - Assume sqlalchemy made sure the DB got - retried sufficiently - so just abort - """ - - def execute(self, amphora): - """Mark amphora as ready in DB. - - :param amphora: Amphora to be updated. - :returns: None - """ - - LOG.info("Mark READY in DB for amphora: %(amp)s with compute " - "id %(comp)s", - {"amp": amphora.id, "comp": amphora.compute_id}) - self.amphora_repo.update(db_apis.get_session(), amphora.id, - status=constants.AMPHORA_READY, - compute_id=amphora.compute_id, - lb_network_ip=amphora.lb_network_ip) - - def revert(self, amphora, *args, **kwargs): - """Mark the amphora as broken and ready to be cleaned up. - - :param amphora: Amphora that was updated. - :returns: None - """ - - LOG.warning("Reverting mark amphora ready in DB for amp " - "id %(amp)s and compute id %(comp)s", - {'amp': amphora.id, 'comp': amphora.compute_id}) - try: - self.amphora_repo.update(db_apis.get_session(), amphora.id, - status=constants.ERROR, - compute_id=amphora.compute_id, - lb_network_ip=amphora.lb_network_ip) - except Exception as e: - LOG.error("Failed to update amphora %(amp)s " - "status to ERROR due to: " - "%(except)s", {'amp': amphora.id, 'except': str(e)}) - - -class UpdateAmphoraComputeId(BaseDatabaseTask): - """Associate amphora with a compute in DB.""" - - def execute(self, amphora_id, compute_id): - """Associate amphora with a compute in DB. 
- - :param amphora_id: Id of the amphora to update - :param compute_id: Id of a compute on which an amphora resides - :returns: None - """ - - self.amphora_repo.update(db_apis.get_session(), amphora_id, - compute_id=compute_id) - - -class UpdateAmphoraInfo(BaseDatabaseTask): - """Update amphora with compute instance details.""" - - def execute(self, amphora_id, compute_obj): - """Update amphora with compute instance details. - - :param amphora_id: Id of the amphora to update - :param compute_obj: Compute on which an amphora resides - :returns: Updated amphora object - """ - self.amphora_repo.update( - db_apis.get_session(), amphora_id, - lb_network_ip=compute_obj.lb_network_ip, - cached_zone=compute_obj.cached_zone, - image_id=compute_obj.image_id, - compute_flavor=compute_obj.compute_flavor) - return self.amphora_repo.get(db_apis.get_session(), id=amphora_id) - - -class UpdateAmphoraDBCertExpiration(BaseDatabaseTask): - """Update the amphora expiration date with new cert file date.""" - - def execute(self, amphora_id, server_pem): - """Update the amphora expiration date with new cert file date. - - :param amphora_id: Id of the amphora to update - :param server_pem: Certificate in PEM format - :returns: None - """ - - LOG.debug("Update DB cert expiry date of amphora id: %s", amphora_id) - - key = utils.get_compatible_server_certs_key_passphrase() - fer = fernet.Fernet(key) - cert_expiration = cert_parser.get_cert_expiration( - fer.decrypt(server_pem)) - LOG.debug("Certificate expiration date is %s ", cert_expiration) - self.amphora_repo.update(db_apis.get_session(), amphora_id, - cert_expiration=cert_expiration) - - -class UpdateAmphoraCertBusyToFalse(BaseDatabaseTask): - """Update the amphora cert_busy flag to be false.""" - - def execute(self, amphora): - """Update the amphora cert_busy flag to be false. - - :param amphora: Amphora to be updated. - :returns: None - """ - - LOG.debug("Update cert_busy flag of amphora id %s to False", - amphora.id) - self.amphora_repo.update(db_apis.get_session(), amphora.id, - cert_busy=False) - - -class MarkLBActiveInDB(BaseDatabaseTask): - """Mark the load balancer active in the DB. - - Since sqlalchemy will likely retry by itself always revert if it fails - """ - - def __init__(self, mark_subobjects=False, **kwargs): - super().__init__(**kwargs) - self.mark_subobjects = mark_subobjects - - def execute(self, loadbalancer): - """Mark the load balancer as active in DB. - - This also marks ACTIVE all sub-objects of the load balancer if - self.mark_subobjects is True. 
- - :param loadbalancer: Load balancer object to be updated - :returns: None - """ - - if self.mark_subobjects: - LOG.debug("Marking all listeners of loadbalancer %s ACTIVE", - loadbalancer.id) - for listener in loadbalancer.listeners: - self._mark_listener_status(listener, constants.ACTIVE) - for pool in loadbalancer.pools: - self._mark_pool_status(pool, constants.ACTIVE) - - LOG.info("Mark ACTIVE in DB for load balancer id: %s", - loadbalancer.id) - self.loadbalancer_repo.update(db_apis.get_session(), - loadbalancer.id, - provisioning_status=constants.ACTIVE) - - def _mark_listener_status(self, listener, status): - self.listener_repo.update(db_apis.get_session(), - listener.id, - provisioning_status=status) - LOG.debug("Marking all l7policies of listener %s %s", - listener.id, status) - for l7policy in listener.l7policies: - self._mark_l7policy_status(l7policy, status) - - if listener.default_pool: - LOG.debug("Marking default pool of listener %s %s", - listener.id, status) - self._mark_pool_status(listener.default_pool, status) - - def _mark_l7policy_status(self, l7policy, status): - self.l7policy_repo.update( - db_apis.get_session(), l7policy.id, - provisioning_status=status) - - LOG.debug("Marking all l7rules of l7policy %s %s", - l7policy.id, status) - for l7rule in l7policy.l7rules: - self._mark_l7rule_status(l7rule, status) - - if l7policy.redirect_pool: - LOG.debug("Marking redirect pool of l7policy %s %s", - l7policy.id, status) - self._mark_pool_status(l7policy.redirect_pool, status) - - def _mark_l7rule_status(self, l7rule, status): - self.l7rule_repo.update( - db_apis.get_session(), l7rule.id, - provisioning_status=status) - - def _mark_pool_status(self, pool, status): - self.pool_repo.update( - db_apis.get_session(), pool.id, - provisioning_status=status) - if pool.health_monitor: - LOG.debug("Marking health monitor of pool %s %s", pool.id, status) - self._mark_hm_status(pool.health_monitor, status) - - LOG.debug("Marking all members of pool %s %s", pool.id, status) - for member in pool.members: - self._mark_member_status(member, status) - - def _mark_hm_status(self, hm, status): - self.health_mon_repo.update( - db_apis.get_session(), hm.id, - provisioning_status=status) - - def _mark_member_status(self, member, status): - self.member_repo.update( - db_apis.get_session(), member.id, - provisioning_status=status) - - def revert(self, loadbalancer, *args, **kwargs): - """Mark the load balancer as broken and ready to be cleaned up. - - This also puts all sub-objects of the load balancer to ERROR state if - self.mark_subobjects is True - - :param loadbalancer: Load balancer object that failed to update - :returns: None - """ - - if self.mark_subobjects: - LOG.debug("Marking all listeners and pools of loadbalancer %s" - " ERROR", loadbalancer.id) - for listener in loadbalancer.listeners: - try: - self._mark_listener_status(listener, constants.ERROR) - except Exception: - LOG.warning("Error updating listener %s provisioning " - "status", listener.id) - for pool in loadbalancer.pools: - try: - self._mark_pool_status(pool, constants.ERROR) - except Exception: - LOG.warning("Error updating pool %s provisioning " - "status", pool.id) - - -class UpdateLBServerGroupInDB(BaseDatabaseTask): - """Update the server group id info for load balancer in DB.""" - - def execute(self, loadbalancer_id, server_group_id): - """Update the server group id info for load balancer in DB. 
- - :param loadbalancer_id: Id of a load balancer to update - :param server_group_id: Id of a server group to associate with - the load balancer - :returns: None - """ - - LOG.debug("Server Group updated with id: %s for load balancer id: %s:", - server_group_id, loadbalancer_id) - self.loadbalancer_repo.update(db_apis.get_session(), - id=loadbalancer_id, - server_group_id=server_group_id) - - def revert(self, loadbalancer_id, server_group_id, *args, **kwargs): - """Remove server group information from a load balancer in DB. - - :param loadbalancer_id: Id of a load balancer that failed to update - :param server_group_id: Id of a server group that couldn't be - associated with the load balancer - :returns: None - """ - LOG.warning('Reverting Server Group updated with id: %(s1)s for ' - 'load balancer id: %(s2)s ', - {'s1': server_group_id, 's2': loadbalancer_id}) - try: - self.loadbalancer_repo.update(db_apis.get_session(), - id=loadbalancer_id, - server_group_id=None) - except Exception as e: - LOG.error("Failed to update load balancer %(lb)s " - "server_group_id to None due to: " - "%(except)s", {'lb': loadbalancer_id, 'except': str(e)}) - - -class MarkLBDeletedInDB(BaseDatabaseTask): - """Mark the load balancer deleted in the DB. - - Since sqlalchemy will likely retry by itself always revert if it fails - """ - - def execute(self, loadbalancer): - """Mark the load balancer as deleted in DB. - - :param loadbalancer: Load balancer object to be updated - :returns: None - """ - - LOG.debug("Mark DELETED in DB for load balancer id: %s", - loadbalancer.id) - self.loadbalancer_repo.update(db_apis.get_session(), - loadbalancer.id, - provisioning_status=constants.DELETED) - - -class MarkLBPendingDeleteInDB(BaseDatabaseTask): - """Mark the load balancer pending delete in the DB. - - Since sqlalchemy will likely retry by itself always revert if it fails - """ - - def execute(self, loadbalancer): - """Mark the load balancer as pending delete in DB. - - :param loadbalancer: Load balancer object to be updated - :returns: None - """ - - LOG.debug("Mark PENDING DELETE in DB for load balancer id: %s", - loadbalancer.id) - self.loadbalancer_repo.update(db_apis.get_session(), - loadbalancer.id, - provisioning_status=(constants. - PENDING_DELETE)) - - -class MarkLBAndListenersActiveInDB(BaseDatabaseTask): - """Mark the load balancer and specified listeners active in the DB. - - Since sqlalchemy will likely retry by itself always revert if it fails - """ - - def execute(self, loadbalancer, listeners): - """Mark the load balancer and listeners as active in DB. - - :param loadbalancer: Load balancer object to be updated - :param listeners: Listener objects to be updated - :returns: None - """ - - LOG.debug("Mark ACTIVE in DB for load balancer id: %s " - "and updating status for listener ids: %s", loadbalancer.id, - ', '.join([listener.id for listener in listeners])) - self.loadbalancer_repo.update(db_apis.get_session(), - loadbalancer.id, - provisioning_status=constants.ACTIVE) - for listener in listeners: - self.listener_repo.prov_status_active_if_not_error( - db_apis.get_session(), listener.id) - - def revert(self, loadbalancer, listeners, *args, **kwargs): - """Mark the load balancer and listeners as broken. 
- - :param loadbalancer: Load balancer object that failed to update - :param listeners: Listener objects that failed to update - :returns: None - """ - - LOG.warning("Reverting mark listeners active in DB " - "for listener ids: %(list)s", - {'list': ', '.join([listener.id - for listener in listeners])}) - for listener in listeners: - self.task_utils.mark_listener_prov_status_error(listener.id) - - -class MarkListenerDeletedInDB(BaseDatabaseTask): - """Mark the listener deleted in the DB. - - Since sqlalchemy will likely retry by itself always revert if it fails - """ - - def execute(self, listener): - """Mark the listener as deleted in DB - - :param listener: The listener to be marked deleted - :returns: None - """ - - LOG.debug("Mark DELETED in DB for listener id: %s ", listener.id) - self.listener_repo.update(db_apis.get_session(), listener.id, - provisioning_status=constants.DELETED) - - def revert(self, listener, *args, **kwargs): - """Mark the listener ERROR since the delete couldn't happen - - :param listener: The listener that couldn't be updated - :returns: None - """ - - LOG.warning("Reverting mark listener deleted in DB " - "for listener id %s", listener.id) - self.task_utils.mark_listener_prov_status_error(listener.id) - - -class MarkListenerPendingDeleteInDB(BaseDatabaseTask): - """Mark the listener pending delete in the DB. - - Since sqlalchemy will likely retry by itself always revert if it fails - """ - - def execute(self, listener): - """Mark the listener as pending delete in DB. - - :param listener: The listener to be updated - :returns: None - """ - - LOG.debug("Mark PENDING DELETE in DB for listener id: %s", - listener.id) - self.listener_repo.update(db_apis.get_session(), listener.id, - provisioning_status=constants.PENDING_DELETE) - - def revert(self, listener, *args, **kwargs): - """Mark the listener as broken and ready to be cleaned up. - - :param listener: The listener that couldn't be updated - :returns: None - """ - - LOG.warning("Reverting mark listener pending delete in DB " - "for listener id %s", listener.id) - self.task_utils.mark_listener_prov_status_error(listener.id) - - -class UpdateLoadbalancerInDB(BaseDatabaseTask): - """Update the loadbalancer in the DB. - - Since sqlalchemy will likely retry by itself always revert if it fails - """ - - def execute(self, loadbalancer, update_dict): - """Update the loadbalancer in the DB - - :param loadbalancer: The load balancer to be updated - :param update_dict: The dictionary of updates to apply - :returns: None - """ - - LOG.debug("Update DB for loadbalancer id: %s ", loadbalancer.id) - if update_dict.get('vip'): - vip_dict = update_dict.pop('vip') - self.vip_repo.update(db_apis.get_session(), - loadbalancer.vip.load_balancer_id, - **vip_dict) - self.loadbalancer_repo.update(db_apis.get_session(), loadbalancer.id, - **update_dict) - - -class UpdateHealthMonInDB(BaseDatabaseTask): - """Update the health monitor in the DB. 
- - Since sqlalchemy will likely retry by itself always revert if it fails - """ - - def execute(self, health_mon, update_dict): - """Update the health monitor in the DB - - :param health_mon: The health monitor to be updated - :param update_dict: The dictionary of updates to apply - :returns: None - """ - - LOG.debug("Update DB for health monitor id: %s ", health_mon.id) - self.health_mon_repo.update(db_apis.get_session(), health_mon.id, - **update_dict) - - def revert(self, health_mon, *args, **kwargs): - """Mark the health monitor ERROR since the update couldn't happen - - :param health_mon: The health monitor that couldn't be updated - :returns: None - """ - - LOG.warning("Reverting update health monitor in DB " - "for health monitor id %s", health_mon.id) - try: - self.health_mon_repo.update(db_apis.get_session(), - health_mon.id, - provisioning_status=constants.ERROR) - except Exception as e: - LOG.error("Failed to update health monitor %(hm)s " - "provisioning_status to ERROR due to: %(except)s", - {'hm': health_mon.id, 'except': str(e)}) - - -class UpdateListenerInDB(BaseDatabaseTask): - """Update the listener in the DB. - - Since sqlalchemy will likely retry by itself always revert if it fails - """ - - def execute(self, listener, update_dict): - """Update the listener in the DB - - :param listener: The listener to be updated - :param update_dict: The dictionary of updates to apply - :returns: None - """ - - LOG.debug("Update DB for listener id: %s ", listener.id) - self.listener_repo.update(db_apis.get_session(), listener.id, - **update_dict) - - def revert(self, listener, *args, **kwargs): - """Mark the listener ERROR since the update couldn't happen - - :param listener: The listener that couldn't be updated - :returns: None - """ - - LOG.warning("Reverting update listener in DB " - "for listener id %s", listener.id) - self.task_utils.mark_listener_prov_status_error(listener.id) - - -class UpdateMemberInDB(BaseDatabaseTask): - """Update the member in the DB. - - Since sqlalchemy will likely retry by itself always revert if it fails - """ - - def execute(self, member, update_dict): - """Update the member in the DB - - :param member: The member to be updated - :param update_dict: The dictionary of updates to apply - :returns: None - """ - - LOG.debug("Update DB for member id: %s ", member.id) - self.member_repo.update(db_apis.get_session(), member.id, - **update_dict) - - def revert(self, member, *args, **kwargs): - """Mark the member ERROR since the update couldn't happen - - :param member: The member that couldn't be updated - :returns: None - """ - - LOG.warning("Reverting update member in DB " - "for member id %s", member.id) - try: - self.member_repo.update(db_apis.get_session(), member.id, - provisioning_status=constants.ERROR) - except Exception as e: - LOG.error("Failed to update member %(member)s provisioning_status " - "to ERROR due to: %(except)s", {'member': member.id, - 'except': str(e)}) - - -class UpdatePoolInDB(BaseDatabaseTask): - """Update the pool in the DB. 
-
- Since sqlalchemy will likely retry by itself always revert if it fails
- """
-
- def execute(self, pool, update_dict):
- """Update the pool in the DB
-
- :param pool: The pool to be updated
- :param update_dict: The dictionary of updates to apply
- :returns: None
- """
-
- LOG.debug("Update DB for pool id: %s ", pool.id)
- self.repos.update_pool_and_sp(db_apis.get_session(), pool.id,
- update_dict)
-
- def revert(self, pool, *args, **kwargs):
- """Mark the pool ERROR since the update couldn't happen
-
- :param pool: The pool that couldn't be updated
- :returns: None
- """
-
- LOG.warning("Reverting update pool in DB for pool id %s", pool.id)
- try:
- self.repos.update_pool_and_sp(
- db_apis.get_session(), pool.id,
- {'provisioning_status': constants.ERROR})
- except Exception as e:
- LOG.error("Failed to update pool %(pool)s provisioning_status to "
- "ERROR due to: %(except)s", {'pool': pool.id,
- 'except': str(e)})
-
-
-class UpdateL7PolicyInDB(BaseDatabaseTask):
- """Update the L7 policy in the DB.
-
- Since sqlalchemy will likely retry by itself always revert if it fails
- """
-
- def execute(self, l7policy, update_dict):
- """Update the L7 policy in the DB
-
- :param l7policy: The L7 policy to be updated
- :param update_dict: The dictionary of updates to apply
- :returns: None
- """
-
- LOG.debug("Update DB for l7policy id: %s ", l7policy.id)
- self.l7policy_repo.update(db_apis.get_session(), l7policy.id,
- **update_dict)
-
- def revert(self, l7policy, *args, **kwargs):
- """Mark the l7policy ERROR since the update couldn't happen
-
- :param l7policy: L7 policy that couldn't be updated
- :returns: None
- """
-
- LOG.warning("Reverting update l7policy in DB "
- "for l7policy id %s", l7policy.id)
- try:
- self.l7policy_repo.update(db_apis.get_session(), l7policy.id,
- provisioning_status=constants.ERROR)
- except Exception as e:
- LOG.error("Failed to update l7policy %(l7p)s provisioning_status "
- "to ERROR due to: %(except)s", {'l7p': l7policy.id,
- 'except': str(e)})
-
-
-class UpdateL7RuleInDB(BaseDatabaseTask):
- """Update the L7 rule in the DB.
-
- Since sqlalchemy will likely retry by itself always revert if it fails
- """
-
- def execute(self, l7rule, update_dict):
- """Update the L7 rule in the DB
-
- :param l7rule: The L7 rule to be updated
- :param update_dict: The dictionary of updates to apply
- :returns: None
- """
-
- LOG.debug("Update DB for l7rule id: %s ", l7rule.id)
- self.l7rule_repo.update(db_apis.get_session(), l7rule.id,
- **update_dict)
-
- def revert(self, l7rule, *args, **kwargs):
- """Mark the parent l7policy ERROR since the update couldn't happen
-
- :param l7rule: L7 rule that couldn't be updated
- :returns: None
- """
-
- LOG.warning("Reverting update l7rule in DB "
- "for l7rule id %s", l7rule.id)
- try:
- self.l7policy_repo.update(db_apis.get_session(),
- l7rule.l7policy.id,
- provisioning_status=constants.ERROR)
- except Exception as e:
- LOG.error("Failed to update l7policy %(l7p)s provisioning_status "
- "to ERROR due to: %(except)s",
- {'l7p': l7rule.l7policy.id, 'except': str(e)})
-
-
-class GetAmphoraDetails(BaseDatabaseTask):
- """Task to retrieve amphora network details."""
-
- def execute(self, amphora):
- """Retrieve amphora network details.
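GetAmphoraDetails above copies individual columns into a fresh data_models.Amphora rather than passing the DB-backed object along, plausibly so downstream tasks receive a slim, session-independent snapshot. A rough equivalent using a dataclass (field names follow the task above; this class is illustrative, not the Octavia model):

    from dataclasses import dataclass

    @dataclass
    class AmphoraSnapshot:
        id: str
        vrrp_ip: str
        ha_ip: str
        vrrp_port_id: str
        ha_port_id: str
        role: str
        vrrp_id: int
        vrrp_priority: int

    def snapshot(amphora):
        # Copy only the fields later tasks need; the result holds no
        # reference to the database session.
        return AmphoraSnapshot(
            id=amphora.id, vrrp_ip=amphora.vrrp_ip, ha_ip=amphora.ha_ip,
            vrrp_port_id=amphora.vrrp_port_id,
            ha_port_id=amphora.ha_port_id, role=amphora.role,
            vrrp_id=amphora.vrrp_id, vrrp_priority=amphora.vrrp_priority)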
-
- :param amphora: Amphora whose network details are required
- :returns: data_models.Amphora object
- """
- return data_models.Amphora(id=amphora.id,
- vrrp_ip=amphora.vrrp_ip,
- ha_ip=amphora.ha_ip,
- vrrp_port_id=amphora.vrrp_port_id,
- ha_port_id=amphora.ha_port_id,
- role=amphora.role,
- vrrp_id=amphora.vrrp_id,
- vrrp_priority=amphora.vrrp_priority)
-
-
-class GetAmphoraeFromLoadbalancer(BaseDatabaseTask):
- """Task to pull the amphorae from a loadbalancer."""
-
- def execute(self, loadbalancer_id):
- """Pull the amphorae from a loadbalancer.
-
- :param loadbalancer_id: Load balancer ID to get amphorae from
- :returns: A list of Amphora objects
- """
- amphorae = []
- loadbalancer = self.loadbalancer_repo.get(db_apis.get_session(),
- id=loadbalancer_id)
- for amp in loadbalancer.amphorae:
- a = self.amphora_repo.get(db_apis.get_session(), id=amp.id,
- show_deleted=False)
- if a is None:
- continue
- amphorae.append(a)
- return amphorae
-
-
-class GetListenersFromLoadbalancer(BaseDatabaseTask):
- """Task to pull the listeners from a loadbalancer."""
-
- def execute(self, loadbalancer):
- """Pull the listeners from a loadbalancer.
-
- :param loadbalancer: Load balancer whose listeners are required
- :returns: A list of Listener objects
- """
- listeners = []
- for listener in loadbalancer.listeners:
- lb = self.listener_repo.get(db_apis.get_session(), id=listener.id)
- lb.load_balancer = loadbalancer
- listeners.append(lb)
- return listeners
-
-
-class GetLoadBalancer(BaseDatabaseTask):
- """Get a load balancer object from the database."""
-
- def execute(self, loadbalancer_id, *args, **kwargs):
- """Get a load balancer object from the database.
-
- :param loadbalancer_id: The load balancer ID to lookup
- :returns: The load balancer object
- """
-
- LOG.debug("Get load balancer from DB for load balancer id: %s",
- loadbalancer_id)
- return self.loadbalancer_repo.get(db_apis.get_session(),
- id=loadbalancer_id)
-
-
-class GetVipFromLoadbalancer(BaseDatabaseTask):
- """Task to pull the vip from a loadbalancer."""
-
- def execute(self, loadbalancer):
- """Pull the vip from a loadbalancer.
-
- :param loadbalancer: Load balancer whose VIP is required
- :returns: VIP associated with a given load balancer
- """
- return loadbalancer.vip
-
-
-class CreateVRRPGroupForLB(BaseDatabaseTask):
- """Create a VRRP group for a load balancer."""
-
- def execute(self, loadbalancer_id):
- """Create a VRRP group for a load balancer.
-
- :param loadbalancer_id: Load balancer ID for which a VRRP group
- should be created
- """
- try:
- self.repos.vrrpgroup.create(
- db_apis.get_session(),
- load_balancer_id=loadbalancer_id,
- vrrp_group_name=str(loadbalancer_id).replace('-', ''),
- vrrp_auth_type=constants.VRRP_AUTH_DEFAULT,
- vrrp_auth_pass=uuidutils.generate_uuid().replace('-', '')[0:7],
- advert_int=CONF.keepalived_vrrp.vrrp_advert_int)
- except odb_exceptions.DBDuplicateEntry:
- LOG.debug('VRRP_GROUP entry already exists for load balancer, '
- 'skipping create.')
-
-
-class DisableAmphoraHealthMonitoring(BaseDatabaseTask):
- """Disable amphora health monitoring.
-
- This disables amphora health monitoring by removing it from
- the amphora_health table.
- """
-
- def execute(self, amphora):
- """Disable health monitoring for an amphora
-
- :param amphora: The amphora to disable health monitoring for
- :returns: None
- """
- self._delete_from_amp_health(amphora.id)
-
-
-class DisableLBAmphoraeHealthMonitoring(BaseDatabaseTask):
- """Disable health monitoring on the LB amphorae.
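CreateVRRPGroupForLB above is made retry-safe by treating a duplicate-key error as success. The same create-or-ignore pattern in isolation, assuming oslo.db and a SQLAlchemy-style session (the model and session here are stand-ins, not Octavia's):

    from oslo_db import exception as odb_exceptions

    def ensure_row(session, model, **kwargs):
        # Create the row; a second run hits the unique constraint and
        # is treated as success, which keeps the task retry-safe.
        try:
            session.add(model(**kwargs))
            session.commit()
        except odb_exceptions.DBDuplicateEntry:
            session.rollback()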
-
- This disables amphora health monitoring by removing it from
- the amphora_health table for each amphora on a load balancer.
- """
-
- def execute(self, loadbalancer):
- """Disable health monitoring for amphora on a load balancer
-
- :param loadbalancer: The load balancer to disable health monitoring on
- :returns: None
- """
- for amphora in loadbalancer.amphorae:
- self._delete_from_amp_health(amphora.id)
-
-
-class MarkAmphoraHealthBusy(BaseDatabaseTask):
- """Mark amphora health monitoring busy.
-
- This prevents amphora failover by marking the amphora busy in
- the amphora_health table.
- """
-
- def execute(self, amphora):
- """Mark amphora health monitoring busy
-
- :param amphora: The amphora to mark amphora health busy
- :returns: None
- """
- self._mark_amp_health_busy(amphora.id)
-
-
-class MarkLBAmphoraeHealthBusy(BaseDatabaseTask):
- """Mark amphorae health monitoring busy for the LB.
-
- This prevents amphorae failover by marking each amphora of a given
- load balancer busy in the amphora_health table.
- """
-
- def execute(self, loadbalancer):
- """Marks amphorae health busy for each amphora on a load balancer
-
- :param loadbalancer: The load balancer to mark amphorae health busy
- :returns: None
- """
- for amphora in loadbalancer.amphorae:
- self._mark_amp_health_busy(amphora.id)
-
-
-class MarkHealthMonitorActiveInDB(BaseDatabaseTask):
- """Mark the health monitor ACTIVE in the DB.
-
- Since sqlalchemy will likely retry by itself always revert if it fails
- """
-
- def execute(self, health_mon):
- """Mark the health monitor ACTIVE in DB.
-
- :param health_mon: Health Monitor object to be updated
- :returns: None
- """
-
- LOG.debug("Mark ACTIVE in DB for health monitor id: %s",
- health_mon.id)
-
- op_status = (constants.ONLINE if health_mon.enabled
- else constants.OFFLINE)
- self.health_mon_repo.update(db_apis.get_session(),
- health_mon.id,
- provisioning_status=constants.ACTIVE,
- operating_status=op_status)
-
- def revert(self, health_mon, *args, **kwargs):
- """Mark the health monitor as broken
-
- :param health_mon: Health Monitor object that failed to update
- :returns: None
- """
-
- LOG.warning("Reverting mark health monitor ACTIVE in DB "
- "for health monitor id %s", health_mon.id)
- self.task_utils.mark_health_mon_prov_status_error(health_mon.id)
-
-
-class MarkHealthMonitorPendingCreateInDB(BaseDatabaseTask):
- """Mark the health monitor pending create in the DB.
-
- Since sqlalchemy will likely retry by itself always revert if it fails
- """
-
- def execute(self, health_mon):
- """Mark the health monitor as pending create in DB.
-
- :param health_mon: Health Monitor object to be updated
- :returns: None
- """
-
- LOG.debug("Mark PENDING CREATE in DB for health monitor id: %s",
- health_mon.id)
- self.health_mon_repo.update(db_apis.get_session(),
- health_mon.id,
- provisioning_status=(constants.
- PENDING_CREATE))
-
- def revert(self, health_mon, *args, **kwargs):
- """Mark the health monitor as broken
-
- :param health_mon: Health Monitor object that failed to update
- :returns: None
- """
-
- LOG.warning("Reverting mark health monitor pending create in DB "
- "for health monitor id %s", health_mon.id)
- self.task_utils.mark_health_mon_prov_status_error(health_mon.id)
-
-
-class MarkHealthMonitorPendingDeleteInDB(BaseDatabaseTask):
- """Mark the health monitor pending delete in the DB.
-
- Since sqlalchemy will likely retry by itself always revert if it fails
- """
-
- def execute(self, health_mon):
- """Mark the health monitor as pending delete in DB.
- - :param health_mon: Health Monitor object to be updated - :returns: None - """ - - LOG.debug("Mark PENDING DELETE in DB for health monitor id: %s", - health_mon.id) - self.health_mon_repo.update(db_apis.get_session(), - health_mon.id, - provisioning_status=(constants. - PENDING_DELETE)) - - def revert(self, health_mon, *args, **kwargs): - """Mark the health monitor as broken - - :param health_mon: Health Monitor object that failed to update - :returns: None - """ - - LOG.warning("Reverting mark health monitor pending delete in DB " - "for health monitor id %s", health_mon.id) - self.task_utils.mark_health_mon_prov_status_error(health_mon.id) - - -class MarkHealthMonitorPendingUpdateInDB(BaseDatabaseTask): - """Mark the health monitor pending update in the DB. - - Since sqlalchemy will likely retry by itself always revert if it fails - """ - - def execute(self, health_mon): - """Mark the health monitor as pending update in DB. - - :param health_mon: Health Monitor object to be updated - :returns: None - """ - - LOG.debug("Mark PENDING UPDATE in DB for health monitor id: %s", - health_mon.id) - self.health_mon_repo.update(db_apis.get_session(), - health_mon.id, - provisioning_status=(constants. - PENDING_UPDATE)) - - def revert(self, health_mon, *args, **kwargs): - """Mark the health monitor as broken - - :param health_mon: Health Monitor object that failed to update - :returns: None - """ - - LOG.warning("Reverting mark health monitor pending update in DB " - "for health monitor id %s", health_mon.id) - self.task_utils.mark_health_mon_prov_status_error(health_mon.id) - - -class MarkL7PolicyActiveInDB(BaseDatabaseTask): - """Mark the l7policy ACTIVE in the DB. - - Since sqlalchemy will likely retry by itself always revert if it fails - """ - - def execute(self, l7policy): - """Mark the l7policy ACTIVE in DB. - - :param l7policy: L7Policy object to be updated - :returns: None - """ - - LOG.debug("Mark ACTIVE in DB for l7policy id: %s", - l7policy.id) - - op_status = constants.ONLINE if l7policy.enabled else constants.OFFLINE - self.l7policy_repo.update(db_apis.get_session(), - l7policy.id, - provisioning_status=constants.ACTIVE, - operating_status=op_status) - - def revert(self, l7policy, *args, **kwargs): - """Mark the l7policy as broken - - :param l7policy: L7Policy object that failed to update - :returns: None - """ - - LOG.warning("Reverting mark l7policy ACTIVE in DB " - "for l7policy id %s", l7policy.id) - self.task_utils.mark_l7policy_prov_status_error(l7policy.id) - - -class MarkL7PolicyPendingCreateInDB(BaseDatabaseTask): - """Mark the l7policy pending create in the DB. - - Since sqlalchemy will likely retry by itself always revert if it fails - """ - - def execute(self, l7policy): - """Mark the l7policy as pending create in DB. - - :param l7policy: L7Policy object to be updated - :returns: None - """ - - LOG.debug("Mark PENDING CREATE in DB for l7policy id: %s", - l7policy.id) - self.l7policy_repo.update(db_apis.get_session(), - l7policy.id, - provisioning_status=constants.PENDING_CREATE) - - def revert(self, l7policy, *args, **kwargs): - """Mark the l7policy as broken - - :param l7policy: L7Policy object that failed to update - :returns: None - """ - - LOG.warning("Reverting mark l7policy pending create in DB " - "for l7policy id %s", l7policy.id) - self.task_utils.mark_l7policy_prov_status_error(l7policy.id) - - -class MarkL7PolicyPendingDeleteInDB(BaseDatabaseTask): - """Mark the l7policy pending delete in the DB. 
- - Since sqlalchemy will likely retry by itself always revert if it fails - """ - - def execute(self, l7policy): - """Mark the l7policy as pending delete in DB. - - :param l7policy: L7Policy object to be updated - :returns: None - """ - - LOG.debug("Mark PENDING DELETE in DB for l7policy id: %s", - l7policy.id) - self.l7policy_repo.update(db_apis.get_session(), - l7policy.id, - provisioning_status=constants.PENDING_DELETE) - - def revert(self, l7policy, *args, **kwargs): - """Mark the l7policy as broken - - :param l7policy: L7Policy object that failed to update - :returns: None - """ - - LOG.warning("Reverting mark l7policy pending delete in DB " - "for l7policy id %s", l7policy.id) - self.task_utils.mark_l7policy_prov_status_error(l7policy.id) - - -class MarkL7PolicyPendingUpdateInDB(BaseDatabaseTask): - """Mark the l7policy pending update in the DB. - - Since sqlalchemy will likely retry by itself always revert if it fails - """ - - def execute(self, l7policy): - """Mark the l7policy as pending update in DB. - - :param l7policy: L7Policy object to be updated - :returns: None - """ - - LOG.debug("Mark PENDING UPDATE in DB for l7policy id: %s", - l7policy.id) - self.l7policy_repo.update(db_apis.get_session(), - l7policy.id, - provisioning_status=(constants. - PENDING_UPDATE)) - - def revert(self, l7policy, *args, **kwargs): - """Mark the l7policy as broken - - :param l7policy: L7Policy object that failed to update - :returns: None - """ - - LOG.warning("Reverting mark l7policy pending update in DB " - "for l7policy id %s", l7policy.id) - self.task_utils.mark_l7policy_prov_status_error(l7policy.id) - - -class MarkL7RuleActiveInDB(BaseDatabaseTask): - """Mark the l7rule ACTIVE in the DB. - - Since sqlalchemy will likely retry by itself always revert if it fails - """ - - def execute(self, l7rule): - """Mark the l7rule ACTIVE in DB. - - :param l7rule: L7Rule object to be updated - :returns: None - """ - - LOG.debug("Mark ACTIVE in DB for l7rule id: %s", - l7rule.id) - op_status = constants.ONLINE if l7rule.enabled else constants.OFFLINE - self.l7rule_repo.update(db_apis.get_session(), - l7rule.id, - provisioning_status=constants.ACTIVE, - operating_status=op_status) - - def revert(self, l7rule, *args, **kwargs): - """Mark the l7rule as broken - - :param l7rule: L7Rule object that failed to update - :returns: None - """ - - LOG.warning("Reverting mark l7rule ACTIVE in DB " - "for l7rule id %s", l7rule.id) - self.task_utils.mark_l7rule_prov_status_error(l7rule.id) - - -class MarkL7RulePendingCreateInDB(BaseDatabaseTask): - """Mark the l7rule pending create in the DB. - - Since sqlalchemy will likely retry by itself always revert if it fails - """ - - def execute(self, l7rule): - """Mark the l7rule as pending create in DB. - - :param l7rule: L7Rule object to be updated - :returns: None - """ - - LOG.debug("Mark PENDING CREATE in DB for l7rule id: %s", - l7rule.id) - self.l7rule_repo.update(db_apis.get_session(), - l7rule.id, - provisioning_status=constants.PENDING_CREATE) - - def revert(self, l7rule, *args, **kwargs): - """Mark the l7rule as broken - - :param l7rule: L7Rule object that failed to update - :returns: None - """ - - LOG.warning("Reverting mark l7rule pending create in DB " - "for l7rule id %s", l7rule.id) - self.task_utils.mark_l7rule_prov_status_error(l7rule.id) - - -class MarkL7RulePendingDeleteInDB(BaseDatabaseTask): - """Mark the l7rule pending delete in the DB. 
- - Since sqlalchemy will likely retry by itself always revert if it fails - """ - - def execute(self, l7rule): - """Mark the l7rule as pending delete in DB. - - :param l7rule: L7Rule object to be updated - :returns: None - """ - - LOG.debug("Mark PENDING DELETE in DB for l7rule id: %s", - l7rule.id) - self.l7rule_repo.update(db_apis.get_session(), - l7rule.id, - provisioning_status=constants.PENDING_DELETE) - - def revert(self, l7rule, *args, **kwargs): - """Mark the l7rule as broken - - :param l7rule: L7Rule object that failed to update - :returns: None - """ - - LOG.warning("Reverting mark l7rule pending delete in DB " - "for l7rule id %s", l7rule.id) - self.task_utils.mark_l7rule_prov_status_error(l7rule.id) - - -class MarkL7RulePendingUpdateInDB(BaseDatabaseTask): - """Mark the l7rule pending update in the DB. - - Since sqlalchemy will likely retry by itself always revert if it fails - """ - - def execute(self, l7rule): - """Mark the l7rule as pending update in DB. - - :param l7rule: L7Rule object to be updated - :returns: None - """ - - LOG.debug("Mark PENDING UPDATE in DB for l7rule id: %s", - l7rule.id) - self.l7rule_repo.update(db_apis.get_session(), - l7rule.id, - provisioning_status=constants.PENDING_UPDATE) - - def revert(self, l7rule, *args, **kwargs): - """Mark the l7rule as broken - - :param l7rule: L7Rule object that failed to update - :returns: None - """ - - LOG.warning("Reverting mark l7rule pending update in DB " - "for l7rule id %s", l7rule.id) - self.task_utils.mark_l7rule_prov_status_error(l7rule.id) - - -class MarkMemberActiveInDB(BaseDatabaseTask): - """Mark the member ACTIVE in the DB. - - Since sqlalchemy will likely retry by itself always revert if it fails - """ - - def execute(self, member): - """Mark the member ACTIVE in DB. - - :param member: Member object to be updated - :returns: None - """ - - LOG.debug("Mark ACTIVE in DB for member id: %s", member.id) - self.member_repo.update(db_apis.get_session(), - member.id, - provisioning_status=constants.ACTIVE) - - def revert(self, member, *args, **kwargs): - """Mark the member as broken - - :param member: Member object that failed to update - :returns: None - """ - - LOG.warning("Reverting mark member ACTIVE in DB " - "for member id %s", member.id) - self.task_utils.mark_member_prov_status_error(member.id) - - -class MarkMemberPendingCreateInDB(BaseDatabaseTask): - """Mark the member pending create in the DB. - - Since sqlalchemy will likely retry by itself always revert if it fails - """ - - def execute(self, member): - """Mark the member as pending create in DB. - - :param member: Member object to be updated - :returns: None - """ - - LOG.debug("Mark PENDING CREATE in DB for member id: %s", member.id) - self.member_repo.update(db_apis.get_session(), - member.id, - provisioning_status=constants.PENDING_CREATE) - - def revert(self, member, *args, **kwargs): - """Mark the member as broken - - :param member: Member object that failed to update - :returns: None - """ - - LOG.warning("Reverting mark member pending create in DB " - "for member id %s", member.id) - self.task_utils.mark_member_prov_status_error(member.id) - - -class MarkMemberPendingDeleteInDB(BaseDatabaseTask): - """Mark the member pending delete in the DB. - - Since sqlalchemy will likely retry by itself always revert if it fails - """ - - def execute(self, member): - """Mark the member as pending delete in DB. 
- - :param member: Member object to be updated - :returns: None - """ - - LOG.debug("Mark PENDING DELETE in DB for member id: %s", member.id) - self.member_repo.update(db_apis.get_session(), - member.id, - provisioning_status=constants.PENDING_DELETE) - - def revert(self, member, *args, **kwargs): - """Mark the member as broken - - :param member: Member object that failed to update - :returns: None - """ - - LOG.warning("Reverting mark member pending delete in DB " - "for member id %s", member.id) - self.task_utils.mark_member_prov_status_error(member.id) - - -class MarkMemberPendingUpdateInDB(BaseDatabaseTask): - """Mark the member pending update in the DB. - - Since sqlalchemy will likely retry by itself always revert if it fails - """ - - def execute(self, member): - """Mark the member as pending update in DB. - - :param member: Member object to be updated - :returns: None - """ - - LOG.debug("Mark PENDING UPDATE in DB for member id: %s", - member.id) - self.member_repo.update(db_apis.get_session(), - member.id, - provisioning_status=constants.PENDING_UPDATE) - - def revert(self, member, *args, **kwargs): - """Mark the member as broken - - :param member: Member object that failed to update - :returns: None - """ - - LOG.warning("Reverting mark member pending update in DB " - "for member id %s", member.id) - self.task_utils.mark_member_prov_status_error(member.id) - - -class MarkPoolActiveInDB(BaseDatabaseTask): - """Mark the pool ACTIVE in the DB. - - Since sqlalchemy will likely retry by itself always revert if it fails - """ - - def execute(self, pool): - """Mark the pool ACTIVE in DB. - - :param pool: Pool object to be updated - :returns: None - """ - - LOG.debug("Mark ACTIVE in DB for pool id: %s", - pool.id) - self.pool_repo.update(db_apis.get_session(), - pool.id, - provisioning_status=constants.ACTIVE) - - def revert(self, pool, *args, **kwargs): - """Mark the pool as broken - - :param pool: Pool object that failed to update - :returns: None - """ - - LOG.warning("Reverting mark pool ACTIVE in DB for pool id %s", pool.id) - self.task_utils.mark_pool_prov_status_error(pool.id) - - -class MarkPoolPendingCreateInDB(BaseDatabaseTask): - """Mark the pool pending create in the DB. - - Since sqlalchemy will likely retry by itself always revert if it fails - """ - - def execute(self, pool): - """Mark the pool as pending create in DB. - - :param pool: Pool object to be updated - :returns: None - """ - - LOG.debug("Mark PENDING CREATE in DB for pool id: %s", - pool.id) - self.pool_repo.update(db_apis.get_session(), - pool.id, - provisioning_status=constants.PENDING_CREATE) - - def revert(self, pool, *args, **kwargs): - """Mark the pool as broken - - :param pool: Pool object that failed to update - :returns: None - """ - - LOG.warning("Reverting mark pool pending create in DB " - "for pool id %s", pool.id) - self.task_utils.mark_pool_prov_status_error(pool.id) - - -class MarkPoolPendingDeleteInDB(BaseDatabaseTask): - """Mark the pool pending delete in the DB. - - Since sqlalchemy will likely retry by itself always revert if it fails - """ - - def execute(self, pool): - """Mark the pool as pending delete in DB. 
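The Mark<Resource><Status>InDB tasks in this stretch differ only in repository and target status. The v1 code spells each one out; purely as an illustration of the shared shape, the family could be compressed into a factory like this (not how Octavia structures it; repo is a stand-in with an update(obj_id, **fields) method):

    from taskflow import task

    def make_mark_task(name, repo, status):
        class _MarkInDB(task.Task):
            def execute(self, obj):
                repo.update(obj.id, provisioning_status=status)

            def revert(self, obj, *args, **kwargs):
                repo.update(obj.id, provisioning_status='ERROR')

        _MarkInDB.__name__ = name
        return _MarkInDB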
- - :param pool: Pool object to be updated - :returns: None - """ - - LOG.debug("Mark PENDING DELETE in DB for pool id: %s", - pool.id) - self.pool_repo.update(db_apis.get_session(), - pool.id, - provisioning_status=constants.PENDING_DELETE) - - def revert(self, pool, *args, **kwargs): - """Mark the pool as broken - - :param pool: Pool object that failed to update - :returns: None - """ - - LOG.warning("Reverting mark pool pending delete in DB " - "for pool id %s", pool.id) - self.task_utils.mark_pool_prov_status_error(pool.id) - - -class MarkPoolPendingUpdateInDB(BaseDatabaseTask): - """Mark the pool pending update in the DB. - - Since sqlalchemy will likely retry by itself always revert if it fails - """ - - def execute(self, pool): - """Mark the pool as pending update in DB. - - :param pool: Pool object to be updated - :returns: None - """ - - LOG.debug("Mark PENDING UPDATE in DB for pool id: %s", - pool.id) - self.pool_repo.update(db_apis.get_session(), - pool.id, - provisioning_status=constants.PENDING_UPDATE) - - def revert(self, pool, *args, **kwargs): - """Mark the pool as broken - - :param pool: Pool object that failed to update - :returns: None - """ - - LOG.warning("Reverting mark pool pending update in DB " - "for pool id %s", pool.id) - self.task_utils.mark_pool_prov_status_error(pool.id) - - -class DecrementHealthMonitorQuota(BaseDatabaseTask): - """Decrements the health monitor quota for a project. - - Since sqlalchemy will likely retry by itself always revert if it fails - """ - - def execute(self, health_mon): - """Decrements the health monitor quota. - - :param health_mon: The health monitor to decrement the quota on. - :returns: None - """ - - LOG.debug("Decrementing health monitor quota for " - "project: %s ", health_mon.project_id) - - lock_session = db_apis.get_session(autocommit=False) - try: - self.repos.decrement_quota(lock_session, - data_models.HealthMonitor, - health_mon.project_id) - lock_session.commit() - except Exception: - with excutils.save_and_reraise_exception(): - LOG.error('Failed to decrement health monitor quota for ' - 'project: %(proj)s the project may have excess ' - 'quota in use.', {'proj': health_mon.project_id}) - lock_session.rollback() - - def revert(self, health_mon, result, *args, **kwargs): - """Re-apply the quota - - :param health_mon: The health monitor to decrement the quota on. - :returns: None - """ - - LOG.warning('Reverting decrement quota for health monitor on project' - ' %(proj)s Project quota counts may be incorrect.', - {'proj': health_mon.project_id}) - - # Increment the quota back if this task wasn't the failure - if not isinstance(result, failure.Failure): - - try: - session = db_apis.get_session() - lock_session = db_apis.get_session(autocommit=False) - try: - self.repos.check_quota_met(session, - lock_session, - data_models.HealthMonitor, - health_mon.project_id) - lock_session.commit() - except Exception: - lock_session.rollback() - except Exception: - # Don't fail the revert flow - pass - - -class DecrementListenerQuota(BaseDatabaseTask): - """Decrements the listener quota for a project. - - Since sqlalchemy will likely retry by itself always revert if it fails - """ - - def execute(self, listener): - """Decrements the listener quota. - - :param listener: The listener to decrement the quota on. 
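All of the Decrement*Quota tasks below share one pattern: decrement inside a dedicated locking session, and on revert re-count the quota via check_quota_met only if this task itself was not the failure, swallowing any exception so the revert flow never dies. A condensed sketch with stand-in names for Octavia's repositories and session factory:

    from taskflow.types import failure

    def decrement(repos, get_session, model, project_id):
        lock_session = get_session(autocommit=False)
        try:
            repos.decrement_quota(lock_session, model, project_id)
            lock_session.commit()
        except Exception:
            lock_session.rollback()
            raise

    def revert_decrement(repos, get_session, model, project_id, result):
        if isinstance(result, failure.Failure):
            return  # this task was the failure; nothing was decremented
        try:
            session = get_session()
            lock_session = get_session(autocommit=False)
            try:
                repos.check_quota_met(session, lock_session, model,
                                      project_id)
                lock_session.commit()
            except Exception:
                lock_session.rollback()
        except Exception:
            pass  # never fail the revert flow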
- :returns: None - """ - - LOG.debug("Decrementing listener quota for " - "project: %s ", listener.project_id) - - lock_session = db_apis.get_session(autocommit=False) - try: - self.repos.decrement_quota(lock_session, - data_models.Listener, - listener.project_id) - lock_session.commit() - except Exception: - with excutils.save_and_reraise_exception(): - LOG.error('Failed to decrement listener quota for project: ' - '%(proj)s the project may have excess quota in use.', - {'proj': listener.project_id}) - lock_session.rollback() - - def revert(self, listener, result, *args, **kwargs): - """Re-apply the quota - - :param listener: The listener to decrement the quota on. - :returns: None - """ - - LOG.warning('Reverting decrement quota for listener on project ' - '%(proj)s Project quota counts may be incorrect.', - {'proj': listener.project_id}) - - # Increment the quota back if this task wasn't the failure - if not isinstance(result, failure.Failure): - - try: - session = db_apis.get_session() - lock_session = db_apis.get_session(autocommit=False) - try: - self.repos.check_quota_met(session, - lock_session, - data_models.Listener, - listener.project_id) - lock_session.commit() - except Exception: - lock_session.rollback() - except Exception: - # Don't fail the revert flow - pass - - -class DecrementLoadBalancerQuota(BaseDatabaseTask): - """Decrements the load balancer quota for a project. - - Since sqlalchemy will likely retry by itself always revert if it fails - """ - - def execute(self, loadbalancer): - """Decrements the load balancer quota. - - :param loadbalancer: The load balancer to decrement the quota on. - :returns: None - """ - - LOG.debug("Decrementing load balancer quota for " - "project: %s ", loadbalancer.project_id) - - lock_session = db_apis.get_session(autocommit=False) - try: - self.repos.decrement_quota(lock_session, - data_models.LoadBalancer, - loadbalancer.project_id) - lock_session.commit() - except Exception: - with excutils.save_and_reraise_exception(): - LOG.error('Failed to decrement load balancer quota for ' - 'project: %(proj)s the project may have excess ' - 'quota in use.', {'proj': loadbalancer.project_id}) - lock_session.rollback() - - def revert(self, loadbalancer, result, *args, **kwargs): - """Re-apply the quota - - :param loadbalancer: The load balancer to decrement the quota on. - :returns: None - """ - - LOG.warning('Reverting decrement quota for load balancer on project ' - '%(proj)s Project quota counts may be incorrect.', - {'proj': loadbalancer.project_id}) - - # Increment the quota back if this task wasn't the failure - if not isinstance(result, failure.Failure): - - try: - session = db_apis.get_session() - lock_session = db_apis.get_session(autocommit=False) - try: - self.repos.check_quota_met(session, - lock_session, - data_models.LoadBalancer, - loadbalancer.project_id) - lock_session.commit() - except Exception: - lock_session.rollback() - except Exception: - # Don't fail the revert flow - pass - - -class DecrementMemberQuota(BaseDatabaseTask): - """Decrements the member quota for a project. - - Since sqlalchemy will likely retry by itself always revert if it fails - """ - - def execute(self, member): - """Decrements the member quota. - - :param member: The member to decrement the quota on. 
- :returns: None - """ - - LOG.debug("Decrementing member quota for " - "project: %s ", member.project_id) - - lock_session = db_apis.get_session(autocommit=False) - try: - self.repos.decrement_quota(lock_session, - data_models.Member, - member.project_id) - lock_session.commit() - except Exception: - with excutils.save_and_reraise_exception(): - LOG.error('Failed to decrement member quota for project: ' - '%(proj)s the project may have excess quota in use.', - {'proj': member.project_id}) - lock_session.rollback() - - def revert(self, member, result, *args, **kwargs): - """Re-apply the quota - - :param member: The member to decrement the quota on. - :returns: None - """ - - LOG.warning('Reverting decrement quota for member on project %(proj)s ' - 'Project quota counts may be incorrect.', - {'proj': member.project_id}) - - # Increment the quota back if this task wasn't the failure - if not isinstance(result, failure.Failure): - - try: - session = db_apis.get_session() - lock_session = db_apis.get_session(autocommit=False) - try: - self.repos.check_quota_met(session, - lock_session, - data_models.Member, - member.project_id) - lock_session.commit() - except Exception: - lock_session.rollback() - except Exception: - # Don't fail the revert flow - pass - - -class DecrementPoolQuota(BaseDatabaseTask): - """Decrements the pool quota for a project. - - Since sqlalchemy will likely retry by itself always revert if it fails - """ - - def execute(self, pool, pool_child_count): - """Decrements the pool quota. - - :param pool: The pool to decrement the quota on - :returns: None - """ - - LOG.debug("Decrementing pool quota for " - "project: %s ", pool.project_id) - - lock_session = db_apis.get_session(autocommit=False) - try: - self.repos.decrement_quota(lock_session, - data_models.Pool, - pool.project_id) - - # Pools cascade delete members and health monitors - # update the quota for those items as well. 
- if pool_child_count['HM'] > 0:
- self.repos.decrement_quota(lock_session,
- data_models.HealthMonitor,
- pool.project_id)
- if pool_child_count['member'] > 0:
- self.repos.decrement_quota(
- lock_session, data_models.Member,
- pool.project_id, quantity=pool_child_count['member'])
-
- lock_session.commit()
- except Exception:
- with excutils.save_and_reraise_exception():
- LOG.error('Failed to decrement pool quota for project: '
- '%(proj)s the project may have excess quota in use.',
- {'proj': pool.project_id})
- lock_session.rollback()
-
- def revert(self, pool, pool_child_count, result, *args, **kwargs):
- """Re-apply the quota
-
- :param pool: The pool to re-apply the quota on
- :param pool_child_count: Dict of the pool's child resource counts
- :returns: None
- """
-
- LOG.warning('Reverting decrement quota for pool on project %(proj)s '
- 'Project quota counts may be incorrect.',
- {'proj': pool.project_id})
-
- # Increment the quota back if this task wasn't the failure
- if not isinstance(result, failure.Failure):
-
- # These are all independent to maximize the correction
- # in case other quota actions have occurred
- try:
- session = db_apis.get_session()
- lock_session = db_apis.get_session(autocommit=False)
- try:
- self.repos.check_quota_met(session,
- lock_session,
- data_models.Pool,
- pool.project_id)
- lock_session.commit()
- except Exception:
- lock_session.rollback()
-
- # Attempt to increment back the health monitor quota
- if pool_child_count['HM'] > 0:
- lock_session = db_apis.get_session(autocommit=False)
- try:
- self.repos.check_quota_met(session,
- lock_session,
- data_models.HealthMonitor,
- pool.project_id)
- lock_session.commit()
- except Exception:
- lock_session.rollback()
-
- # Attempt to increment back the member quota
- # These are separate calls to maximize the correction
- # should other factors have increased the in use quota
- # before this point in the revert flow
- for i in range(pool_child_count['member']):
- lock_session = db_apis.get_session(autocommit=False)
- try:
- self.repos.check_quota_met(session,
- lock_session,
- data_models.Member,
- pool.project_id)
- lock_session.commit()
- except Exception:
- lock_session.rollback()
- except Exception:
- # Don't fail the revert flow
- pass
-
-
-class CountPoolChildrenForQuota(BaseDatabaseTask):
- """Counts the pool child resources for quota management.
-
- Since the children of pools are cleaned up by the sqlalchemy
- cascade delete settings, we need to collect the quota counts
- for the child objects early.
-
- """
-
- def execute(self, pool):
- """Count the pool child resources for quota management
-
- :param pool: The pool to count children on
- :returns: Dict of child resource counts, e.g. {'HM': 1, 'member': 2}
- """
-
- LOG.debug("Counting pool children for "
- "project: %s ", pool.project_id)
-
- health_mon_count = 1 if pool.health_monitor else 0
- member_count = len(pool.members)
-
- return {'HM': health_mon_count, 'member': member_count}
-
-
-class DecrementL7policyQuota(BaseDatabaseTask):
- """Decrements the l7policy quota for a project.
-
- Since sqlalchemy will likely retry by itself always revert if it fails
- """
-
- def execute(self, l7policy):
- """Decrements the l7policy quota.
-
- :param l7policy: The l7policy to decrement the quota on.
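CountPoolChildrenForQuota above exists because the cascade delete removes the pool's health monitor and member rows before their quota could be counted, so the counts have to be captured up front and threaded through the flow. A standalone illustration:

    def count_pool_children(pool):
        # Capture the counts while the rows still exist; after the
        # cascade delete they are gone.
        return {
            'HM': 1 if pool.health_monitor else 0,
            'member': len(pool.members),
        }

The revert side then re-checks quota once per member rather than in bulk, so a partial failure still corrects as many counts as possible.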
- :returns: None - """ - - LOG.debug("Decrementing l7policy quota for " - "project: %s ", l7policy.project_id) - - lock_session = db_apis.get_session(autocommit=False) - try: - self.repos.decrement_quota(lock_session, - data_models.L7Policy, - l7policy.project_id) - - if l7policy.l7rules: - self.repos.decrement_quota(lock_session, - data_models.L7Rule, - l7policy.project_id, - quantity=len(l7policy.l7rules)) - lock_session.commit() - except Exception: - with excutils.save_and_reraise_exception(): - LOG.error('Failed to decrement l7policy quota for project: ' - '%(proj)s the project may have excess quota in use.', - {'proj': l7policy.project_id}) - lock_session.rollback() - - def revert(self, l7policy, result, *args, **kwargs): - """Re-apply the quota - - :param l7policy: The l7policy to decrement the quota on. - :returns: None - """ - - LOG.warning('Reverting decrement quota for l7policy on project' - ' %(proj)s Project quota counts may be incorrect.', - {'proj': l7policy.project_id}) - - # Increment the quota back if this task wasn't the failure - if not isinstance(result, failure.Failure): - - try: - session = db_apis.get_session() - lock_session = db_apis.get_session(autocommit=False) - try: - self.repos.check_quota_met(session, - lock_session, - data_models.L7Policy, - l7policy.project_id) - lock_session.commit() - except Exception: - lock_session.rollback() - - # Attempt to increment back the L7Rule quota - for i in range(len(l7policy.l7rules)): - lock_session = db_apis.get_session(autocommit=False) - try: - self.repos.check_quota_met(session, - lock_session, - data_models.L7Rule, - l7policy.project_id) - lock_session.commit() - except Exception: - lock_session.rollback() - except Exception: - # Don't fail the revert flow - pass - - -class DecrementL7ruleQuota(BaseDatabaseTask): - """Decrements the l7rule quota for a project. - - Since sqlalchemy will likely retry by itself always revert if it fails - """ - - def execute(self, l7rule): - """Decrements the l7rule quota. - - :param l7rule: The l7rule to decrement the quota on. - :returns: None - """ - - LOG.debug("Decrementing l7rule quota for " - "project: %s ", l7rule.project_id) - - lock_session = db_apis.get_session(autocommit=False) - try: - self.repos.decrement_quota(lock_session, - data_models.L7Rule, - l7rule.project_id) - lock_session.commit() - except Exception: - with excutils.save_and_reraise_exception(): - LOG.error('Failed to decrement l7rule quota for project: ' - '%(proj)s the project may have excess quota in use.', - {'proj': l7rule.project_id}) - lock_session.rollback() - - def revert(self, l7rule, result, *args, **kwargs): - """Re-apply the quota - - :param l7rule: The l7rule to decrement the quota on. - :returns: None - """ - - LOG.warning('Reverting decrement quota for l7rule on project %(proj)s ' - 'Project quota counts may be incorrect.', - {'proj': l7rule.project_id}) - - # Increment the quota back if this task wasn't the failure - if not isinstance(result, failure.Failure): - - try: - session = db_apis.get_session() - lock_session = db_apis.get_session(autocommit=False) - try: - self.repos.check_quota_met(session, - lock_session, - data_models.L7Rule, - l7rule.project_id) - lock_session.commit() - except Exception: - lock_session.rollback() - except Exception: - # Don't fail the revert flow - pass - - -class UpdatePoolMembersOperatingStatusInDB(BaseDatabaseTask): - """Updates the members of a pool operating status. 
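DecrementL7policyQuota above folds the child rule quota into the same locking session via quantity=len(l7policy.l7rules), so the policy and rule decrements land or roll back together. The core of it, condensed with stand-in names:

    def decrement_l7policy_quota(repos, lock_session, models, l7policy):
        repos.decrement_quota(lock_session, models.L7Policy,
                              l7policy.project_id)
        if l7policy.l7rules:
            repos.decrement_quota(lock_session, models.L7Rule,
                                  l7policy.project_id,
                                  quantity=len(l7policy.l7rules))
        lock_session.commit()  # both decrements commit together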
-
- Since sqlalchemy will likely retry by itself always revert if it fails
- """
-
- def execute(self, pool, operating_status):
- """Update the members of a pool operating status in DB.
-
- :param pool: Pool object to be updated
- :param operating_status: Operating status to set
- :returns: None
- """
-
- LOG.debug("Updating member operating status to %(status)s in DB for "
- "pool id: %(pool)s", {'status': operating_status,
- 'pool': pool.id})
- self.member_repo.update_pool_members(db_apis.get_session(),
- pool.id,
- operating_status=operating_status)
diff --git a/octavia/controller/worker/v1/tasks/lifecycle_tasks.py b/octavia/controller/worker/v1/tasks/lifecycle_tasks.py
deleted file mode 100644
index 41b8288ba3..0000000000
--- a/octavia/controller/worker/v1/tasks/lifecycle_tasks.py
+++ /dev/null
@@ -1,173 +0,0 @@
-# Copyright 2016 Rackspace
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from taskflow import task
-
-from octavia.controller.worker import task_utils as task_utilities
-
-
-class BaseLifecycleTask(task.Task):
- """Base task to instantiate common classes."""
-
- def __init__(self, **kwargs):
- self.task_utils = task_utilities.TaskUtils()
- super().__init__(**kwargs)
-
-
-class AmphoraIDToErrorOnRevertTask(BaseLifecycleTask):
- """Task to checkpoint Amphora lifecycle milestones."""
-
- def execute(self, amphora_id):
- pass
-
- def revert(self, amphora_id, *args, **kwargs):
- self.task_utils.mark_amphora_status_error(amphora_id)
-
-
-class AmphoraToErrorOnRevertTask(AmphoraIDToErrorOnRevertTask):
- """Task to checkpoint Amphora lifecycle milestones."""
-
- def execute(self, amphora):
- pass
-
- def revert(self, amphora, *args, **kwargs):
- super().revert(amphora.id)
-
-
-class HealthMonitorToErrorOnRevertTask(BaseLifecycleTask):
- """Task to set a health monitor to ERROR on revert."""
-
- def execute(self, health_mon, listeners, loadbalancer):
- pass
-
- def revert(self, health_mon, listeners, loadbalancer, *args, **kwargs):
- self.task_utils.mark_health_mon_prov_status_error(health_mon.id)
- self.task_utils.mark_pool_prov_status_active(health_mon.pool_id)
- self.task_utils.mark_loadbalancer_prov_status_active(loadbalancer.id)
- for listener in listeners:
- self.task_utils.mark_listener_prov_status_active(listener.id)
-
-
-class L7PolicyToErrorOnRevertTask(BaseLifecycleTask):
- """Task to set an l7policy to ERROR on revert."""
-
- def execute(self, l7policy, listeners, loadbalancer):
- pass
-
- def revert(self, l7policy, listeners, loadbalancer, *args, **kwargs):
- self.task_utils.mark_l7policy_prov_status_error(l7policy.id)
- self.task_utils.mark_loadbalancer_prov_status_active(loadbalancer.id)
- for listener in listeners:
- self.task_utils.mark_listener_prov_status_active(listener.id)
-
-
-class L7RuleToErrorOnRevertTask(BaseLifecycleTask):
- """Task to set an l7rule to ERROR on revert."""
-
- def execute(self, l7rule, listeners, loadbalancer):
- pass
-
- def revert(self, l7rule, listeners, loadbalancer, *args, **kwargs):
- self.task_utils.mark_l7rule_prov_status_error(l7rule.id)
-
self.task_utils.mark_l7policy_prov_status_active(l7rule.l7policy_id) - self.task_utils.mark_loadbalancer_prov_status_active(loadbalancer.id) - for listener in listeners: - self.task_utils.mark_listener_prov_status_active(listener.id) - - -class ListenerToErrorOnRevertTask(BaseLifecycleTask): - """Task to set a listener to ERROR on revert.""" - - def execute(self, listener): - pass - - def revert(self, listener, *args, **kwargs): - self.task_utils.mark_listener_prov_status_error(listener.id) - self.task_utils.mark_loadbalancer_prov_status_active( - listener.load_balancer.id) - - -class ListenersToErrorOnRevertTask(BaseLifecycleTask): - """Task to set listeners to ERROR on revert.""" - - def execute(self, listeners, loadbalancer): - pass - - def revert(self, listeners, loadbalancer, *args, **kwargs): - self.task_utils.mark_loadbalancer_prov_status_active( - loadbalancer.id) - for listener in listeners: - self.task_utils.mark_listener_prov_status_error(listener.id) - - -class LoadBalancerIDToErrorOnRevertTask(BaseLifecycleTask): - """Task to set the load balancer to ERROR on revert.""" - - def execute(self, loadbalancer_id): - pass - - def revert(self, loadbalancer_id, *args, **kwargs): - self.task_utils.mark_loadbalancer_prov_status_error(loadbalancer_id) - - -class LoadBalancerToErrorOnRevertTask(LoadBalancerIDToErrorOnRevertTask): - """Task to set the load balancer to ERROR on revert.""" - - def execute(self, loadbalancer): - pass - - def revert(self, loadbalancer, *args, **kwargs): - super().revert(loadbalancer.id) - - -class MemberToErrorOnRevertTask(BaseLifecycleTask): - """Task to set a member to ERROR on revert.""" - - def execute(self, member, listeners, loadbalancer, pool): - pass - - def revert(self, member, listeners, loadbalancer, pool, *args, **kwargs): - self.task_utils.mark_member_prov_status_error(member.id) - for listener in listeners: - self.task_utils.mark_listener_prov_status_active(listener.id) - self.task_utils.mark_pool_prov_status_active(pool.id) - self.task_utils.mark_loadbalancer_prov_status_active(loadbalancer.id) - - -class MembersToErrorOnRevertTask(BaseLifecycleTask): - """Task to set members to ERROR on revert.""" - - def execute(self, members, listeners, loadbalancer, pool): - pass - - def revert(self, members, listeners, loadbalancer, pool, *args, **kwargs): - for m in members: - self.task_utils.mark_member_prov_status_error(m.id) - for listener in listeners: - self.task_utils.mark_listener_prov_status_active(listener.id) - self.task_utils.mark_pool_prov_status_active(pool.id) - self.task_utils.mark_loadbalancer_prov_status_active(loadbalancer.id) - - -class PoolToErrorOnRevertTask(BaseLifecycleTask): - """Task to set a pool to ERROR on revert.""" - - def execute(self, pool, listeners, loadbalancer): - pass - - def revert(self, pool, listeners, loadbalancer, *args, **kwargs): - self.task_utils.mark_pool_prov_status_error(pool.id) - self.task_utils.mark_loadbalancer_prov_status_active(loadbalancer.id) - for listener in listeners: - self.task_utils.mark_listener_prov_status_active(listener.id) diff --git a/octavia/controller/worker/v1/tasks/model_tasks.py b/octavia/controller/worker/v1/tasks/model_tasks.py deleted file mode 100644 index 72557cafcb..0000000000 --- a/octavia/controller/worker/v1/tasks/model_tasks.py +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright 2015 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from taskflow import task - - -class DeleteModelObject(task.Task): - """Task to delete an object in a model.""" - - def execute(self, object): - - object.delete() - - -class UpdateAttributes(task.Task): - """Task to update an object for changes.""" - - def execute(self, object, update_dict): - """Update an object and its associated resources. - - Note: This relies on the data_model update() methods to handle complex - objects with nested objects (LoadBalancer.vip, - Pool.session_persistence, etc.) - - :param object: The object will be updated. - :param update_dict: The updates dictionary. - :returns: None - """ - object.update(update_dict) diff --git a/octavia/controller/worker/v1/tasks/network_tasks.py b/octavia/controller/worker/v1/tasks/network_tasks.py deleted file mode 100644 index 0c49d3cebd..0000000000 --- a/octavia/controller/worker/v1/tasks/network_tasks.py +++ /dev/null @@ -1,970 +0,0 @@ -# Copyright 2015 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
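The two model tasks above mutate only in-memory data models; persisting the change is left to the database tasks earlier in this patch. A simplified illustration of the update() cascade that UpdateAttributes relies on (this Node class is illustrative, not the Octavia data model):

    class Node:
        def __init__(self, **kwargs):
            self.__dict__.update(kwargs)

        def update(self, update_dict):
            for key, value in update_dict.items():
                current = getattr(self, key, None)
                if isinstance(value, dict) and hasattr(current, 'update'):
                    current.update(value)  # recurse into nested objects
                else:
                    setattr(self, key, value)

    lb = Node(name='lb1', vip=Node(ip_address='203.0.113.10'))
    lb.update({'name': 'lb2', 'vip': {'ip_address': '203.0.113.11'}})
    assert lb.name == 'lb2' and lb.vip.ip_address == '203.0.113.11'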
-# -import time - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import excutils -from taskflow import task -from taskflow.types import failure -import tenacity - -from octavia.common import constants -from octavia.common import utils -from octavia.controller.worker import task_utils -from octavia.db import api as db_apis -from octavia.db import repositories -from octavia.network import base -from octavia.network import data_models as n_data_models - -LOG = logging.getLogger(__name__) -CONF = cfg.CONF - - -class BaseNetworkTask(task.Task): - """Base task to load drivers common to the tasks.""" - - def __init__(self, **kwargs): - super().__init__(**kwargs) - self._network_driver = None - self.task_utils = task_utils.TaskUtils() - self.lb_repo = repositories.LoadBalancerRepository() - - @property - def network_driver(self): - if self._network_driver is None: - self._network_driver = utils.get_network_driver() - return self._network_driver - - -class CalculateAmphoraDelta(BaseNetworkTask): - - default_provides = constants.DELTA - - def execute(self, loadbalancer, amphora, availability_zone): - LOG.debug("Calculating network delta for amphora id: %s", amphora.id) - - vip_subnet_to_net_map = { - loadbalancer.vip.subnet_id: - loadbalancer.vip.network_id, - } - - # Figure out what networks we want - # seed with lb network(s) - if (availability_zone and - availability_zone.get(constants.MANAGEMENT_NETWORK)): - management_nets = [ - availability_zone.get(constants.MANAGEMENT_NETWORK)] - else: - management_nets = CONF.controller_worker.amp_boot_network_list - - desired_subnet_to_net_map = {} - for mgmt_net_id in management_nets: - for subnet_id in self.network_driver.get_network( - mgmt_net_id).subnets: - desired_subnet_to_net_map[subnet_id] = mgmt_net_id - desired_subnet_to_net_map.update(vip_subnet_to_net_map) - - for pool in loadbalancer.pools: - for member in pool.members: - if (member.subnet_id and - member.provisioning_status != - constants.PENDING_DELETE): - member_network = self.network_driver.get_subnet( - member.subnet_id).network_id - desired_subnet_to_net_map[member.subnet_id] = ( - member_network) - - desired_network_ids = set(desired_subnet_to_net_map.values()) - desired_subnet_ids = set(desired_subnet_to_net_map) - - # Calculate Network deltas - nics = self.network_driver.get_plugged_networks( - amphora.compute_id) - # we don't have two nics in the same network - network_to_nic_map = {nic.network_id: nic for nic in nics} - - plugged_network_ids = set(network_to_nic_map) - - del_ids = plugged_network_ids - desired_network_ids - delete_nics = [n_data_models.Interface( - network_id=net_id, - port_id=network_to_nic_map[net_id].port_id) - for net_id in del_ids] - - add_ids = desired_network_ids - plugged_network_ids - add_nics = [n_data_models.Interface( - network_id=add_net_id, - fixed_ips=[ - n_data_models.FixedIP( - subnet_id=subnet_id) - for subnet_id, net_id in desired_subnet_to_net_map.items() - if net_id == add_net_id]) - for add_net_id in add_ids] - - # Calculate member Subnet deltas - plugged_subnets = {} - for nic in network_to_nic_map.values(): - for fixed_ip in nic.fixed_ips or []: - plugged_subnets[fixed_ip.subnet_id] = nic.network_id - - plugged_subnet_ids = set(plugged_subnets) - del_subnet_ids = plugged_subnet_ids - desired_subnet_ids - add_subnet_ids = desired_subnet_ids - plugged_subnet_ids - - def _subnet_updates(subnet_ids, subnets): - updates = [] - for s in subnet_ids: - network_id = subnets[s] - nic = 
network_to_nic_map.get(network_id)
- port_id = nic.port_id if nic else None
- updates.append({
- constants.SUBNET_ID: s,
- constants.NETWORK_ID: network_id,
- constants.PORT_ID: port_id
- })
- return updates
-
- add_subnets = _subnet_updates(add_subnet_ids,
- desired_subnet_to_net_map)
- del_subnets = _subnet_updates(del_subnet_ids,
- plugged_subnets)
-
- delta = n_data_models.Delta(
- amphora_id=amphora.id,
- compute_id=amphora.compute_id,
- add_nics=add_nics, delete_nics=delete_nics,
- add_subnets=add_subnets,
- delete_subnets=del_subnets)
- return delta
-
-
-class CalculateDelta(BaseNetworkTask):
- """Task to calculate the delta between
-
- the nics on the amphora and the ones
- we need. Returns a dict of deltas,
- keyed by amphora id, for plumbing them.
- """
-
- default_provides = constants.DELTAS
-
- def execute(self, loadbalancer, availability_zone):
- """Compute which NICs need to be plugged
-
- for the amphora to become operational.
-
- :param loadbalancer: the loadbalancer to calculate deltas for all
- amphorae
- :param availability_zone: availability zone metadata dict
-
- :returns: dict of octavia.network.data_models.Delta keyed off amphora
- id
- """
-
- calculate_amp = CalculateAmphoraDelta()
- deltas = {}
- for amphora in filter(
- lambda amp: amp.status == constants.AMPHORA_ALLOCATED,
- loadbalancer.amphorae):
-
- delta = calculate_amp.execute(loadbalancer, amphora,
- availability_zone)
- deltas[amphora.id] = delta
- return deltas
-
-
-class GetPlumbedNetworks(BaseNetworkTask):
- """Task to figure out the NICS on an amphora.
-
- This will likely move into the amphora driver
- :returns: Array of networks
- """
-
- default_provides = constants.NICS
-
- def execute(self, amphora):
- """Get plumbed networks for the amphora."""
-
- LOG.debug("Getting plumbed networks for amphora id: %s", amphora.id)
-
- return self.network_driver.get_plugged_networks(amphora.compute_id)
-
-
-class PlugNetworks(BaseNetworkTask):
- """Task to plug the networks.
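Stripped of the driver calls, the delta calculation above reduces to set arithmetic over network and subnet ids. A minimal standalone version with string ids:

    desired = {'mgmt-net', 'vip-net', 'member-net-1'}
    plugged = {'mgmt-net', 'vip-net', 'member-net-2'}

    add_ids = desired - plugged   # networks to plug
    del_ids = plugged - desired   # networks to unplug

    assert add_ids == {'member-net-1'}
    assert del_ids == {'member-net-2'}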
-
- This uses the delta to add all missing networks/nics
- """
-
- def execute(self, amphora, delta):
- """Update the amphora networks for the delta."""
-
- LOG.debug("Plug or unplug networks for amphora id: %s", amphora.id)
-
- if not delta:
- LOG.debug("No network deltas for amphora id: %s", amphora.id)
- return
-
- # add nics
- for nic in delta.add_nics:
- self.network_driver.plug_network(amphora.compute_id,
- nic.network_id)
-
- def revert(self, amphora, delta, *args, **kwargs):
- """Handle a failed network plug by removing all nics added."""
-
- LOG.warning("Unable to plug networks for amp id %s", amphora.id)
- if not delta:
- return
-
- for nic in delta.add_nics:
- try:
- self.network_driver.unplug_network(amphora.compute_id,
- nic.network_id)
- except base.NetworkNotFound:
- pass
-
-
-class UnPlugNetworks(BaseNetworkTask):
- """Task to unplug the networks
-
- Loop over all nics and unplug them
- based on delta
- """
-
- def execute(self, amphora, delta):
- """Unplug the networks."""
-
- LOG.debug("Unplug network for amphora")
- if not delta:
- LOG.debug("No network deltas for amphora id: %s", amphora.id)
- return
-
- for nic in delta.delete_nics:
- try:
- self.network_driver.unplug_network(amphora.compute_id,
- nic.network_id)
- except base.NetworkNotFound:
- LOG.debug("Network %s not found", nic.network_id)
- except Exception:
- LOG.exception("Unable to unplug network")
- # TODO(xgerman) follow up if that makes sense
-
-
-class GetMemberPorts(BaseNetworkTask):
-
- def execute(self, loadbalancer, amphora):
- vip_port = self.network_driver.get_port(loadbalancer.vip.port_id)
- member_ports = []
- interfaces = self.network_driver.get_plugged_networks(
- amphora.compute_id)
- for interface in interfaces:
- port = self.network_driver.get_port(interface.port_id)
- if vip_port.network_id == port.network_id:
- continue
- port.network = self.network_driver.get_network(port.network_id)
- for fixed_ip in port.fixed_ips:
- if amphora.lb_network_ip == fixed_ip.ip_address:
- break
- fixed_ip.subnet = self.network_driver.get_subnet(
- fixed_ip.subnet_id)
- # Only add the port to the list if the IP wasn't the mgmt IP
- else:
- member_ports.append(port)
- return member_ports
-
-
-class HandleNetworkDelta(BaseNetworkTask):
- """Task to plug and unplug networks
-
- Plug or unplug networks based on delta
- """
-
- def _fill_port_info(self, port):
- port.network = self.network_driver.get_network(port.network_id)
- for fixed_ip in port.fixed_ips:
- fixed_ip.subnet = self.network_driver.get_subnet(
- fixed_ip.subnet_id)
-
- def execute(self, amphora, delta):
- """Handle network plugging based off deltas."""
- updated_ports = {}
- for nic in delta.add_nics:
- subnet_id = nic.fixed_ips[0].subnet_id
- interface = self.network_driver.plug_network(
- amphora.compute_id, nic.network_id)
- port = self.network_driver.get_port(interface.port_id)
- # nova may have plugged undesired subnets (it plugs one of the
- # subnets of the network); we can safely unplug the subnets we
- # don't need, the desired subnet will be added in the
- # 'ADD_SUBNETS' loop.
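- # For example, with a desired fixed IP on subnet-b of a two-subnet
- # network, nova may have attached the port with an address on
- # subnet-a instead; that extra subnet is collected below and
- # unplugged before subnet-b is plugged in the ADD_SUBNETS loop.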
- extra_subnets = [ - fixed_ip.subnet_id - for fixed_ip in port.fixed_ips - if fixed_ip.subnet_id != subnet_id] - for subnet_id in extra_subnets: - port = self.network_driver.unplug_fixed_ip( - port_id=interface.port_id, subnet_id=subnet_id) - self._fill_port_info(port) - updated_ports[port.network_id] = port - - for update in delta.add_subnets: - network_id = update[constants.NETWORK_ID] - # Get already existing port from Deltas or - # newly created port from updated_ports dict - port_id = (update[constants.PORT_ID] or - updated_ports[network_id].id) - subnet_id = update[constants.SUBNET_ID] - # Avoid duplicated subnets - has_subnet = False - if network_id in updated_ports: - has_subnet = any( - fixed_ip.subnet_id == subnet_id - for fixed_ip in updated_ports[network_id].fixed_ips) - if not has_subnet: - port = self.network_driver.plug_fixed_ip( - port_id=port_id, subnet_id=subnet_id) - self._fill_port_info(port) - updated_ports[network_id] = port - - for update in delta.delete_subnets: - network_id = update[constants.NETWORK_ID] - port_id = update[constants.PORT_ID] - subnet_id = update[constants.SUBNET_ID] - port = self.network_driver.unplug_fixed_ip( - port_id=port_id, subnet_id=subnet_id) - self._fill_port_info(port) - # In neutron, when removing an ipv6 subnet (with slaac) from a - # port, it just ignores it. - # https://bugs.launchpad.net/neutron/+bug/1945156 - # When it happens, don't add the port to the updated_ports dict - has_subnet = any( - fixed_ip.subnet_id == subnet_id - for fixed_ip in port.fixed_ips) - if not has_subnet: - updated_ports[network_id] = port - - for nic in delta.delete_nics: - network_id = nic.network_id - try: - self.network_driver.unplug_network( - amphora.compute_id, network_id) - except base.NetworkNotFound: - LOG.debug("Network %s not found", network_id) - except Exception: - LOG.exception("Unable to unplug network") - - port_id = nic.port_id - try: - self.network_driver.delete_port(port_id) - except Exception: - LOG.exception("Unable to delete the port") - - updated_ports.pop(network_id, None) - return {amphora.id: list(updated_ports.values())} - - def revert(self, result, amphora, delta, *args, **kwargs): - """Handle a network plug or unplug failures.""" - - if isinstance(result, failure.Failure): - return - - if not delta: - return - - LOG.warning("Unable to plug networks for amp id %s", - delta.amphora_id) - - for nic in delta.add_nics: - try: - self.network_driver.unplug_network(delta.compute_id, - nic.network_id) - except Exception: - LOG.exception("Unable to unplug network %s", - nic.network_id) - - port_id = nic.port_id - try: - self.network_driver.delete_port(port_id) - except Exception: - LOG.exception("Unable to delete port %s", port_id) - - -class HandleNetworkDeltas(BaseNetworkTask): - """Task to plug and unplug networks - - Loop through the deltas and plug or unplug - networks based on delta - """ - - def execute(self, deltas, loadbalancer): - """Handle network plugging based off deltas.""" - amphorae = {amp.id: amp for amp in loadbalancer.amphorae} - - updated_ports = {} - handle_delta = HandleNetworkDelta() - - for amp_id, delta in deltas.items(): - ret = handle_delta.execute(amphorae[amp_id], delta) - updated_ports.update(ret) - - return updated_ports - - def revert(self, result, deltas, *args, **kwargs): - """Handle a network plug or unplug failures.""" - - if isinstance(result, failure.Failure): - return - - if not deltas: - return - - for amp_id, delta in deltas.items(): - LOG.warning("Unable to plug networks for amp id %s", - 
delta.amphora_id) - if not delta: - return - - for nic in delta.add_nics: - try: - self.network_driver.unplug_network(delta.compute_id, - nic.network_id) - except Exception: - LOG.exception("Unable to unplug network %s", - nic.network_id) - - port_id = nic.port_id - try: - self.network_driver.delete_port(port_id) - except Exception: - LOG.exception("Unable to delete port %s", port_id) - - -class PlugVIP(BaseNetworkTask): - """Task to plumb a VIP.""" - - def execute(self, loadbalancer): - """Plumb a vip to an amphora.""" - - LOG.debug("Plumbing VIP for loadbalancer id: %s", loadbalancer.id) - - amps_data = self.network_driver.plug_vip(loadbalancer, - loadbalancer.vip) - return amps_data - - def revert(self, result, loadbalancer, *args, **kwargs): - """Handle a failure to plumb a vip.""" - - if isinstance(result, failure.Failure): - return - LOG.warning("Unable to plug VIP for loadbalancer id %s", - loadbalancer.id) - - try: - # Make sure we have the current port IDs for cleanup - for amp_data in result: - for amphora in filter( - # pylint: disable=cell-var-from-loop - lambda amp: amp.id == amp_data.id, - loadbalancer.amphorae): - amphora.vrrp_port_id = amp_data.vrrp_port_id - amphora.ha_port_id = amp_data.ha_port_id - - self.network_driver.unplug_vip(loadbalancer, loadbalancer.vip) - except Exception as e: - LOG.error("Failed to unplug VIP. Resources may still " - "be in use from vip: %(vip)s due to error: %(except)s", - {'vip': loadbalancer.vip.ip_address, 'except': str(e)}) - - -class UpdateVIPSecurityGroup(BaseNetworkTask): - """Task to setup SG for LB.""" - - def execute(self, loadbalancer_id): - """Task to setup SG for LB. - - Task is idempotent and safe to retry. - """ - - LOG.debug("Setting up VIP SG for load balancer id: %s", - loadbalancer_id) - - loadbalancer = self.lb_repo.get(db_apis.get_session(), - id=loadbalancer_id) - - sg_id = self.network_driver.update_vip_sg(loadbalancer, - loadbalancer.vip) - LOG.info("Set up VIP SG %s for load balancer %s complete", - sg_id if sg_id else "None", loadbalancer_id) - return sg_id - - -class GetSubnetFromVIP(BaseNetworkTask): - """Task to plumb a VIP.""" - - def execute(self, loadbalancer): - """Plumb a vip to an amphora.""" - - LOG.debug("Getting subnet for LB: %s", loadbalancer.id) - - subnet = self.network_driver.get_subnet(loadbalancer.vip.subnet_id) - LOG.info("Got subnet %s for load balancer %s", - loadbalancer.vip.subnet_id if subnet else "None", - loadbalancer.id) - return subnet - - -class PlugVIPAmpphora(BaseNetworkTask): - """Task to plumb a VIP.""" - - def execute(self, loadbalancer, amphora, subnet): - """Plumb a vip to an amphora.""" - - LOG.debug("Plumbing VIP for amphora id: %s", amphora.id) - - amp_data = self.network_driver.plug_aap_port( - loadbalancer, loadbalancer.vip, amphora, subnet) - return amp_data - - def revert(self, result, loadbalancer, amphora, subnet, *args, **kwargs): - """Handle a failure to plumb a vip.""" - - if isinstance(result, failure.Failure): - return - LOG.warning("Unable to plug VIP for amphora id %s " - "load balancer id %s", - amphora.id, loadbalancer.id) - - try: - amphora.vrrp_port_id = result.vrrp_port_id - amphora.ha_port_id = result.ha_port_id - - self.network_driver.unplug_aap_port(loadbalancer.vip, - amphora, subnet) - except Exception as e: - LOG.error('Failed to unplug AAP port. 
Resources may still be in ' - 'use for VIP: %s due to error: %s', loadbalancer.vip, - str(e)) - - -class UnplugVIP(BaseNetworkTask): - """Task to unplug the vip.""" - - def execute(self, loadbalancer): - """Unplug the vip.""" - - LOG.debug("Unplug vip on amphora") - try: - self.network_driver.unplug_vip(loadbalancer, loadbalancer.vip) - except Exception: - LOG.exception("Unable to unplug vip from load balancer %s", - loadbalancer.id) - - -class AllocateVIP(BaseNetworkTask): - """Task to allocate a VIP.""" - - def execute(self, loadbalancer): - """Allocate a vip to the loadbalancer.""" - - LOG.debug("Allocating vip port id %s, subnet id %s, ip address %s for " - "load balancer %s", - loadbalancer.vip.port_id, - loadbalancer.vip.subnet_id, - loadbalancer.vip.ip_address, - loadbalancer.id) - # allocated_vips returns (vip, add_vips), skipping the 2nd element as - # amphorav1 doesn't support add_vips - vip = self.network_driver.allocate_vip(loadbalancer)[0] - LOG.info("Allocated vip with port id %s, subnet id %s, ip address %s " - "for load balancer %s", - loadbalancer.vip.port_id, - loadbalancer.vip.subnet_id, - loadbalancer.vip.ip_address, - loadbalancer.id) - return vip - - def revert(self, result, loadbalancer, *args, **kwargs): - """Handle a failure to allocate vip.""" - - if isinstance(result, failure.Failure): - LOG.exception("Unable to allocate VIP") - return - vip = result - LOG.warning("Deallocating vip %s", vip.ip_address) - try: - self.network_driver.deallocate_vip(vip) - except Exception as e: - LOG.error("Failed to deallocate VIP. Resources may still " - "be in use from vip: %(vip)s due to error: %(except)s", - {'vip': vip.ip_address, 'except': str(e)}) - - -class AllocateVIPforFailover(AllocateVIP): - """Task to allocate/validate the VIP for a failover flow.""" - - def revert(self, result, loadbalancer, *args, **kwargs): - """Handle a failure to allocate vip.""" - - if isinstance(result, failure.Failure): - LOG.exception("Unable to allocate VIP") - return - vip = result - LOG.info("Failover revert is not deallocating vip %s because this is " - "a failover.", vip.ip_address) - - -class DeallocateVIP(BaseNetworkTask): - """Task to deallocate a VIP.""" - - def execute(self, loadbalancer): - """Deallocate a VIP.""" - - LOG.debug("Deallocating a VIP %s", loadbalancer.vip.ip_address) - - # NOTE(blogan): this is kind of ugly but sufficient for now. Drivers - # will need access to the load balancer that the vip is/was attached - # to. However the data model serialization for the vip does not give a - # backref to the loadbalancer if accessed through the loadbalancer. 
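# The NOTE above, restated: when the VIP is reached through the load
# balancer, its serialized form carries no load_balancer backref, so the
# task reattaches it by hand before calling the network driver. A toy
# equivalent with plain placeholder classes (not Octavia data models):
class FakeVip:
    load_balancer = None

class FakeLoadBalancer:
    def __init__(self):
        self.vip = FakeVip()

lb = FakeLoadBalancer()
lb.vip.load_balancer = lb  # restore the backref the driver expects
assert lb.vip.load_balancer is lb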
- vip = loadbalancer.vip - vip.load_balancer = loadbalancer - self.network_driver.deallocate_vip(vip) - - -class UpdateVIP(BaseNetworkTask): - """Task to update a VIP.""" - - def execute(self, loadbalancer): - LOG.debug("Updating VIP of load_balancer %s.", loadbalancer.id) - - self.network_driver.update_vip(loadbalancer) - - -class UpdateVIPForDelete(BaseNetworkTask): - """Task to update a VIP for listener delete flows.""" - - def execute(self, loadbalancer): - LOG.debug("Updating VIP for listener delete on load_balancer %s.", - loadbalancer.id) - - self.network_driver.update_vip(loadbalancer, for_delete=True) - - -class GetAmphoraNetworkConfigs(BaseNetworkTask): - """Task to retrieve amphora network details.""" - - def execute(self, loadbalancer, amphora=None): - LOG.debug("Retrieving vip network details.") - return self.network_driver.get_network_configs(loadbalancer, - amphora=amphora) - - -class GetAmphoraNetworkConfigsByID(BaseNetworkTask): - """Task to retrieve amphora network details.""" - - def execute(self, loadbalancer_id, amphora_id=None): - LOG.debug("Retrieving vip network details.") - amp_repo = repositories.AmphoraRepository() - loadbalancer = self.lb_repo.get(db_apis.get_session(), - id=loadbalancer_id) - amphora = amp_repo.get(db_apis.get_session(), id=amphora_id) - return self.network_driver.get_network_configs(loadbalancer, - amphora=amphora) - - -class GetAmphoraeNetworkConfigs(BaseNetworkTask): - """Task to retrieve amphorae network details.""" - - def execute(self, loadbalancer_id): - LOG.debug("Retrieving vip network details.") - loadbalancer = self.lb_repo.get(db_apis.get_session(), - id=loadbalancer_id) - return self.network_driver.get_network_configs(loadbalancer) - - -class FailoverPreparationForAmphora(BaseNetworkTask): - """Task to prepare an amphora for failover.""" - - def execute(self, amphora): - LOG.debug("Prepare amphora %s for failover.", amphora.id) - - self.network_driver.failover_preparation(amphora) - - -class RetrievePortIDsOnAmphoraExceptLBNetwork(BaseNetworkTask): - """Task retrieving all the port ids on an amphora, except lb network.""" - - def execute(self, amphora): - LOG.debug("Retrieve all but the lb network port id on amphora %s.", - amphora.id) - - interfaces = self.network_driver.get_plugged_networks( - compute_id=amphora.compute_id) - - ports = [] - for interface_ in interfaces: - if interface_.port_id not in ports: - port = self.network_driver.get_port(port_id=interface_.port_id) - ips = port.fixed_ips - lb_network = False - for ip in ips: - if ip.ip_address == amphora.lb_network_ip: - lb_network = True - if not lb_network: - ports.append(port) - - return ports - - -class PlugPorts(BaseNetworkTask): - """Task to plug neutron ports into a compute instance.""" - - def execute(self, amphora, ports): - for port in ports: - LOG.debug('Plugging port ID: %(port_id)s into compute instance: ' - '%(compute_id)s.', - {'port_id': port.id, 'compute_id': amphora.compute_id}) - self.network_driver.plug_port(amphora, port) - - -class ApplyQos(BaseNetworkTask): - """Apply Quality of Services to the VIP""" - - def _apply_qos_on_vrrp_ports(self, loadbalancer, amps_data, qos_policy_id, - is_revert=False, request_qos_id=None): - """Call network driver to apply QoS Policy on the vrrp ports.""" - if not amps_data: - amps_data = loadbalancer.amphorae - - amps_data = [amp - for amp in amps_data - if amp.status == constants.AMPHORA_ALLOCATED] - - apply_qos = ApplyQosAmphora() - for amp_data in amps_data: - apply_qos._apply_qos_on_vrrp_port(loadbalancer, amp_data, 
- qos_policy_id) - - def execute(self, loadbalancer, amps_data=None, update_dict=None): - """Apply qos policy on the vrrp ports which are related with vip.""" - qos_policy_id = loadbalancer.vip.qos_policy_id - if not qos_policy_id and ( - not update_dict or ( - 'vip' not in update_dict or - 'qos_policy_id' not in update_dict['vip'])): - return - self._apply_qos_on_vrrp_ports(loadbalancer, amps_data, qos_policy_id) - - def revert(self, result, loadbalancer, amps_data=None, update_dict=None, - *args, **kwargs): - """Handle a failure to apply QoS to VIP""" - request_qos_id = loadbalancer.vip.qos_policy_id - orig_lb = self.task_utils.get_current_loadbalancer_from_db( - loadbalancer.id) - orig_qos_id = orig_lb.vip.qos_policy_id - if request_qos_id != orig_qos_id: - self._apply_qos_on_vrrp_ports(loadbalancer, amps_data, orig_qos_id, - is_revert=True, - request_qos_id=request_qos_id) - - -class ApplyQosAmphora(BaseNetworkTask): - """Apply Quality of Services to the VIP""" - - def _apply_qos_on_vrrp_port(self, loadbalancer, amp_data, qos_policy_id, - is_revert=False, request_qos_id=None): - """Call network driver to apply QoS Policy on the vrrp ports.""" - try: - self.network_driver.apply_qos_on_port(qos_policy_id, - amp_data.vrrp_port_id) - except Exception: - if not is_revert: - raise - LOG.warning('Failed to undo qos policy %(qos_id)s ' - 'on vrrp port: %(port)s from ' - 'amphorae: %(amp)s', - {'qos_id': request_qos_id, - 'port': amp_data.vrrp_port_id, - 'amp': [amp.id for amp in amp_data]}) - - def execute(self, loadbalancer, amp_data=None, update_dict=None): - """Apply qos policy on the vrrp ports which are related with vip.""" - qos_policy_id = loadbalancer.vip.qos_policy_id - if not qos_policy_id and ( - update_dict and ( - 'vip' not in update_dict or - 'qos_policy_id' not in update_dict['vip'])): - return - self._apply_qos_on_vrrp_port(loadbalancer, amp_data, qos_policy_id) - - def revert(self, result, loadbalancer, amp_data=None, update_dict=None, - *args, **kwargs): - """Handle a failure to apply QoS to VIP""" - try: - request_qos_id = loadbalancer.vip.qos_policy_id - orig_lb = self.task_utils.get_current_loadbalancer_from_db( - loadbalancer.id) - orig_qos_id = orig_lb.vip.qos_policy_id - if request_qos_id != orig_qos_id: - self._apply_qos_on_vrrp_port(loadbalancer, amp_data, - orig_qos_id, is_revert=True, - request_qos_id=request_qos_id) - except Exception as e: - LOG.error('Failed to remove QoS policy: %s from port: %s due ' - 'to error: %s', orig_qos_id, amp_data.vrrp_port_id, - str(e)) - - -class DeletePort(BaseNetworkTask): - """Task to delete a network port.""" - - @tenacity.retry(retry=tenacity.retry_if_exception_type(), - stop=tenacity.stop_after_attempt( - CONF.networking.max_retries), - wait=tenacity.wait_exponential( - multiplier=CONF.networking.retry_backoff, - min=CONF.networking.retry_interval, - max=CONF.networking.retry_max), reraise=True) - def execute(self, port_id, passive_failure=False): - """Delete the network port.""" - if port_id is None: - return - if self.execute.retry.statistics.get(constants.ATTEMPT_NUMBER, 1) == 1: - LOG.debug("Deleting network port %s", port_id) - else: - LOG.warning('Retrying network port %s delete attempt %s of %s.', - port_id, - self.execute.retry.statistics[ - constants.ATTEMPT_NUMBER], - self.execute.retry.stop.max_attempt_number) - # Let the Taskflow engine know we are working and alive - # Don't use get with a default for 'attempt_number', we need to fail - # if that number is missing. 
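# A standalone sketch of the retry pattern DeletePort uses here: tenacity
# retries the call with exponential backoff and exposes the attempt count
# through the wrapped function's statistics, which feeds the progress and
# log messages. MAX_ATTEMPTS, the failure counter and the fake error are
# all illustrative.
import tenacity

MAX_ATTEMPTS = 3
_failures_left = [2]  # simulate two transient failures

@tenacity.retry(retry=tenacity.retry_if_exception_type(),
                stop=tenacity.stop_after_attempt(MAX_ATTEMPTS),
                wait=tenacity.wait_exponential(multiplier=0.1, min=0.1,
                                               max=1),
                reraise=True)
def delete_port(port_id):
    # tenacity populates 'attempt_number' on the wrapped function's
    # Retrying object, exactly what the task reads above.
    attempt = delete_port.retry.statistics.get('attempt_number', 1)
    if _failures_left[0]:
        _failures_left[0] -= 1
        raise RuntimeError('transient failure on attempt %d' % attempt)
    return 'port %s deleted on attempt %d' % (port_id, attempt)

print(delete_port('port-123'))  # -> port port-123 deleted on attempt 3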
- self.update_progress( - self.execute.retry.statistics[constants.ATTEMPT_NUMBER] / - self.execute.retry.stop.max_attempt_number) - try: - self.network_driver.delete_port(port_id) - except Exception: - if (self.execute.retry.statistics[constants.ATTEMPT_NUMBER] != - self.execute.retry.stop.max_attempt_number): - LOG.warning('Network port delete for port id: %s failed. ' - 'Retrying.', port_id) - raise - if passive_failure: - LOG.exception('Network port delete for port ID: %s failed. ' - 'This resource will be abandoned and should ' - 'manually be cleaned up once the ' - 'network service is functional.', port_id) - # Let's at least attempt to disable it so if the instance - # comes back from the dead it doesn't conflict with anything. - try: - self.network_driver.admin_down_port(port_id) - LOG.info('Successfully disabled (admin down) network port ' - '%s that failed to delete.', port_id) - except Exception: - LOG.warning('Attempt to disable (admin down) network port ' - '%s failed. The network service has failed. ' - 'Continuing.', port_id) - else: - LOG.exception('Network port delete for port ID: %s failed. ' - 'The network service has failed. ' - 'Aborting and reverting.', port_id) - raise - - -class CreateVIPBasePort(BaseNetworkTask): - """Task to create the VIP base port for an amphora.""" - - @tenacity.retry(retry=tenacity.retry_if_exception_type(), - stop=tenacity.stop_after_attempt( - CONF.networking.max_retries), - wait=tenacity.wait_exponential( - multiplier=CONF.networking.retry_backoff, - min=CONF.networking.retry_interval, - max=CONF.networking.retry_max), reraise=True) - def execute(self, vip, vip_sg_id, amphora_id): - port_name = constants.AMP_BASE_PORT_PREFIX + amphora_id - fixed_ips = [{constants.SUBNET_ID: vip.subnet_id}] - sg_id = [] - if vip_sg_id: - sg_id = [vip_sg_id] - port = self.network_driver.create_port( - vip.network_id, name=port_name, fixed_ips=fixed_ips, - secondary_ips=[vip.ip_address], security_group_ids=sg_id, - qos_policy_id=vip.qos_policy_id) - LOG.info('Created port %s with ID %s for amphora %s', - port_name, port.id, amphora_id) - return port - - def revert(self, result, vip, vip_sg_id, amphora_id, *args, **kwargs): - if isinstance(result, failure.Failure): - return - try: - port_name = constants.AMP_BASE_PORT_PREFIX + amphora_id - for port in result: - self.network_driver.delete_port(port.id) - LOG.info('Deleted port %s with ID %s for amphora %s due to a ' - 'revert.', port_name, port.id, amphora_id) - except Exception as e: - LOG.error('Failed to delete port %s. Resources may still be in ' - 'use for a port intended for amphora %s due to error ' - '%s. Search for a port named %s', - result, amphora_id, str(e), port_name) - - -class AdminDownPort(BaseNetworkTask): - - def execute(self, port_id): - try: - self.network_driver.set_port_admin_state_up(port_id, False) - except base.PortNotFound: - return - for i in range(CONF.networking.max_retries): - port = self.network_driver.get_port(port_id) - if port.status == constants.DOWN: - LOG.debug('Disabled port: %s', port_id) - return - LOG.debug('Port %s is %s instead of DOWN, waiting.', - port_id, port.status) - time.sleep(CONF.networking.retry_interval) - LOG.error('Port %s failed to go DOWN. Port status is still %s. 
' - 'Ignoring and continuing.', port_id, port.status) - - def revert(self, result, port_id, *args, **kwargs): - if isinstance(result, failure.Failure): - return - try: - self.network_driver.set_port_admin_state_up(port_id, True) - except Exception as e: - LOG.error('Failed to bring port %s admin up on revert due to: %s.', - port_id, str(e)) - - -class GetVIPSecurityGroupID(BaseNetworkTask): - - def execute(self, loadbalancer_id): - sg_name = utils.get_vip_security_group_name(loadbalancer_id) - try: - security_group = self.network_driver.get_security_group(sg_name) - if security_group: - return security_group.id - except base.SecurityGroupNotFound: - with excutils.save_and_reraise_exception() as ctxt: - if self.network_driver.sec_grp_enabled: - LOG.error('VIP security group %s was not found.', sg_name) - else: - ctxt.reraise = False - return None diff --git a/octavia/controller/worker/v1/tasks/retry_tasks.py b/octavia/controller/worker/v1/tasks/retry_tasks.py deleted file mode 100644 index a17bf6dc42..0000000000 --- a/octavia/controller/worker/v1/tasks/retry_tasks.py +++ /dev/null @@ -1,74 +0,0 @@ -# Copyright 2019 Red Hat, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import time - -from oslo_log import log as logging -from taskflow import retry - -LOG = logging.getLogger(__name__) - - -class SleepingRetryTimesController(retry.Times): - """A retry controller to attempt subflow retries a number of times. - - This retry controller overrides the Times on_failure to inject a - sleep interval between retries. - It also adds a log message when all of the retries are exhausted. - - :param attempts: number of attempts to retry the associated subflow - before giving up - :type attempts: int - :param name: Meaningful name for this atom, should be something that is - distinguishable and understandable for notification, - debugging, storing and any other similar purposes. - :param provides: A set, string or list of items that - this will be providing (or could provide) to others, used - to correlate and associate the thing/s this atom - produces, if it produces anything at all. - :param requires: A set or list of required inputs for this atom's - ``execute`` method. - :param rebind: A dict of key/value pairs used to define argument - name conversions for inputs to this atom's ``execute`` - method. - :param revert_all: when provided this will cause the full flow to revert - when the number of attempts that have been tried - has been reached (when false, it will only locally - revert the associated subflow) - :type revert_all: bool - :param interval: Interval, in seconds, between retry attempts. 
- :type interval: int - """ - - def __init__(self, attempts=1, name=None, provides=None, requires=None, - auto_extract=True, rebind=None, revert_all=False, interval=1): - super().__init__( - attempts, name, provides, requires, auto_extract, rebind, - revert_all) - self._interval = interval - - def on_failure(self, history, *args, **kwargs): - if len(history) < self._attempts: - LOG.warning('%s attempt %s of %s failed. Sleeping %s seconds and ' - 'retrying.', - self.name[self.name.startswith('retry-') and - len('retry-'):], len(history), - self._attempts, self._interval) - time.sleep(self._interval) - return retry.RETRY - return self._revert_action - - def revert(self, history, *args, **kwargs): - LOG.error('%s retries with interval %s seconds have failed for %s. ' - 'Giving up.', len(history), self._interval, self.name) diff --git a/octavia/tests/unit/api/drivers/amphora_driver/v1/__init__.py b/octavia/tests/unit/api/drivers/amphora_driver/v1/__init__.py deleted file mode 100644 index 94e731d201..0000000000 --- a/octavia/tests/unit/api/drivers/amphora_driver/v1/__init__.py +++ /dev/null @@ -1,11 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. diff --git a/octavia/tests/unit/api/drivers/amphora_driver/v1/test_driver.py b/octavia/tests/unit/api/drivers/amphora_driver/v1/test_driver.py deleted file mode 100644 index 370da0bb99..0000000000 --- a/octavia/tests/unit/api/drivers/amphora_driver/v1/test_driver.py +++ /dev/null @@ -1,824 +0,0 @@ -# Copyright 2018 Rackspace, US Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
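# How a controller like the SleepingRetryTimesController removed above is
# attached in practice: taskflow flows take a retry controller via the
# 'retry' keyword, and its on_failure() picks between RETRY and reverting.
# A minimal sketch, assuming the controller class is importable; the flow,
# task and names below are illustrative stand-ins, not flows from this tree.
from taskflow import engines
from taskflow import task
from taskflow.patterns import linear_flow

class NoisyTask(task.Task):
    def execute(self):
        print('running %s' % self.name)

# The 'retry-' name prefix is stripped by on_failure() when logging.
flow = linear_flow.Flow(
    'example-subflow',
    retry=SleepingRetryTimesController(attempts=3, name='retry-example',
                                       interval=2))
flow.add(NoisyTask('plug-port'))
engines.run(flow)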
-from unittest import mock - -from octavia_lib.api.drivers import data_models as driver_dm -from octavia_lib.api.drivers import exceptions -from oslo_utils import uuidutils - -from octavia.api.drivers.amphora_driver.v1 import driver -from octavia.common import constants as consts -from octavia.network import base as network_base -from octavia.tests.common import sample_data_models -from octavia.tests.unit import base - - -class TestAmphoraDriver(base.TestRpc): - def setUp(self): - super().setUp() - self.amp_driver = driver.AmphoraProviderDriver() - self.sample_data = sample_data_models.SampleDriverDataModels() - - @mock.patch('octavia.common.utils.get_network_driver') - def test_create_vip_port(self, mock_get_net_driver): - mock_net_driver = mock.MagicMock() - mock_get_net_driver.return_value = mock_net_driver - mock_net_driver.allocate_vip.return_value = (self.sample_data.db_vip, - []) - - provider_vip_dict, add_vips = self.amp_driver.create_vip_port( - self.sample_data.lb_id, self.sample_data.project_id, - self.sample_data.provider_vip_dict) - - self.assertEqual(self.sample_data.provider_vip_dict, provider_vip_dict) - self.assertEqual([], add_vips) - - @mock.patch('octavia.common.utils.get_network_driver') - def test_create_vip_port_without_port_security_enabled( - self, mock_get_net_driver): - mock_net_driver = mock.MagicMock() - mock_get_net_driver.return_value = mock_net_driver - network = mock.MagicMock() - network.port_security_enabled = False - mock_net_driver.get_network.return_value = network - mock_net_driver.allocate_vip.return_value = self.sample_data.db_vip - - self.assertRaises(exceptions.DriverError, - self.amp_driver.create_vip_port, - self.sample_data.lb_id, self.sample_data.project_id, - self.sample_data.provider_vip_dict) - - @mock.patch('octavia.common.utils.get_network_driver') - def test_create_vip_port_failed(self, mock_get_net_driver): - mock_net_driver = mock.MagicMock() - mock_get_net_driver.return_value = mock_net_driver - mock_net_driver.allocate_vip.side_effect = ( - network_base.AllocateVIPException()) - - self.assertRaises(exceptions.DriverError, - self.amp_driver.create_vip_port, - self.sample_data.lb_id, self.sample_data.project_id, - self.sample_data.provider_vip_dict) - - @mock.patch('octavia.common.utils.get_network_driver') - def test_create_vip_with_additional_vips(self, mock_get_net_driver): - mock_net_driver = mock.MagicMock() - mock_get_net_driver.return_value = mock_net_driver - mock_net_driver.allocate_vip.return_value = self.sample_data.db_vip - - additional_vips = [{ - consts.SUBNET_ID: uuidutils.generate_uuid() - }] - self.assertRaises(exceptions.UnsupportedOptionError, - self.amp_driver.create_vip_port, - self.sample_data.lb_id, self.sample_data.project_id, - self.sample_data.provider_vip_dict, additional_vips) - - # Load Balancer - @mock.patch('oslo_messaging.RPCClient.cast') - def test_loadbalancer_create(self, mock_cast): - provider_lb = driver_dm.LoadBalancer( - loadbalancer_id=self.sample_data.lb_id) - self.amp_driver.loadbalancer_create(provider_lb) - payload = {consts.LOAD_BALANCER_ID: self.sample_data.lb_id, - consts.FLAVOR: None, - consts.AVAILABILITY_ZONE: None} - mock_cast.assert_called_with({}, 'create_load_balancer', **payload) - - @mock.patch('oslo_messaging.RPCClient.cast') - def test_loadbalancer_delete(self, mock_cast): - provider_lb = driver_dm.LoadBalancer( - loadbalancer_id=self.sample_data.lb_id) - self.amp_driver.loadbalancer_delete(provider_lb) - payload = {consts.LOAD_BALANCER_ID: self.sample_data.lb_id, - 'cascade': 
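# These driver tests assert on RPCClient.cast payloads. The pattern under
# test, reduced to its core; the fake transport URL, topic and IDs are
# placeholders for illustration only.
import oslo_messaging as messaging
from oslo_config import cfg

transport = messaging.get_rpc_transport(cfg.CONF, url='fake://')
target = messaging.Target(topic='octavia-worker', version='1.0')
client = messaging.RPCClient(transport, target)
# cast() is fire-and-forget: no reply is awaited, which is why the driver
# methods return immediately after enqueueing the work.
client.cast({}, 'create_load_balancer',
            load_balancer_id='lb-uuid', flavor=None, availability_zone=None)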
False} - mock_cast.assert_called_with({}, 'delete_load_balancer', **payload) - - @mock.patch('oslo_messaging.RPCClient.cast') - def test_loadbalancer_failover(self, mock_cast): - self.amp_driver.loadbalancer_failover(self.sample_data.lb_id) - payload = {consts.LOAD_BALANCER_ID: self.sample_data.lb_id} - mock_cast.assert_called_with({}, 'failover_load_balancer', **payload) - - @mock.patch('oslo_messaging.RPCClient.cast') - def test_loadbalancer_update(self, mock_cast): - old_provider_lb = driver_dm.LoadBalancer( - loadbalancer_id=self.sample_data.lb_id) - provider_lb = driver_dm.LoadBalancer( - loadbalancer_id=self.sample_data.lb_id, admin_state_up=True) - lb_dict = {'enabled': True} - self.amp_driver.loadbalancer_update(old_provider_lb, provider_lb) - payload = {consts.LOAD_BALANCER_ID: self.sample_data.lb_id, - consts.LOAD_BALANCER_UPDATES: lb_dict} - mock_cast.assert_called_with({}, 'update_load_balancer', **payload) - - @mock.patch('oslo_messaging.RPCClient.cast') - def test_loadbalancer_update_name(self, mock_cast): - old_provider_lb = driver_dm.LoadBalancer( - loadbalancer_id=self.sample_data.lb_id) - provider_lb = driver_dm.LoadBalancer( - loadbalancer_id=self.sample_data.lb_id, name='Great LB') - lb_dict = {'name': 'Great LB'} - self.amp_driver.loadbalancer_update(old_provider_lb, provider_lb) - payload = {consts.LOAD_BALANCER_ID: self.sample_data.lb_id, - consts.LOAD_BALANCER_UPDATES: lb_dict} - mock_cast.assert_called_with({}, 'update_load_balancer', **payload) - - @mock.patch('oslo_messaging.RPCClient.cast') - def test_loadbalancer_update_qos(self, mock_cast): - qos_policy_id = uuidutils.generate_uuid() - old_provider_lb = driver_dm.LoadBalancer( - loadbalancer_id=self.sample_data.lb_id) - provider_lb = driver_dm.LoadBalancer( - loadbalancer_id=self.sample_data.lb_id, - vip_qos_policy_id=qos_policy_id) - lb_dict = {'vip': {'qos_policy_id': qos_policy_id}} - self.amp_driver.loadbalancer_update(old_provider_lb, provider_lb) - payload = {consts.LOAD_BALANCER_ID: self.sample_data.lb_id, - consts.LOAD_BALANCER_UPDATES: lb_dict} - mock_cast.assert_called_with({}, 'update_load_balancer', **payload) - - # Listener - @mock.patch('oslo_messaging.RPCClient.cast') - def test_listener_create(self, mock_cast): - provider_listener = driver_dm.Listener( - listener_id=self.sample_data.listener1_id, - protocol=consts.PROTOCOL_HTTPS, - alpn_protocols=consts.AMPHORA_SUPPORTED_ALPN_PROTOCOLS) - self.amp_driver.listener_create(provider_listener) - payload = {consts.LISTENER_ID: self.sample_data.listener1_id} - mock_cast.assert_called_with({}, 'create_listener', **payload) - - @mock.patch('oslo_messaging.RPCClient.cast') - def test_listener_create_unsupported_alpn(self, mock_cast): - provider_listener = driver_dm.Listener( - listener_id=self.sample_data.listener1_id, - protocol=consts.PROTOCOL_HTTPS) - provider_listener.alpn_protocols = ['http/1.1', 'eureka'] - self.assertRaises( - exceptions.UnsupportedOptionError, - self.amp_driver.listener_create, - provider_listener) - mock_cast.assert_not_called() - - @mock.patch('oslo_messaging.RPCClient.cast') - def test_listener_create_unsupported_protocol(self, mock_cast): - provider_listener = driver_dm.Listener( - listener_id=self.sample_data.listener1_id, - protocol='UNSUPPORTED_PROTO') - self.assertRaises( - exceptions.UnsupportedOptionError, - self.amp_driver.listener_create, - provider_listener) - mock_cast.assert_not_called() - - @mock.patch('oslo_messaging.RPCClient.cast') - def test_listener_delete(self, mock_cast): - provider_listener = 
driver_dm.Listener( - listener_id=self.sample_data.listener1_id) - self.amp_driver.listener_delete(provider_listener) - payload = {consts.LISTENER_ID: self.sample_data.listener1_id} - mock_cast.assert_called_with({}, 'delete_listener', **payload) - - @mock.patch('oslo_messaging.RPCClient.cast') - def test_listener_update(self, mock_cast): - old_provider_listener = driver_dm.Listener( - listener_id=self.sample_data.listener1_id) - provider_listener = driver_dm.Listener( - listener_id=self.sample_data.listener1_id, admin_state_up=False) - listener_dict = {'enabled': False} - self.amp_driver.listener_update(old_provider_listener, - provider_listener) - payload = {consts.LISTENER_ID: self.sample_data.listener1_id, - consts.LISTENER_UPDATES: listener_dict} - mock_cast.assert_called_with({}, 'update_listener', **payload) - - @mock.patch('oslo_messaging.RPCClient.cast') - def test_listener_update_name(self, mock_cast): - old_provider_listener = driver_dm.Listener( - listener_id=self.sample_data.listener1_id) - provider_listener = driver_dm.Listener( - listener_id=self.sample_data.listener1_id, name='Great Listener') - listener_dict = {'name': 'Great Listener'} - self.amp_driver.listener_update(old_provider_listener, - provider_listener) - payload = {consts.LISTENER_ID: self.sample_data.listener1_id, - consts.LISTENER_UPDATES: listener_dict} - mock_cast.assert_called_with({}, 'update_listener', **payload) - - @mock.patch('oslo_messaging.RPCClient.cast') - def test_listener_update_unsupported_alpn(self, mock_cast): - old_provider_listener = driver_dm.Listener( - listener_id=self.sample_data.listener1_id) - provider_listener = driver_dm.Listener( - listener_id=self.sample_data.listener1_id, - alpn_protocols=['http/1.1', 'eureka']) - self.assertRaises( - exceptions.UnsupportedOptionError, - self.amp_driver.listener_update, - old_provider_listener, - provider_listener) - - # Pool - @mock.patch('oslo_messaging.RPCClient.cast') - def test_pool_create(self, mock_cast): - provider_pool = driver_dm.Pool( - pool_id=self.sample_data.pool1_id, - lb_algorithm=consts.LB_ALGORITHM_ROUND_ROBIN, - alpn_protocols=consts.AMPHORA_SUPPORTED_ALPN_PROTOCOLS) - self.amp_driver.pool_create(provider_pool) - payload = {consts.POOL_ID: self.sample_data.pool1_id} - mock_cast.assert_called_with({}, 'create_pool', **payload) - - @mock.patch('oslo_messaging.RPCClient.cast') - def test_pool_create_unsupported_algorithm(self, mock_cast): - provider_pool = driver_dm.Pool( - pool_id=self.sample_data.pool1_id) - provider_pool.lb_algorithm = 'foo' - self.assertRaises( - exceptions.UnsupportedOptionError, - self.amp_driver.pool_create, - provider_pool) - mock_cast.assert_not_called() - - @mock.patch('oslo_messaging.RPCClient.cast') - def test_pool_create_unsupported_alpn(self, mock_cast): - provider_pool = driver_dm.Pool(pool_id=self.sample_data.pool1_id) - provider_pool.alpn_protocols = ['http/1.1', 'eureka'] - self.assertRaises( - exceptions.UnsupportedOptionError, - self.amp_driver.pool_create, - provider_pool) - mock_cast.assert_not_called() - - @mock.patch('oslo_messaging.RPCClient.cast') - def test_pool_delete(self, mock_cast): - provider_pool = driver_dm.Pool( - pool_id=self.sample_data.pool1_id) - self.amp_driver.pool_delete(provider_pool) - payload = {consts.POOL_ID: self.sample_data.pool1_id} - mock_cast.assert_called_with({}, 'delete_pool', **payload) - - @mock.patch('oslo_messaging.RPCClient.cast') - def test_pool_update(self, mock_cast): - old_provider_pool = driver_dm.Pool( - pool_id=self.sample_data.pool1_id) - 
provider_pool = driver_dm.Pool( - pool_id=self.sample_data.pool1_id, admin_state_up=True, - ca_tls_container_data='CA DATA', ca_tls_container_ref='CA REF', - crl_container_data='CRL DATA', crl_container_ref='CRL REF', - description='TEST DESCRIPTION', name='TEST NAME', - lb_algorithm=consts.LB_ALGORITHM_SOURCE_IP, - session_persistence='FAKE SP', tls_container_data='TLS DATA', - tls_container_ref='TLS REF', tls_enabled=False) - pool_dict = {'description': 'TEST DESCRIPTION', - 'lb_algorithm': 'SOURCE_IP', 'name': 'TEST NAME', - 'session_persistence': 'FAKE SP', 'tls_enabled': False, - 'enabled': True, 'tls_certificate_id': 'TLS REF', - 'ca_tls_certificate_id': 'CA REF', - 'crl_container_id': 'CRL REF'} - self.amp_driver.pool_update(old_provider_pool, provider_pool) - payload = {consts.POOL_ID: self.sample_data.pool1_id, - consts.POOL_UPDATES: pool_dict} - mock_cast.assert_called_with({}, 'update_pool', **payload) - - @mock.patch('oslo_messaging.RPCClient.cast') - def test_pool_update_name(self, mock_cast): - old_provider_pool = driver_dm.Pool( - pool_id=self.sample_data.pool1_id) - provider_pool = driver_dm.Pool( - pool_id=self.sample_data.pool1_id, name='Great pool', - admin_state_up=True, tls_enabled=True) - pool_dict = {'name': 'Great pool', - 'enabled': True, - 'tls_enabled': True} - self.amp_driver.pool_update(old_provider_pool, provider_pool) - payload = {consts.POOL_ID: self.sample_data.pool1_id, - consts.POOL_UPDATES: pool_dict} - mock_cast.assert_called_with({}, 'update_pool', **payload) - - @mock.patch('oslo_messaging.RPCClient.cast') - def test_pool_update_unsupported_algorithm(self, mock_cast): - old_provider_pool = driver_dm.Pool( - pool_id=self.sample_data.pool1_id) - provider_pool = driver_dm.Pool( - pool_id=self.sample_data.pool1_id) - provider_pool.lb_algorithm = 'foo' - self.assertRaises( - exceptions.UnsupportedOptionError, - self.amp_driver.pool_update, - old_provider_pool, - provider_pool) - mock_cast.assert_not_called() - - @mock.patch('oslo_messaging.RPCClient.cast') - def test_pool_update_unsupported_alpn(self, mock_cast): - old_provider_pool = driver_dm.Pool(pool_id=self.sample_data.pool1_id) - provider_pool = driver_dm.Pool( - listener_id=self.sample_data.pool1_id, - alpn_protocols=['http/1.1', 'eureka']) - self.assertRaises( - exceptions.UnsupportedOptionError, - self.amp_driver.pool_update, - old_provider_pool, - provider_pool) - - # Member - @mock.patch('octavia.db.api.get_session') - @mock.patch('octavia.db.repositories.PoolRepository.get') - @mock.patch('oslo_messaging.RPCClient.cast') - def test_member_create(self, mock_cast, mock_pool_get, mock_session): - provider_member = driver_dm.Member( - member_id=self.sample_data.member1_id) - self.amp_driver.member_create(provider_member) - payload = {consts.MEMBER_ID: self.sample_data.member1_id} - mock_cast.assert_called_with({}, 'create_member', **payload) - - @mock.patch('octavia.db.api.get_session') - @mock.patch('octavia.db.repositories.PoolRepository.get') - @mock.patch('oslo_messaging.RPCClient.cast') - def test_member_create_udp_ipv4(self, mock_cast, mock_pool_get, - mock_session): - mock_lb = mock.MagicMock() - mock_lb.vip = mock.MagicMock() - mock_lb.vip.ip_address = "192.0.1.1" - mock_listener = mock.MagicMock() - mock_listener.load_balancer = mock_lb - mock_pool = mock.MagicMock() - mock_pool.protocol = consts.PROTOCOL_UDP - mock_pool.listeners = [mock_listener] - mock_pool_get.return_value = mock_pool - - provider_member = driver_dm.Member( - member_id=self.sample_data.member1_id, - 
address="192.0.2.1") - self.amp_driver.member_create(provider_member) - payload = {consts.MEMBER_ID: self.sample_data.member1_id} - mock_cast.assert_called_with({}, 'create_member', **payload) - - @mock.patch('octavia.db.api.get_session') - @mock.patch('octavia.db.repositories.PoolRepository.get') - @mock.patch('oslo_messaging.RPCClient.cast') - def test_member_create_udp_ipv4_ipv6(self, mock_cast, mock_pool_get, - mock_session): - mock_lb = mock.MagicMock() - mock_lb.vip = mock.MagicMock() - mock_lb.vip.ip_address = "fe80::1" - mock_listener = mock.MagicMock() - mock_listener.load_balancer = mock_lb - mock_pool = mock.MagicMock() - mock_pool.protocol = consts.PROTOCOL_UDP - mock_pool.listeners = [mock_listener] - mock_pool_get.return_value = mock_pool - - provider_member = driver_dm.Member( - member_id=self.sample_data.member1_id, - address="192.0.2.1") - self.assertRaises(exceptions.UnsupportedOptionError, - self.amp_driver.member_create, - provider_member) - - @mock.patch('oslo_messaging.RPCClient.cast') - def test_member_delete(self, mock_cast): - provider_member = driver_dm.Member( - member_id=self.sample_data.member1_id) - self.amp_driver.member_delete(provider_member) - payload = {consts.MEMBER_ID: self.sample_data.member1_id} - mock_cast.assert_called_with({}, 'delete_member', **payload) - - @mock.patch('oslo_messaging.RPCClient.cast') - def test_member_update(self, mock_cast): - old_provider_member = driver_dm.Member( - member_id=self.sample_data.member1_id) - provider_member = driver_dm.Member( - member_id=self.sample_data.member1_id, admin_state_up=True) - member_dict = {'enabled': True} - self.amp_driver.member_update(old_provider_member, provider_member) - payload = {consts.MEMBER_ID: self.sample_data.member1_id, - consts.MEMBER_UPDATES: member_dict} - mock_cast.assert_called_with({}, 'update_member', **payload) - - @mock.patch('oslo_messaging.RPCClient.cast') - def test_member_update_name(self, mock_cast): - old_provider_member = driver_dm.Member( - member_id=self.sample_data.member1_id) - provider_member = driver_dm.Member( - member_id=self.sample_data.member1_id, name='Great member') - member_dict = {'name': 'Great member'} - self.amp_driver.member_update(old_provider_member, provider_member) - payload = {consts.MEMBER_ID: self.sample_data.member1_id, - consts.MEMBER_UPDATES: member_dict} - mock_cast.assert_called_with({}, 'update_member', **payload) - - @mock.patch('octavia.db.api.get_session') - @mock.patch('octavia.db.repositories.PoolRepository.get') - @mock.patch('oslo_messaging.RPCClient.cast') - def test_member_batch_update(self, mock_cast, mock_pool_get, mock_session): - mock_pool = mock.MagicMock() - mock_pool.members = self.sample_data.db_pool1_members - mock_pool_get.return_value = mock_pool - - prov_mem_update = driver_dm.Member( - member_id=self.sample_data.member2_id, - pool_id=self.sample_data.pool1_id, admin_state_up=False, - address='192.0.2.17', monitor_address='192.0.2.77', - protocol_port=80, name='updated-member2') - prov_new_member = driver_dm.Member( - member_id=self.sample_data.member3_id, - pool_id=self.sample_data.pool1_id, - address='192.0.2.18', monitor_address='192.0.2.28', - protocol_port=80, name='member3') - prov_members = [prov_mem_update, prov_new_member] - - update_mem_dict = {'ip_address': '192.0.2.17', - 'name': 'updated-member2', - 'monitor_address': '192.0.2.77', - 'id': self.sample_data.member2_id, - 'enabled': False, - 'protocol_port': 80, - 'pool_id': self.sample_data.pool1_id} - - self.amp_driver.member_batch_update( - 
self.sample_data.pool1_id, prov_members) - - payload = {'old_member_ids': [self.sample_data.member1_id], - 'new_member_ids': [self.sample_data.member3_id], - 'updated_members': [update_mem_dict]} - mock_cast.assert_called_with({}, 'batch_update_members', **payload) - - @mock.patch('octavia.db.api.get_session') - @mock.patch('octavia.db.repositories.PoolRepository.get') - @mock.patch('oslo_messaging.RPCClient.cast') - def test_member_batch_update_no_admin_addr(self, mock_cast, - mock_pool_get, mock_session): - mock_pool = mock.MagicMock() - mock_pool.members = self.sample_data.db_pool1_members - mock_pool_get.return_value = mock_pool - - prov_mem_update = driver_dm.Member( - member_id=self.sample_data.member2_id, - pool_id=self.sample_data.pool1_id, - monitor_address='192.0.2.77', - protocol_port=80, name='updated-member2') - prov_new_member = driver_dm.Member( - member_id=self.sample_data.member3_id, - pool_id=self.sample_data.pool1_id, - address='192.0.2.18', monitor_address='192.0.2.28', - protocol_port=80, name='member3') - prov_members = [prov_mem_update, prov_new_member] - - update_mem_dict = {'name': 'updated-member2', - 'monitor_address': '192.0.2.77', - 'id': self.sample_data.member2_id, - 'protocol_port': 80, - 'pool_id': self.sample_data.pool1_id} - - self.amp_driver.member_batch_update( - self.sample_data.pool1_id, prov_members) - - payload = {'old_member_ids': [self.sample_data.member1_id], - 'new_member_ids': [self.sample_data.member3_id], - 'updated_members': [update_mem_dict]} - mock_cast.assert_called_with({}, 'batch_update_members', **payload) - - @mock.patch('octavia.db.api.get_session') - @mock.patch('octavia.db.repositories.PoolRepository.get') - @mock.patch('oslo_messaging.RPCClient.cast') - def test_member_batch_update_clear_already_empty( - self, mock_cast, mock_pool_get, mock_session): - """Expect that we will pass an empty payload if directed. - - Logic for whether or not to attempt this will be done above the driver - layer, so our driver is responsible to forward the request even if it - is a perceived no-op. 
- """ - mock_pool = mock.MagicMock() - mock_pool_get.return_value = mock_pool - - self.amp_driver.member_batch_update( - self.sample_data.pool1_id, []) - - payload = {'old_member_ids': [], - 'new_member_ids': [], - 'updated_members': []} - mock_cast.assert_called_with({}, 'batch_update_members', **payload) - - # Health Monitor - @mock.patch('oslo_messaging.RPCClient.cast') - def test_health_monitor_create(self, mock_cast): - provider_HM = driver_dm.HealthMonitor( - healthmonitor_id=self.sample_data.hm1_id) - self.amp_driver.health_monitor_create(provider_HM) - payload = {consts.HEALTH_MONITOR_ID: self.sample_data.hm1_id} - mock_cast.assert_called_with({}, 'create_health_monitor', **payload) - - @mock.patch('oslo_messaging.RPCClient.cast') - def test_health_monitor_delete(self, mock_cast): - provider_HM = driver_dm.HealthMonitor( - healthmonitor_id=self.sample_data.hm1_id) - self.amp_driver.health_monitor_delete(provider_HM) - payload = {consts.HEALTH_MONITOR_ID: self.sample_data.hm1_id} - mock_cast.assert_called_with({}, 'delete_health_monitor', **payload) - - @mock.patch('octavia.db.api.get_session') - @mock.patch('octavia.db.repositories.PoolRepository.get') - @mock.patch('oslo_messaging.RPCClient.cast') - def test_member_batch_update_udp_ipv4(self, mock_cast, mock_pool_get, - mock_session): - - mock_lb = mock.MagicMock() - mock_lb.vip = mock.MagicMock() - mock_lb.vip.ip_address = "192.0.1.1" - mock_listener = mock.MagicMock() - mock_listener.load_balancer = mock_lb - mock_pool = mock.MagicMock() - mock_pool.protocol = consts.PROTOCOL_UDP - mock_pool.listeners = [mock_listener] - mock_pool.members = self.sample_data.db_pool1_members - mock_pool_get.return_value = mock_pool - - prov_mem_update = driver_dm.Member( - member_id=self.sample_data.member2_id, - pool_id=self.sample_data.pool1_id, admin_state_up=False, - address='192.0.2.17', monitor_address='192.0.2.77', - protocol_port=80, name='updated-member2') - prov_new_member = driver_dm.Member( - member_id=self.sample_data.member3_id, - pool_id=self.sample_data.pool1_id, - address='192.0.2.18', monitor_address='192.0.2.28', - protocol_port=80, name='member3') - prov_members = [prov_mem_update, prov_new_member] - - update_mem_dict = {'ip_address': '192.0.2.17', - 'name': 'updated-member2', - 'monitor_address': '192.0.2.77', - 'id': self.sample_data.member2_id, - 'enabled': False, - 'protocol_port': 80, - 'pool_id': self.sample_data.pool1_id} - - self.amp_driver.member_batch_update( - self.sample_data.pool1_id, prov_members) - - payload = {'old_member_ids': [self.sample_data.member1_id], - 'new_member_ids': [self.sample_data.member3_id], - 'updated_members': [update_mem_dict]} - mock_cast.assert_called_with({}, 'batch_update_members', **payload) - - @mock.patch('octavia.db.api.get_session') - @mock.patch('octavia.db.repositories.PoolRepository.get') - @mock.patch('oslo_messaging.RPCClient.cast') - def test_member_batch_update_udp_ipv4_ipv6(self, mock_cast, mock_pool_get, - mock_session): - - mock_lb = mock.MagicMock() - mock_lb.vip = mock.MagicMock() - mock_lb.vip.ip_address = "192.0.1.1" - mock_listener = mock.MagicMock() - mock_listener.load_balancer = mock_lb - mock_pool = mock.MagicMock() - mock_pool.protocol = consts.PROTOCOL_UDP - mock_pool.listeners = [mock_listener] - mock_pool.members = self.sample_data.db_pool1_members - mock_pool_get.return_value = mock_pool - - prov_mem_update = driver_dm.Member( - member_id=self.sample_data.member2_id, - pool_id=self.sample_data.pool1_id, admin_state_up=False, - address='fe80::1', 
monitor_address='fe80::2', - protocol_port=80, name='updated-member2') - prov_new_member = driver_dm.Member( - member_id=self.sample_data.member3_id, - pool_id=self.sample_data.pool1_id, - address='192.0.2.18', monitor_address='192.0.2.28', - protocol_port=80, name='member3') - prov_members = [prov_mem_update, prov_new_member] - - self.assertRaises(exceptions.UnsupportedOptionError, - self.amp_driver.member_batch_update, - self.sample_data.pool1_id, prov_members) - - @mock.patch('oslo_messaging.RPCClient.cast') - def test_health_monitor_update(self, mock_cast): - old_provider_hm = driver_dm.HealthMonitor( - healthmonitor_id=self.sample_data.hm1_id) - provider_hm = driver_dm.HealthMonitor( - healthmonitor_id=self.sample_data.hm1_id, admin_state_up=True, - max_retries=1, max_retries_down=2) - hm_dict = {'enabled': True, 'rise_threshold': 1, 'fall_threshold': 2} - self.amp_driver.health_monitor_update(old_provider_hm, provider_hm) - payload = {consts.HEALTH_MONITOR_ID: self.sample_data.hm1_id, - consts.HEALTH_MONITOR_UPDATES: hm_dict} - mock_cast.assert_called_with({}, 'update_health_monitor', **payload) - - @mock.patch('oslo_messaging.RPCClient.cast') - def test_health_monitor_update_name(self, mock_cast): - old_provider_hm = driver_dm.HealthMonitor( - healthmonitor_id=self.sample_data.hm1_id) - provider_hm = driver_dm.HealthMonitor( - healthmonitor_id=self.sample_data.hm1_id, name='Great HM') - hm_dict = {'name': 'Great HM'} - self.amp_driver.health_monitor_update(old_provider_hm, provider_hm) - payload = {consts.HEALTH_MONITOR_ID: self.sample_data.hm1_id, - consts.HEALTH_MONITOR_UPDATES: hm_dict} - mock_cast.assert_called_with({}, 'update_health_monitor', **payload) - - # L7 Policy - @mock.patch('octavia.db.api.get_session') - @mock.patch('octavia.db.repositories.ListenerRepository.get') - @mock.patch('oslo_messaging.RPCClient.cast') - def test_l7policy_create(self, mock_cast, mock_listener_get, mock_session): - mock_listener = mock.MagicMock() - mock_listener.protocol = consts.PROTOCOL_HTTP - mock_listener_get.return_value = mock_listener - provider_l7policy = driver_dm.L7Policy( - l7policy_id=self.sample_data.l7policy1_id) - self.amp_driver.l7policy_create(provider_l7policy) - payload = {consts.L7POLICY_ID: self.sample_data.l7policy1_id} - mock_cast.assert_called_with({}, 'create_l7policy', **payload) - - @mock.patch('octavia.db.api.get_session') - @mock.patch('octavia.db.repositories.ListenerRepository.get') - def test_l7policy_create_invalid_listener_protocol(self, mock_listener_get, - mock_session): - mock_listener = mock.MagicMock() - mock_listener.protocol = consts.PROTOCOL_UDP - mock_listener_get.return_value = mock_listener - provider_l7policy = driver_dm.L7Policy( - l7policy_id=self.sample_data.l7policy1_id) - self.assertRaises(exceptions.UnsupportedOptionError, - self.amp_driver.l7policy_create, - provider_l7policy) - - @mock.patch('oslo_messaging.RPCClient.cast') - def test_l7policy_delete(self, mock_cast): - provider_l7policy = driver_dm.L7Policy( - l7policy_id=self.sample_data.l7policy1_id) - self.amp_driver.l7policy_delete(provider_l7policy) - payload = {consts.L7POLICY_ID: self.sample_data.l7policy1_id} - mock_cast.assert_called_with({}, 'delete_l7policy', **payload) - - @mock.patch('oslo_messaging.RPCClient.cast') - def test_l7policy_update(self, mock_cast): - old_provider_l7policy = driver_dm.L7Policy( - l7policy_id=self.sample_data.l7policy1_id) - provider_l7policy = driver_dm.L7Policy( - l7policy_id=self.sample_data.l7policy1_id, admin_state_up=True) - l7policy_dict = 
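# The UDP tests above reject members whose address family differs from the
# VIP's. That validation reduces to comparing IP versions; a stdlib sketch
# (the function name is illustrative, not the driver's):
import ipaddress

def mixed_ip_versions(vip_address, member_addresses):
    vip_version = ipaddress.ip_address(vip_address).version
    return any(ipaddress.ip_address(addr).version != vip_version
               for addr in member_addresses)

assert mixed_ip_versions('192.0.1.1', ['fe80::1', '192.0.2.18']) is True
assert mixed_ip_versions('192.0.1.1', ['192.0.2.17', '192.0.2.18']) is False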
{'enabled': True} - self.amp_driver.l7policy_update(old_provider_l7policy, - provider_l7policy) - payload = {consts.L7POLICY_ID: self.sample_data.l7policy1_id, - consts.L7POLICY_UPDATES: l7policy_dict} - mock_cast.assert_called_with({}, 'update_l7policy', **payload) - - @mock.patch('oslo_messaging.RPCClient.cast') - def test_l7policy_update_name(self, mock_cast): - old_provider_l7policy = driver_dm.L7Policy( - l7policy_id=self.sample_data.l7policy1_id) - provider_l7policy = driver_dm.L7Policy( - l7policy_id=self.sample_data.l7policy1_id, name='Great L7Policy') - l7policy_dict = {'name': 'Great L7Policy'} - self.amp_driver.l7policy_update(old_provider_l7policy, - provider_l7policy) - payload = {consts.L7POLICY_ID: self.sample_data.l7policy1_id, - consts.L7POLICY_UPDATES: l7policy_dict} - mock_cast.assert_called_with({}, 'update_l7policy', **payload) - - # L7 Rules - @mock.patch('oslo_messaging.RPCClient.cast') - def test_l7rule_create(self, mock_cast): - provider_l7rule = driver_dm.L7Rule( - l7rule_id=self.sample_data.l7rule1_id) - self.amp_driver.l7rule_create(provider_l7rule) - payload = {consts.L7RULE_ID: self.sample_data.l7rule1_id} - mock_cast.assert_called_with({}, 'create_l7rule', **payload) - - @mock.patch('oslo_messaging.RPCClient.cast') - def test_l7rule_delete(self, mock_cast): - provider_l7rule = driver_dm.L7Rule( - l7rule_id=self.sample_data.l7rule1_id) - self.amp_driver.l7rule_delete(provider_l7rule) - payload = {consts.L7RULE_ID: self.sample_data.l7rule1_id} - mock_cast.assert_called_with({}, 'delete_l7rule', **payload) - - @mock.patch('oslo_messaging.RPCClient.cast') - def test_l7rule_update(self, mock_cast): - old_provider_l7rule = driver_dm.L7Rule( - l7rule_id=self.sample_data.l7rule1_id) - provider_l7rule = driver_dm.L7Rule( - l7rule_id=self.sample_data.l7rule1_id, admin_state_up=True) - l7rule_dict = {'enabled': True} - self.amp_driver.l7rule_update(old_provider_l7rule, provider_l7rule) - payload = {consts.L7RULE_ID: self.sample_data.l7rule1_id, - consts.L7RULE_UPDATES: l7rule_dict} - mock_cast.assert_called_with({}, 'update_l7rule', **payload) - - @mock.patch('oslo_messaging.RPCClient.cast') - def test_l7rule_update_invert(self, mock_cast): - old_provider_l7rule = driver_dm.L7Rule( - l7rule_id=self.sample_data.l7rule1_id) - provider_l7rule = driver_dm.L7Rule( - l7rule_id=self.sample_data.l7rule1_id, invert=True) - l7rule_dict = {'invert': True} - self.amp_driver.l7rule_update(old_provider_l7rule, provider_l7rule) - payload = {consts.L7RULE_ID: self.sample_data.l7rule1_id, - consts.L7RULE_UPDATES: l7rule_dict} - mock_cast.assert_called_with({}, 'update_l7rule', **payload) - - # Flavor - def test_get_supported_flavor_metadata(self): - test_schema = { - "properties": { - "test_name": {"description": "Test description"}, - "test_name2": {"description": "Another description"}}} - ref_dict = {"test_name": "Test description", - "test_name2": "Another description"} - - # mock out the supported_flavor_metadata - with mock.patch('octavia.api.drivers.amphora_driver.flavor_schema.' - 'SUPPORTED_FLAVOR_SCHEMA', test_schema): - result = self.amp_driver.get_supported_flavor_metadata() - self.assertEqual(ref_dict, result) - - # Test for bad schema - with mock.patch('octavia.api.drivers.amphora_driver.flavor_schema.' 
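# get_supported_flavor_metadata, exercised by the flavor tests here, flattens
# a JSON-schema 'properties' map into {name: description}. The reduction is a
# one-line dict comprehension; the schema literal below is illustrative:
test_schema = {
    "properties": {
        "loadbalancer_topology": {"description": "LB topology"},
        "compute_flavor": {"description": "Compute flavor ID"}}}

metadata = {name: prop["description"]
            for name, prop in test_schema["properties"].items()}
assert metadata == {"loadbalancer_topology": "LB topology",
                    "compute_flavor": "Compute flavor ID"}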
- 'SUPPORTED_FLAVOR_SCHEMA', 'bogus'): - self.assertRaises(exceptions.DriverError, - self.amp_driver.get_supported_flavor_metadata) - - def test_validate_flavor(self): - ref_dict = {consts.LOADBALANCER_TOPOLOGY: consts.TOPOLOGY_SINGLE} - self.amp_driver.validate_flavor(ref_dict) - - # Test bad flavor metadata value is bad - ref_dict = {consts.LOADBALANCER_TOPOLOGY: 'bogus'} - self.assertRaises(exceptions.UnsupportedOptionError, - self.amp_driver.validate_flavor, - ref_dict) - - # Test bad flavor metadata key - ref_dict = {'bogus': 'bogus'} - self.assertRaises(exceptions.UnsupportedOptionError, - self.amp_driver.validate_flavor, - ref_dict) - - # Test for bad schema - with mock.patch('octavia.api.drivers.amphora_driver.flavor_schema.' - 'SUPPORTED_FLAVOR_SCHEMA', 'bogus'): - self.assertRaises(exceptions.DriverError, - self.amp_driver.validate_flavor, 'bogus') - - # Availability Zone - def test_get_supported_availability_zone_metadata(self): - test_schema = { - "properties": { - "test_name": {"description": "Test description"}, - "test_name2": {"description": "Another description"}}} - ref_dict = {"test_name": "Test description", - "test_name2": "Another description"} - - # mock out the supported_availability_zone_metadata - with mock.patch('octavia.api.drivers.amphora_driver.' - 'availability_zone_schema.' - 'SUPPORTED_AVAILABILITY_ZONE_SCHEMA', test_schema): - result = self.amp_driver.get_supported_availability_zone_metadata() - self.assertEqual(ref_dict, result) - - # Test for bad schema - with mock.patch('octavia.api.drivers.amphora_driver.' - 'availability_zone_schema.' - 'SUPPORTED_AVAILABILITY_ZONE_SCHEMA', 'bogus'): - self.assertRaises( - exceptions.DriverError, - self.amp_driver.get_supported_availability_zone_metadata) - - def test_validate_availability_zone(self): - with mock.patch('stevedore.driver.DriverManager.driver') as m_driver: - m_driver.validate_availability_zone.return_value = None - ref_dict = {consts.COMPUTE_ZONE: 'my_compute_zone'} - self.amp_driver.validate_availability_zone(ref_dict) - - # Test bad availability zone metadata key - ref_dict = {'bogus': 'bogus'} - self.assertRaises(exceptions.UnsupportedOptionError, - self.amp_driver.validate_availability_zone, - ref_dict) - - # Test for bad schema - with mock.patch('octavia.api.drivers.amphora_driver.' - 'availability_zone_schema.' - 'SUPPORTED_AVAILABILITY_ZONE_SCHEMA', 'bogus'): - self.assertRaises(exceptions.DriverError, - self.amp_driver.validate_availability_zone, - 'bogus') diff --git a/octavia/tests/unit/controller/healthmanager/test_health_manager.py b/octavia/tests/unit/controller/healthmanager/test_health_manager.py index bb9a6f82e7..2b1f15eb59 100644 --- a/octavia/tests/unit/controller/healthmanager/test_health_manager.py +++ b/octavia/tests/unit/controller/healthmanager/test_health_manager.py @@ -43,15 +43,13 @@ class TestHealthManager(base.TestCase): super().setUp() @mock.patch('octavia.db.api.wait_for_connection') - @mock.patch('octavia.controller.worker.v1.controller_worker.' - 'ControllerWorker.failover_amphora') @mock.patch('octavia.controller.worker.v2.controller_worker.' 'ControllerWorker.failover_amphora') @mock.patch('octavia.db.repositories.AmphoraHealthRepository.' 
'get_stale_amphora') @mock.patch('octavia.db.api.get_session') def test_health_check_stale_amphora(self, session_mock, get_stale_amp_mock, - failover_mockv2, failover_mock, + failover_mock, db_wait_mock): conf = oslo_fixture.Config(cfg.CONF) conf.config(group="health_manager", heartbeat_timeout=5) @@ -87,15 +85,13 @@ class TestHealthManager(base.TestCase): self.assertRaises(TestException, hm.health_check) self.assertEqual(4, mock_session.rollback.call_count) - @mock.patch('octavia.controller.worker.v1.controller_worker.' - 'ControllerWorker.failover_amphora') @mock.patch('octavia.controller.worker.v2.controller_worker.' 'ControllerWorker.failover_amphora') @mock.patch('octavia.db.repositories.AmphoraHealthRepository.' 'get_stale_amphora', return_value=None) @mock.patch('octavia.db.api.get_session') def test_health_check_nonstale_amphora(self, session_mock, - get_stale_amp_mock, failover_mockv2, + get_stale_amp_mock, failover_mock): get_stale_amp_mock.side_effect = [None, TestException('test')] @@ -104,20 +100,15 @@ class TestHealthManager(base.TestCase): hm.health_check() session_mock.assert_called_once_with(autocommit=False) - if CONF.api_settings.default_provider_driver == 'amphorav2': - self.assertFalse(failover_mockv2.called) - else: - self.assertFalse(failover_mock.called) + self.assertFalse(failover_mock.called) - @mock.patch('octavia.controller.worker.v1.controller_worker.' - 'ControllerWorker.failover_amphora') @mock.patch('octavia.controller.worker.v2.controller_worker.' 'ControllerWorker.failover_amphora') @mock.patch('octavia.db.repositories.AmphoraHealthRepository.' 'get_stale_amphora', return_value=None) @mock.patch('octavia.db.api.get_session') def test_health_check_exit(self, session_mock, get_stale_amp_mock, - failover_mockv2, failover_mock): + failover_mock): get_stale_amp_mock.return_value = None exit_event = threading.Event() @@ -125,20 +116,15 @@ class TestHealthManager(base.TestCase): hm.health_check() session_mock.assert_called_once_with(autocommit=False) - if CONF.api_settings.default_provider_driver == 'amphorav2': - self.assertFalse(failover_mockv2.called) - else: - self.assertFalse(failover_mock.called) + self.assertFalse(failover_mock.called) - @mock.patch('octavia.controller.worker.v1.controller_worker.' - 'ControllerWorker.failover_amphora') @mock.patch('octavia.controller.worker.v2.controller_worker.' 'ControllerWorker.failover_amphora') @mock.patch('octavia.db.repositories.AmphoraHealthRepository.' 'get_stale_amphora', return_value=None) @mock.patch('octavia.db.api.get_session') def test_health_check_db_error(self, session_mock, get_stale_amp_mock, - failover_mockv2, failover_mock): + failover_mock): get_stale_amp_mock.return_value = None mock_session = mock.MagicMock() diff --git a/octavia/tests/unit/controller/housekeeping/test_house_keeping.py b/octavia/tests/unit/controller/housekeeping/test_house_keeping.py index 3a10713ae4..baad2fb04a 100644 --- a/octavia/tests/unit/controller/housekeeping/test_house_keeping.py +++ b/octavia/tests/unit/controller/housekeeping/test_house_keeping.py @@ -158,73 +158,12 @@ class TestCertRotation(base.TestCase): super().setUp() self.CONF = self.useFixture(oslo_fixture.Config(cfg.CONF)) - @mock.patch('octavia.controller.worker.v1.controller_worker.' - 'ControllerWorker.amphora_cert_rotation') - @mock.patch('octavia.db.repositories.AmphoraRepository.' 
-                'get_cert_expiring_amphora')
-    @mock.patch('octavia.db.api.get_session')
-    def test_cert_rotation_expired_amphora_with_exception(self, session,
-                                                          cert_exp_amp_mock,
-                                                          amp_cert_mock
-                                                          ):
-        self.CONF.config(group="api_settings",
-                         default_provider_driver='amphorav1')
-        amphora = mock.MagicMock()
-        amphora.id = AMPHORA_ID
-
-        session.return_value = session
-        cert_exp_amp_mock.side_effect = [amphora, TestException(
-            'break_while')]
-
-        cr = house_keeping.CertRotation()
-        self.assertRaises(TestException, cr.rotate)
-        amp_cert_mock.assert_called_once_with(AMPHORA_ID)
-
-    @mock.patch('octavia.controller.worker.v1.controller_worker.'
-                'ControllerWorker.amphora_cert_rotation')
-    @mock.patch('octavia.db.repositories.AmphoraRepository.'
-                'get_cert_expiring_amphora')
-    @mock.patch('octavia.db.api.get_session')
-    def test_cert_rotation_expired_amphora_without_exception(self, session,
-                                                             cert_exp_amp_mock,
-                                                             amp_cert_mock
-                                                             ):
-        self.CONF.config(group="api_settings",
-                         default_provider_driver='amphorav1')
-        amphora = mock.MagicMock()
-        amphora.id = AMPHORA_ID
-
-        session.return_value = session
-        cert_exp_amp_mock.side_effect = [amphora, None]
-
-        cr = house_keeping.CertRotation()
-
-        self.assertIsNone(cr.rotate())
-        amp_cert_mock.assert_called_once_with(AMPHORA_ID)
-
-    @mock.patch('octavia.controller.worker.v1.controller_worker.'
-                'ControllerWorker.amphora_cert_rotation')
-    @mock.patch('octavia.db.repositories.AmphoraRepository.'
-                'get_cert_expiring_amphora')
-    @mock.patch('octavia.db.api.get_session')
-    def test_cert_rotation_non_expired_amphora(self, session,
-                                               cert_exp_amp_mock,
-                                               amp_cert_mock):
-        self.CONF.config(group="api_settings",
-                         default_provider_driver='amphorav1')
-
-        session.return_value = session
-        cert_exp_amp_mock.return_value = None
-        cr = house_keeping.CertRotation()
-        cr.rotate()
-        self.assertFalse(amp_cert_mock.called)
-
     @mock.patch('octavia.controller.worker.v2.controller_worker.'
                 'ControllerWorker.amphora_cert_rotation')
     @mock.patch('octavia.db.repositories.AmphoraRepository.'
                 'get_cert_expiring_amphora')
     @mock.patch('octavia.db.api.get_session')
-    def test_cert_rotation_expired_amphora_with_exception_amphorav2(
+    def test_cert_rotation_expired_amphora_with_exception(
             self, session, cert_exp_amp_mock, amp_cert_mock):
         self.CONF.config(group="api_settings",
                          default_provider_driver='amphora')
@@ -245,7 +184,7 @@ class TestCertRotation(base.TestCase):
     @mock.patch('octavia.db.repositories.AmphoraRepository.'
                 'get_cert_expiring_amphora')
     @mock.patch('octavia.db.api.get_session')
-    def test_cert_rotation_expired_amphora_without_exception_amphorav2(
+    def test_cert_rotation_expired_amphora_without_exception(
             self, session, cert_exp_amp_mock, amp_cert_mock):
         self.CONF.config(group="api_settings",
                          default_provider_driver='amphora')
@@ -265,7 +204,7 @@ class TestCertRotation(base.TestCase):
     @mock.patch('octavia.db.repositories.AmphoraRepository.'
                 'get_cert_expiring_amphora')
     @mock.patch('octavia.db.api.get_session')
-    def test_cert_rotation_non_expired_amphora_amphorav2(
+    def test_cert_rotation_non_expired_amphora(
             self, session, cert_exp_amp_mock, amp_cert_mock):
         self.CONF.config(group="api_settings",
                          default_provider_driver='amphora')
diff --git a/octavia/tests/unit/controller/queue/v1/__init__.py b/octavia/tests/unit/controller/queue/v1/__init__.py
deleted file mode 100644
index 94e731d201..0000000000
--- a/octavia/tests/unit/controller/queue/v1/__init__.py
+++ /dev/null
@@ -1,11 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
diff --git a/octavia/tests/unit/controller/queue/v1/test_consumer.py b/octavia/tests/unit/controller/queue/v1/test_consumer.py
deleted file mode 100644
index ae1c9a7291..0000000000
--- a/octavia/tests/unit/controller/queue/v1/test_consumer.py
+++ /dev/null
@@ -1,61 +0,0 @@
-# Copyright 2014 Rackspace
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-from unittest import mock
-
-from oslo_config import cfg
-from oslo_config import fixture as oslo_fixture
-import oslo_messaging as messaging
-
-from octavia.controller.queue.v1 import consumer
-from octavia.controller.queue.v1 import endpoints
-from octavia.tests.unit import base
-
-
-class TestConsumer(base.TestRpc):
-
-    def setUp(self):
-        super().setUp()
-        conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
-        conf.config(group="oslo_messaging", topic='foo_topic')
-        conf.config(host='test-hostname')
-        self.conf = conf.conf
-
-    @mock.patch.object(messaging, 'Target')
-    @mock.patch.object(endpoints, 'Endpoints')
-    @mock.patch.object(messaging, 'get_rpc_server')
-    def test_consumer_run(self, mock_rpc_server, mock_endpoint, mock_target):
-        mock_rpc_server_rv = mock.Mock()
-        mock_rpc_server.return_value = mock_rpc_server_rv
-        mock_endpoint_rv = mock.Mock()
-        mock_endpoint.return_value = mock_endpoint_rv
-        mock_target_rv = mock.Mock()
-        mock_target.return_value = mock_target_rv
-
-        consumer.ConsumerService(1, self.conf).run()
-
-        mock_target.assert_called_once_with(topic='foo_topic',
-                                            server='test-hostname',
-                                            fanout=False)
-        mock_endpoint.assert_called_once_with()
-
-    @mock.patch.object(messaging, 'get_rpc_server')
-    def test_consumer_terminate(self, mock_rpc_server):
-        mock_rpc_server_rv = mock.Mock()
-        mock_rpc_server.return_value = mock_rpc_server_rv
-
-        cons = consumer.ConsumerService(1, self.conf)
-        cons.run()
-        cons.terminate()
-        mock_rpc_server_rv.stop.assert_called_once_with()
-        mock_rpc_server_rv.wait.assert_called_once_with()
diff --git a/octavia/tests/unit/controller/queue/v1/test_endpoints.py b/octavia/tests/unit/controller/queue/v1/test_endpoints.py
deleted file mode 100644
index 78d485ba3f..0000000000
--- a/octavia/tests/unit/controller/queue/v1/test_endpoints.py
+++ /dev/null
@@ -1,189 +0,0 @@
-# Copyright 2014 Rackspace
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-from unittest import mock
-
-from oslo_config import cfg
-from oslo_config import fixture as oslo_fixture
-from oslo_utils import uuidutils
-
-from octavia.controller.queue.v1 import endpoints
-from octavia.controller.worker.v1 import controller_worker
-from octavia.tests.unit import base
-
-
-class TestEndpoints(base.TestCase):
-
-    def setUp(self):
-        super().setUp()
-
-        conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
-        conf.config(octavia_plugins='hot_plug_plugin')
-
-        mock_class = mock.create_autospec(controller_worker.ControllerWorker)
-        self.worker_patcher = mock.patch('octavia.controller.queue.v1.'
-                                         'endpoints.stevedore_driver')
-        self.worker_patcher.start().ControllerWorker = mock_class
-
-        self.ep = endpoints.Endpoints()
-        self.context = {}
-        self.resource_updates = {}
-        self.resource_id = 1234
-        self.server_group_id = 3456
-        self.flavor_id = uuidutils.generate_uuid()
-        self.availability_zone = uuidutils.generate_uuid()
-
-    def test_create_load_balancer(self):
-        self.ep.create_load_balancer(self.context, self.resource_id,
-                                     flavor=self.flavor_id,
-                                     availability_zone=self.availability_zone)
-        self.ep.worker.create_load_balancer.assert_called_once_with(
-            self.resource_id, self.flavor_id, self.availability_zone)
-
-    def test_create_load_balancer_no_flavor_or_az(self):
-        self.ep.create_load_balancer(self.context, self.resource_id)
-        self.ep.worker.create_load_balancer.assert_called_once_with(
-            self.resource_id, None, None)
-
-    def test_update_load_balancer(self):
-        self.ep.update_load_balancer(self.context, self.resource_id,
-                                     self.resource_updates)
-        self.ep.worker.update_load_balancer.assert_called_once_with(
-            self.resource_id, self.resource_updates)
-
-    def test_delete_load_balancer(self):
-        self.ep.delete_load_balancer(self.context, self.resource_id)
-        self.ep.worker.delete_load_balancer.assert_called_once_with(
-            self.resource_id, False)
-
-    def test_failover_load_balancer(self):
-        self.ep.failover_load_balancer(self.context, self.resource_id)
-        self.ep.worker.failover_loadbalancer.assert_called_once_with(
-            self.resource_id)
-
-    def test_failover_amphora(self):
-        self.ep.failover_amphora(self.context, self.resource_id)
-        self.ep.worker.failover_amphora.assert_called_once_with(
-            self.resource_id)
-
-    def test_create_listener(self):
-        self.ep.create_listener(self.context, self.resource_id)
-        self.ep.worker.create_listener.assert_called_once_with(
-            self.resource_id)
-
-    def test_update_listener(self):
-        self.ep.update_listener(self.context, self.resource_id,
-                                self.resource_updates)
-        self.ep.worker.update_listener.assert_called_once_with(
-            self.resource_id, self.resource_updates)
-
-    def test_delete_listener(self):
-        self.ep.delete_listener(self.context, self.resource_id)
-        self.ep.worker.delete_listener.assert_called_once_with(
-            self.resource_id)
-
-    def test_create_pool(self):
-        self.ep.create_pool(self.context, self.resource_id)
-        self.ep.worker.create_pool.assert_called_once_with(
-            self.resource_id)
-
-    def test_update_pool(self):
-        self.ep.update_pool(self.context, self.resource_id,
-                            self.resource_updates)
-        self.ep.worker.update_pool.assert_called_once_with(
-            self.resource_id, self.resource_updates)
-
-    def test_delete_pool(self):
-        self.ep.delete_pool(self.context, self.resource_id)
-        self.ep.worker.delete_pool.assert_called_once_with(
-            self.resource_id)
-
-    def test_create_health_monitor(self):
-        self.ep.create_health_monitor(self.context, self.resource_id)
-        self.ep.worker.create_health_monitor.assert_called_once_with(
-            self.resource_id)
-
-    def test_update_health_monitor(self):
-        self.ep.update_health_monitor(self.context, self.resource_id,
-                                      self.resource_updates)
-        self.ep.worker.update_health_monitor.assert_called_once_with(
-            self.resource_id, self.resource_updates)
-
-    def test_delete_health_monitor(self):
-        self.ep.delete_health_monitor(self.context, self.resource_id)
-        self.ep.worker.delete_health_monitor.assert_called_once_with(
-            self.resource_id)
-
-    def test_create_member(self):
-        self.ep.create_member(self.context, self.resource_id)
-        self.ep.worker.create_member.assert_called_once_with(
-            self.resource_id)
-
-    def test_update_member(self):
-        self.ep.update_member(self.context, self.resource_id,
-                              self.resource_updates)
-        self.ep.worker.update_member.assert_called_once_with(
-            self.resource_id, self.resource_updates)
-
-    def test_batch_update_members(self):
-        self.ep.batch_update_members(
-            self.context, [9], [11], [self.resource_updates])
-        self.ep.worker.batch_update_members.assert_called_once_with(
-            [9], [11], [self.resource_updates])
-
-    def test_delete_member(self):
-        self.ep.delete_member(self.context, self.resource_id)
-        self.ep.worker.delete_member.assert_called_once_with(
-            self.resource_id)
-
-    def test_create_l7policy(self):
-        self.ep.create_l7policy(self.context, self.resource_id)
-        self.ep.worker.create_l7policy.assert_called_once_with(
-            self.resource_id)
-
-    def test_update_l7policy(self):
-        self.ep.update_l7policy(self.context, self.resource_id,
-                                self.resource_updates)
-        self.ep.worker.update_l7policy.assert_called_once_with(
-            self.resource_id, self.resource_updates)
-
-    def test_delete_l7policy(self):
-        self.ep.delete_l7policy(self.context, self.resource_id)
-        self.ep.worker.delete_l7policy.assert_called_once_with(
-            self.resource_id)
-
-    def test_create_l7rule(self):
-        self.ep.create_l7rule(self.context, self.resource_id)
-        self.ep.worker.create_l7rule.assert_called_once_with(
-            self.resource_id)
-
-    def test_update_l7rule(self):
-        self.ep.update_l7rule(self.context, self.resource_id,
-                              self.resource_updates)
-        self.ep.worker.update_l7rule.assert_called_once_with(
-            self.resource_id, self.resource_updates)
-
-    def test_delete_l7rule(self):
-        self.ep.delete_l7rule(self.context, self.resource_id)
-        self.ep.worker.delete_l7rule.assert_called_once_with(
-            self.resource_id)
-
-    def test_update_amphora_agent_config(self):
-        self.ep.update_amphora_agent_config(self.context, self.resource_id)
-        self.ep.worker.update_amphora_agent_config.assert_called_once_with(
-            self.resource_id)
-
-    def test_delete_amphora(self):
-        self.ep.delete_amphora(self.context, self.resource_id)
-        self.ep.worker.delete_amphora.assert_called_once_with(
-            self.resource_id)
diff --git a/octavia/tests/unit/controller/worker/v1/__init__.py b/octavia/tests/unit/controller/worker/v1/__init__.py
deleted file mode 100644
index 94e731d201..0000000000
--- a/octavia/tests/unit/controller/worker/v1/__init__.py
+++ /dev/null
@@ -1,11 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
diff --git a/octavia/tests/unit/controller/worker/v1/flows/__init__.py b/octavia/tests/unit/controller/worker/v1/flows/__init__.py
deleted file mode 100644
index 94e731d201..0000000000
--- a/octavia/tests/unit/controller/worker/v1/flows/__init__.py
+++ /dev/null
@@ -1,11 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
diff --git a/octavia/tests/unit/controller/worker/v1/flows/test_amphora_flows.py b/octavia/tests/unit/controller/worker/v1/flows/test_amphora_flows.py
deleted file mode 100644
index e9f61abf92..0000000000
--- a/octavia/tests/unit/controller/worker/v1/flows/test_amphora_flows.py
+++ /dev/null
@@ -1,474 +0,0 @@
-# Copyright 2015 Hewlett-Packard Development Company, L.P.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-from unittest import mock
-
-from oslo_config import cfg
-from oslo_config import fixture as oslo_fixture
-from oslo_utils import uuidutils
-from taskflow.patterns import linear_flow as flow
-
-from octavia.common import constants
-from octavia.common import data_models
-from octavia.controller.worker.v1.flows import amphora_flows
-import octavia.tests.unit.base as base
-
-AUTH_VERSION = '2'
-
-
-# NOTE: We patch the get_network_driver for all the calls so we don't
-# inadvertently make real calls.
-@mock.patch('octavia.common.utils.get_network_driver')
-class TestAmphoraFlows(base.TestCase):
-
-    def setUp(self):
-        super().setUp()
-        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
-        self.conf.config(
-            group="controller_worker",
-            amphora_driver='amphora_haproxy_rest_driver')
-        self.conf.config(group="nova", enable_anti_affinity=False)
-        self.AmpFlow = amphora_flows.AmphoraFlows()
-        self.amp1 = data_models.Amphora(id=1)
-        self.amp2 = data_models.Amphora(id=2)
-        self.amp3 = data_models.Amphora(id=3, status=constants.DELETED)
-        self.amp4 = data_models.Amphora(id=uuidutils.generate_uuid())
-        self.lb = data_models.LoadBalancer(
-            id=4, amphorae=[self.amp1, self.amp2, self.amp3])
-
-    def test_get_create_amphora_flow(self, mock_get_net_driver):
-
-        amp_flow = self.AmpFlow.get_create_amphora_flow()
-
-        self.assertIsInstance(amp_flow, flow.Flow)
-
-        self.assertIn(constants.AMPHORA, amp_flow.provides)
-        self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
-        self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
-        self.assertIn(constants.SERVER_PEM, amp_flow.provides)
-
-        self.assertEqual(5, len(amp_flow.provides))
-        self.assertEqual(4, len(amp_flow.requires))
-
-    def test_get_create_amphora_flow_cert(self, mock_get_net_driver):
-        self.AmpFlow = amphora_flows.AmphoraFlows()
-
-        amp_flow = self.AmpFlow.get_create_amphora_flow()
-
-        self.assertIsInstance(amp_flow, flow.Flow)
-
-        self.assertIn(constants.AMPHORA, amp_flow.provides)
-        self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
-        self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
-
-        self.assertEqual(5, len(amp_flow.provides))
-        self.assertEqual(4, len(amp_flow.requires))
-
-    def test_get_create_amphora_for_lb_flow(self, mock_get_net_driver):
-
-        amp_flow = self.AmpFlow._get_create_amp_for_lb_subflow(
-            'SOMEPREFIX', constants.ROLE_STANDALONE)
-
-        self.assertIsInstance(amp_flow, flow.Flow)
-
-        self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires)
-
-        self.assertIn(constants.AMPHORA, amp_flow.provides)
-        self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
-        self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
-        self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides)
-        self.assertIn(constants.SERVER_PEM, amp_flow.provides)
-
-        self.assertEqual(5, len(amp_flow.provides))
-        self.assertEqual(5, len(amp_flow.requires))
-
-    def test_get_cert_create_amphora_for_lb_flow(self, mock_get_net_driver):
-
-        self.AmpFlow = amphora_flows.AmphoraFlows()
-
-        amp_flow = self.AmpFlow._get_create_amp_for_lb_subflow(
-            'SOMEPREFIX', constants.ROLE_STANDALONE)
-
-        self.assertIsInstance(amp_flow, flow.Flow)
-
-        self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires)
-
-        self.assertIn(constants.AMPHORA, amp_flow.provides)
-        self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
-        self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
-        self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides)
-        self.assertIn(constants.SERVER_PEM, amp_flow.provides)
-
-        self.assertEqual(5, len(amp_flow.provides))
-        self.assertEqual(5, len(amp_flow.requires))
-
-    def test_get_cert_master_create_amphora_for_lb_flow(
-            self, mock_get_net_driver):
-
-        self.AmpFlow = amphora_flows.AmphoraFlows()
-
-        amp_flow = self.AmpFlow._get_create_amp_for_lb_subflow(
-            'SOMEPREFIX', constants.ROLE_MASTER)
-
-        self.assertIsInstance(amp_flow, flow.Flow)
-
-        self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires)
-
-        self.assertIn(constants.AMPHORA, amp_flow.provides)
-        self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
-        self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
-        self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides)
-        self.assertIn(constants.SERVER_PEM, amp_flow.provides)
-
-        self.assertEqual(5, len(amp_flow.provides))
-        self.assertEqual(5, len(amp_flow.requires))
-
-    def test_get_cert_master_rest_anti_affinity_create_amphora_for_lb_flow(
-            self, mock_get_net_driver):
-
-        self.conf.config(group="nova", enable_anti_affinity=True)
-
-        self.AmpFlow = amphora_flows.AmphoraFlows()
-        amp_flow = self.AmpFlow._get_create_amp_for_lb_subflow(
-            'SOMEPREFIX', constants.ROLE_MASTER)
-
-        self.assertIsInstance(amp_flow, flow.Flow)
-        self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
-        self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
-        self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides)
-        self.assertIn(constants.SERVER_PEM, amp_flow.provides)
-
-        self.assertEqual(5, len(amp_flow.provides))
-        self.assertEqual(5, len(amp_flow.requires))
-        self.conf.config(group="nova", enable_anti_affinity=False)
-
-    def test_get_cert_backup_create_amphora_for_lb_flow(
-            self, mock_get_net_driver):
-        self.AmpFlow = amphora_flows.AmphoraFlows()
-
-        amp_flow = self.AmpFlow._get_create_amp_for_lb_subflow(
-            'SOMEPREFIX', constants.ROLE_BACKUP)
-
-        self.assertIsInstance(amp_flow, flow.Flow)
-
-        self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires)
-
-        self.assertIn(constants.AMPHORA, amp_flow.provides)
-        self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
-        self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
-        self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides)
-        self.assertIn(constants.SERVER_PEM, amp_flow.provides)
-
-        self.assertEqual(5, len(amp_flow.provides))
-        self.assertEqual(5, len(amp_flow.requires))
-
-    def test_get_cert_bogus_create_amphora_for_lb_flow(
-            self, mock_get_net_driver):
-        self.AmpFlow = amphora_flows.AmphoraFlows()
-
-        amp_flow = self.AmpFlow._get_create_amp_for_lb_subflow(
-            'SOMEPREFIX', 'BOGUS_ROLE')
-
-        self.assertIsInstance(amp_flow, flow.Flow)
-
-        self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires)
-
-        self.assertIn(constants.AMPHORA, amp_flow.provides)
-        self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
-        self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
-        self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides)
-        self.assertIn(constants.SERVER_PEM, amp_flow.provides)
-
-        self.assertEqual(5, len(amp_flow.provides))
-        self.assertEqual(5, len(amp_flow.requires))
-
-    def test_get_cert_backup_rest_anti_affinity_create_amphora_for_lb_flow(
-            self, mock_get_net_driver):
-        self.conf.config(group="nova", enable_anti_affinity=True)
-
-        self.AmpFlow = amphora_flows.AmphoraFlows()
-        amp_flow = self.AmpFlow._get_create_amp_for_lb_subflow(
-            'SOMEPREFIX', constants.ROLE_BACKUP)
-
-        self.assertIsInstance(amp_flow, flow.Flow)
-        self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
-        self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
-        self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides)
-        self.assertIn(constants.SERVER_PEM, amp_flow.provides)
-
-        self.assertEqual(5, len(amp_flow.provides))
-        self.assertEqual(5, len(amp_flow.requires))
-        self.conf.config(group="nova", enable_anti_affinity=False)
-
-    def test_get_delete_amphora_flow(self, mock_get_net_driver):
-
-        amp_flow = self.AmpFlow.get_delete_amphora_flow(self.amp4)
-
-        self.assertIsInstance(amp_flow, flow.Flow)
-
-        # This flow injects the required data at flow compile time.
-        self.assertEqual(0, len(amp_flow.provides))
-        self.assertEqual(0, len(amp_flow.requires))
-
-    def test_get_failover_flow_act_stdby(self, mock_get_net_driver):
-        failed_amphora = data_models.Amphora(
-            id=uuidutils.generate_uuid(), role=constants.ROLE_MASTER,
-            load_balancer_id=uuidutils.generate_uuid())
-
-        amp_flow = self.AmpFlow.get_failover_amphora_flow(
-            failed_amphora, 2)
-
-        self.assertIsInstance(amp_flow, flow.Flow)
-
-        self.assertIn(constants.AVAILABILITY_ZONE, amp_flow.requires)
-        self.assertIn(constants.BUILD_TYPE_PRIORITY, amp_flow.requires)
-        self.assertIn(constants.FLAVOR, amp_flow.requires)
-        self.assertIn(constants.LOADBALANCER, amp_flow.requires)
-        self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires)
-        self.assertIn(constants.VIP, amp_flow.requires)
-
-        self.assertIn(constants.UPDATED_PORTS, amp_flow.provides)
-        self.assertIn(constants.AMP_VRRP_INT, amp_flow.provides)
-        self.assertIn(constants.AMPHORA, amp_flow.provides)
-        self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
-        self.assertIn(constants.AMPHORAE, amp_flow.provides)
-        self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, amp_flow.provides)
-        self.assertIn(constants.BASE_PORT, amp_flow.provides)
-        self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
-        self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides)
-        self.assertIn(constants.DELTA, amp_flow.provides)
-        self.assertIn(constants.LOADBALANCER, amp_flow.provides)
-        self.assertIn(constants.SERVER_PEM, amp_flow.provides)
-        self.assertIn(constants.VIP_SG_ID, amp_flow.provides)
-
-        self.assertEqual(7, len(amp_flow.requires))
-        self.assertEqual(13, len(amp_flow.provides))
-
-    def test_get_failover_flow_standalone(self, mock_get_net_driver):
-        failed_amphora = data_models.Amphora(
-            id=uuidutils.generate_uuid(), role=constants.ROLE_STANDALONE,
-            load_balancer_id=uuidutils.generate_uuid(), vrrp_ip='2001:3b8::32')
-
-        amp_flow = self.AmpFlow.get_failover_amphora_flow(
-            failed_amphora, 1)
-
-        self.assertIsInstance(amp_flow, flow.Flow)
-
-        self.assertIn(constants.AVAILABILITY_ZONE, amp_flow.requires)
-        self.assertIn(constants.BUILD_TYPE_PRIORITY, amp_flow.requires)
-        self.assertIn(constants.FLAVOR, amp_flow.requires)
-        self.assertIn(constants.LOADBALANCER, amp_flow.requires)
-        self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires)
-        self.assertIn(constants.VIP, amp_flow.requires)
-
-        self.assertIn(constants.UPDATED_PORTS, amp_flow.provides)
-        self.assertIn(constants.AMPHORA, amp_flow.provides)
-        self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
-        self.assertIn(constants.AMPHORAE, amp_flow.provides)
-        self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, amp_flow.provides)
-        self.assertIn(constants.BASE_PORT, amp_flow.provides)
-        self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
-        self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides)
-        self.assertIn(constants.DELTA, amp_flow.provides)
-        self.assertIn(constants.LOADBALANCER, amp_flow.provides)
-        self.assertIn(constants.SERVER_PEM, amp_flow.provides)
-        self.assertIn(constants.VIP_SG_ID, amp_flow.provides)
-
-        self.assertEqual(7, len(amp_flow.requires))
-        self.assertEqual(12, len(amp_flow.provides))
-
-    def test_get_failover_flow_bogus_role(self, mock_get_net_driver):
-        failed_amphora = data_models.Amphora(id=uuidutils.generate_uuid(),
-                                             role='bogus')
-
-        amp_flow = self.AmpFlow.get_failover_amphora_flow(
-            failed_amphora, 1)
-
-        self.assertIsInstance(amp_flow, flow.Flow)
-
-        self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires)
-
-        self.assertIn(constants.VIP_SG_ID, amp_flow.provides)
-
-        print(amp_flow.requires)
-        self.assertEqual(1, len(amp_flow.requires))
-        self.assertEqual(1, len(amp_flow.provides))
-
-    def test_cert_rotate_amphora_flow(self, mock_get_net_driver):
-        self.AmpFlow = amphora_flows.AmphoraFlows()
-
-        amp_rotate_flow = self.AmpFlow.cert_rotate_amphora_flow()
-        self.assertIsInstance(amp_rotate_flow, flow.Flow)
-
-        self.assertIn(constants.SERVER_PEM, amp_rotate_flow.provides)
-        self.assertIn(constants.AMPHORA, amp_rotate_flow.requires)
-
-        self.assertEqual(1, len(amp_rotate_flow.provides))
-        self.assertEqual(2, len(amp_rotate_flow.requires))
-
-    def test_get_vrrp_subflow(self, mock_get_net_driver):
-        vrrp_subflow = self.AmpFlow.get_vrrp_subflow('123')
-
-        self.assertIsInstance(vrrp_subflow, flow.Flow)
-
-        self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, vrrp_subflow.provides)
-        self.assertIn(constants.AMP_VRRP_INT, vrrp_subflow.provides)
-
-        self.assertIn(constants.LOADBALANCER_ID, vrrp_subflow.requires)
-        self.assertIn(constants.AMPHORAE, vrrp_subflow.requires)
-
-        self.assertEqual(2, len(vrrp_subflow.provides))
-        self.assertEqual(2, len(vrrp_subflow.requires))
-
-    def test_get_vrrp_subflow_dont_create_vrrp_group(
-            self, mock_get_net_driver):
-        vrrp_subflow = self.AmpFlow.get_vrrp_subflow('123',
-                                                     create_vrrp_group=False)
-
-        self.assertIsInstance(vrrp_subflow, flow.Flow)
-
-        self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, vrrp_subflow.provides)
-        self.assertIn(constants.AMP_VRRP_INT, vrrp_subflow.provides)
-
-        self.assertIn(constants.LOADBALANCER_ID, vrrp_subflow.requires)
-        self.assertIn(constants.AMPHORAE, vrrp_subflow.requires)
-
-        self.assertEqual(2, len(vrrp_subflow.provides))
-        self.assertEqual(2, len(vrrp_subflow.requires))
-
-    def test_get_post_map_lb_subflow(self, mock_get_net_driver):
-
-        self.AmpFlow = amphora_flows.AmphoraFlows()
-
-        amp_flow = self.AmpFlow._get_post_map_lb_subflow(
-            'SOMEPREFIX', constants.ROLE_MASTER)
-
-        self.assertIsInstance(amp_flow, flow.Flow)
-
-        self.assertIn(constants.FLAVOR, amp_flow.requires)
-        self.assertIn(constants.AMPHORA_ID, amp_flow.requires)
-        self.assertIn(constants.AMPHORA, amp_flow.provides)
-
-        self.assertEqual(1, len(amp_flow.provides))
-        self.assertEqual(2, len(amp_flow.requires))
-
-        amp_flow = self.AmpFlow._get_post_map_lb_subflow(
-            'SOMEPREFIX', constants.ROLE_BACKUP)
-
-        self.assertIsInstance(amp_flow, flow.Flow)
-
-        self.assertIn(constants.FLAVOR, amp_flow.requires)
-        self.assertIn(constants.AMPHORA_ID, amp_flow.requires)
-        self.assertIn(constants.AMPHORA, amp_flow.provides)
-
-        self.assertEqual(1, len(amp_flow.provides))
-        self.assertEqual(2, len(amp_flow.requires))
-
-        amp_flow = self.AmpFlow._get_post_map_lb_subflow(
-            'SOMEPREFIX', constants.ROLE_STANDALONE)
-
-        self.assertIsInstance(amp_flow, flow.Flow)
-
-        self.assertIn(constants.FLAVOR, amp_flow.requires)
-        self.assertIn(constants.AMPHORA_ID, amp_flow.requires)
-        self.assertIn(constants.AMPHORA, amp_flow.provides)
-
-        self.assertEqual(1, len(amp_flow.provides))
-        self.assertEqual(2, len(amp_flow.requires))
-
-        amp_flow = self.AmpFlow._get_post_map_lb_subflow(
-            'SOMEPREFIX', 'BOGUS_ROLE')
-
-        self.assertIsInstance(amp_flow, flow.Flow)
-
-        self.assertIn(constants.FLAVOR, amp_flow.requires)
-        self.assertIn(constants.AMPHORA_ID, amp_flow.requires)
-        self.assertIn(constants.AMPHORA, amp_flow.provides)
-
-        self.assertEqual(1, len(amp_flow.provides))
-        self.assertEqual(2, len(amp_flow.requires))
-
-    def test_update_amphora_config_flow(self, mock_get_net_driver):
-
-        amp_flow = self.AmpFlow.update_amphora_config_flow()
-
-        self.assertIsInstance(amp_flow, flow.Flow)
-
-        self.assertIn(constants.AMPHORA, amp_flow.requires)
-        self.assertIn(constants.FLAVOR, amp_flow.requires)
-
-        self.assertEqual(2, len(amp_flow.requires))
-        self.assertEqual(0, len(amp_flow.provides))
-
-    def test_get_amphora_for_lb_failover_flow_single(self,
-                                                     mock_get_net_driver):
-        FAILED_PORT_ID = uuidutils.generate_uuid()
-        TEST_PREFIX = 'test_prefix'
-
-        get_amp_flow = self.AmpFlow.get_amphora_for_lb_failover_subflow(
-            TEST_PREFIX, role=constants.ROLE_STANDALONE,
-            failed_amp_vrrp_port_id=FAILED_PORT_ID, is_vrrp_ipv6=True)
-
-        self.assertIsInstance(get_amp_flow, flow.Flow)
-
-        self.assertIn(constants.AVAILABILITY_ZONE, get_amp_flow.requires)
-        self.assertIn(constants.BUILD_TYPE_PRIORITY, get_amp_flow.requires)
-        self.assertIn(constants.FLAVOR, get_amp_flow.requires)
-        self.assertIn(constants.LOADBALANCER, get_amp_flow.requires)
-        self.assertIn(constants.LOADBALANCER_ID, get_amp_flow.requires)
-        self.assertIn(constants.VIP, get_amp_flow.requires)
-        self.assertIn(constants.VIP_SG_ID, get_amp_flow.requires)
-
-        self.assertIn(constants.UPDATED_PORTS, get_amp_flow.provides)
-        self.assertIn(constants.AMPHORA, get_amp_flow.provides)
-        self.assertIn(constants.AMPHORA_ID, get_amp_flow.provides)
-        self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, get_amp_flow.provides)
-        self.assertIn(constants.BASE_PORT, get_amp_flow.provides)
-        self.assertIn(constants.COMPUTE_ID, get_amp_flow.provides)
-        self.assertIn(constants.COMPUTE_OBJ, get_amp_flow.provides)
-        self.assertIn(constants.DELTA, get_amp_flow.provides)
-        self.assertIn(constants.SERVER_PEM, get_amp_flow.provides)
-
-        self.assertEqual(8, len(get_amp_flow.requires), get_amp_flow.requires)
-        self.assertEqual(9, len(get_amp_flow.provides), get_amp_flow.provides)
-
-    def test_get_amphora_for_lb_failover_flow_act_stdby(self,
-                                                        mock_get_net_driver):
-        TEST_PREFIX = 'test_prefix'
-
-        get_amp_flow = self.AmpFlow.get_amphora_for_lb_failover_subflow(
-            TEST_PREFIX, role=constants.ROLE_MASTER)
-
-        self.assertIsInstance(get_amp_flow, flow.Flow)
-
-        self.assertIn(constants.AVAILABILITY_ZONE, get_amp_flow.requires)
-        self.assertIn(constants.BUILD_TYPE_PRIORITY, get_amp_flow.requires)
-        self.assertIn(constants.FLAVOR, get_amp_flow.requires)
-        self.assertIn(constants.LOADBALANCER, get_amp_flow.requires)
-        self.assertIn(constants.LOADBALANCER_ID, get_amp_flow.requires)
-        self.assertIn(constants.VIP, get_amp_flow.requires)
-        self.assertIn(constants.VIP_SG_ID, get_amp_flow.requires)
-
-        self.assertIn(constants.UPDATED_PORTS, get_amp_flow.provides)
-        self.assertIn(constants.AMPHORA, get_amp_flow.provides)
-        self.assertIn(constants.AMPHORA_ID, get_amp_flow.provides)
-        self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, get_amp_flow.provides)
-        self.assertIn(constants.BASE_PORT, get_amp_flow.provides)
-        self.assertIn(constants.COMPUTE_ID, get_amp_flow.provides)
-        self.assertIn(constants.COMPUTE_OBJ, get_amp_flow.provides)
-        self.assertIn(constants.DELTA, get_amp_flow.provides)
-        self.assertIn(constants.SERVER_PEM, get_amp_flow.provides)
-
-        self.assertEqual(8, len(get_amp_flow.requires), get_amp_flow.requires)
-        self.assertEqual(9, len(get_amp_flow.provides), get_amp_flow.provides)
diff --git a/octavia/tests/unit/controller/worker/v1/flows/test_health_monitor_flows.py b/octavia/tests/unit/controller/worker/v1/flows/test_health_monitor_flows.py
deleted file mode 100644
index 407880a927..0000000000
--- a/octavia/tests/unit/controller/worker/v1/flows/test_health_monitor_flows.py
+++ /dev/null
@@ -1,72 +0,0 @@
-# Copyright 2015 Hewlett-Packard Development Company, L.P.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-from taskflow.patterns import linear_flow as flow
-
-from octavia.common import constants
-from octavia.controller.worker.v1.flows import health_monitor_flows
-import octavia.tests.unit.base as base
-
-
-class TestHealthMonitorFlows(base.TestCase):
-
-    def setUp(self):
-        self.HealthMonitorFlow = health_monitor_flows.HealthMonitorFlows()
-
-        super().setUp()
-
-    def test_get_create_health_monitor_flow(self):
-
-        health_mon_flow = (self.HealthMonitorFlow.
-                           get_create_health_monitor_flow())
-
-        self.assertIsInstance(health_mon_flow, flow.Flow)
-
-        self.assertIn(constants.LISTENERS, health_mon_flow.requires)
-        self.assertIn(constants.LOADBALANCER, health_mon_flow.requires)
-        self.assertIn(constants.POOL, health_mon_flow.requires)
-
-        self.assertEqual(4, len(health_mon_flow.requires))
-        self.assertEqual(0, len(health_mon_flow.provides))
-
-    def test_get_delete_health_monitor_flow(self):
-
-        health_mon_flow = (self.HealthMonitorFlow.
-                           get_delete_health_monitor_flow())
-
-        self.assertIsInstance(health_mon_flow, flow.Flow)
-
-        self.assertIn(constants.HEALTH_MON, health_mon_flow.requires)
-        self.assertIn(constants.LISTENERS, health_mon_flow.requires)
-        self.assertIn(constants.LOADBALANCER, health_mon_flow.requires)
-        self.assertIn(constants.POOL, health_mon_flow.requires)
-
-        self.assertEqual(4, len(health_mon_flow.requires))
-        self.assertEqual(0, len(health_mon_flow.provides))
-
-    def test_get_update_health_monitor_flow(self):
-
-        health_mon_flow = (self.HealthMonitorFlow.
-                           get_update_health_monitor_flow())
-
-        self.assertIsInstance(health_mon_flow, flow.Flow)
-
-        self.assertIn(constants.LISTENERS, health_mon_flow.requires)
-        self.assertIn(constants.LOADBALANCER, health_mon_flow.requires)
-        self.assertIn(constants.HEALTH_MON, health_mon_flow.requires)
-        self.assertIn(constants.UPDATE_DICT, health_mon_flow.requires)
-
-        self.assertEqual(5, len(health_mon_flow.requires))
-        self.assertEqual(0, len(health_mon_flow.provides))
diff --git a/octavia/tests/unit/controller/worker/v1/flows/test_l7policy_flows.py b/octavia/tests/unit/controller/worker/v1/flows/test_l7policy_flows.py
deleted file mode 100644
index 7c4b73e047..0000000000
--- a/octavia/tests/unit/controller/worker/v1/flows/test_l7policy_flows.py
+++ /dev/null
@@ -1,67 +0,0 @@
-# Copyright 2016 Blue Box, an IBM Company
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-from taskflow.patterns import linear_flow as flow
-
-from octavia.common import constants
-from octavia.controller.worker.v1.flows import l7policy_flows
-import octavia.tests.unit.base as base
-
-
-class TestL7PolicyFlows(base.TestCase):
-
-    def setUp(self):
-        self.L7PolicyFlow = l7policy_flows.L7PolicyFlows()
-
-        super().setUp()
-
-    def test_get_create_l7policy_flow(self):
-
-        l7policy_flow = self.L7PolicyFlow.get_create_l7policy_flow()
-
-        self.assertIsInstance(l7policy_flow, flow.Flow)
-
-        self.assertIn(constants.LISTENERS, l7policy_flow.requires)
-        self.assertIn(constants.LOADBALANCER, l7policy_flow.requires)
-
-        self.assertEqual(3, len(l7policy_flow.requires))
-        self.assertEqual(0, len(l7policy_flow.provides))
-
-    def test_get_delete_l7policy_flow(self):
-
-        l7policy_flow = self.L7PolicyFlow.get_delete_l7policy_flow()
-
-        self.assertIsInstance(l7policy_flow, flow.Flow)
-
-        self.assertIn(constants.LISTENERS, l7policy_flow.requires)
-        self.assertIn(constants.LOADBALANCER, l7policy_flow.requires)
-        self.assertIn(constants.L7POLICY, l7policy_flow.requires)
-
-        self.assertEqual(3, len(l7policy_flow.requires))
-        self.assertEqual(0, len(l7policy_flow.provides))
-
-    def test_get_update_l7policy_flow(self):
-
-        l7policy_flow = self.L7PolicyFlow.get_update_l7policy_flow()
-
-        self.assertIsInstance(l7policy_flow, flow.Flow)
-
-        self.assertIn(constants.L7POLICY, l7policy_flow.requires)
-        self.assertIn(constants.LISTENERS, l7policy_flow.requires)
-        self.assertIn(constants.LOADBALANCER, l7policy_flow.requires)
-        self.assertIn(constants.UPDATE_DICT, l7policy_flow.requires)
-
-        self.assertEqual(4, len(l7policy_flow.requires))
-        self.assertEqual(0, len(l7policy_flow.provides))
diff --git a/octavia/tests/unit/controller/worker/v1/flows/test_l7rule_flows.py b/octavia/tests/unit/controller/worker/v1/flows/test_l7rule_flows.py
deleted file mode 100644
index 50102ffb8a..0000000000
--- a/octavia/tests/unit/controller/worker/v1/flows/test_l7rule_flows.py
+++ /dev/null
@@ -1,67 +0,0 @@
-# Copyright 2016 Blue Box, an IBM Company
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-from taskflow.patterns import linear_flow as flow
-
-from octavia.common import constants
-from octavia.controller.worker.v1.flows import l7rule_flows
-import octavia.tests.unit.base as base
-
-
-class TestL7RuleFlows(base.TestCase):
-
-    def setUp(self):
-        self.L7RuleFlow = l7rule_flows.L7RuleFlows()
-
-        super().setUp()
-
-    def test_get_create_l7rule_flow(self):
-
-        l7rule_flow = self.L7RuleFlow.get_create_l7rule_flow()
-
-        self.assertIsInstance(l7rule_flow, flow.Flow)
-
-        self.assertIn(constants.LISTENERS, l7rule_flow.requires)
-        self.assertIn(constants.LOADBALANCER, l7rule_flow.requires)
-
-        self.assertEqual(4, len(l7rule_flow.requires))
-        self.assertEqual(0, len(l7rule_flow.provides))
-
-    def test_get_delete_l7rule_flow(self):
-
-        l7rule_flow = self.L7RuleFlow.get_delete_l7rule_flow()
-
-        self.assertIsInstance(l7rule_flow, flow.Flow)
-
-        self.assertIn(constants.LISTENERS, l7rule_flow.requires)
-        self.assertIn(constants.LOADBALANCER, l7rule_flow.requires)
-        self.assertIn(constants.L7RULE, l7rule_flow.requires)
-
-        self.assertEqual(4, len(l7rule_flow.requires))
-        self.assertEqual(0, len(l7rule_flow.provides))
-
-    def test_get_update_l7rule_flow(self):
-
-        l7rule_flow = self.L7RuleFlow.get_update_l7rule_flow()
-
-        self.assertIsInstance(l7rule_flow, flow.Flow)
-
-        self.assertIn(constants.L7RULE, l7rule_flow.requires)
-        self.assertIn(constants.LISTENERS, l7rule_flow.requires)
-        self.assertIn(constants.LOADBALANCER, l7rule_flow.requires)
-        self.assertIn(constants.UPDATE_DICT, l7rule_flow.requires)
-
-        self.assertEqual(5, len(l7rule_flow.requires))
-        self.assertEqual(0, len(l7rule_flow.provides))
diff --git a/octavia/tests/unit/controller/worker/v1/flows/test_listener_flows.py b/octavia/tests/unit/controller/worker/v1/flows/test_listener_flows.py
deleted file mode 100644
index 2fd1090e0b..0000000000
--- a/octavia/tests/unit/controller/worker/v1/flows/test_listener_flows.py
+++ /dev/null
@@ -1,91 +0,0 @@
-# Copyright 2015 Hewlett-Packard Development Company, L.P.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-from unittest import mock
-
-from taskflow.patterns import linear_flow as flow
-
-from octavia.common import constants
-from octavia.controller.worker.v1.flows import listener_flows
-import octavia.tests.unit.base as base
-
-
-# NOTE: We patch the get_network_driver for all the calls so we don't
-# inadvertently make real calls.
-@mock.patch('octavia.common.utils.get_network_driver')
-class TestListenerFlows(base.TestCase):
-
-    def setUp(self):
-        self.ListenerFlow = listener_flows.ListenerFlows()
-
-        super().setUp()
-
-    def test_get_create_listener_flow(self, mock_get_net_driver):
-
-        listener_flow = self.ListenerFlow.get_create_listener_flow()
-
-        self.assertIsInstance(listener_flow, flow.Flow)
-
-        self.assertIn(constants.LOADBALANCER, listener_flow.requires)
-        self.assertIn(constants.LISTENERS, listener_flow.requires)
-
-        self.assertEqual(2, len(listener_flow.requires))
-        self.assertEqual(0, len(listener_flow.provides))
-
-    def test_get_delete_listener_flow(self, mock_get_net_driver):
-
-        listener_flow = self.ListenerFlow.get_delete_listener_flow()
-
-        self.assertIsInstance(listener_flow, flow.Flow)
-
-        self.assertIn(constants.LISTENER, listener_flow.requires)
-        self.assertIn(constants.LOADBALANCER, listener_flow.requires)
-
-        self.assertEqual(2, len(listener_flow.requires))
-        self.assertEqual(0, len(listener_flow.provides))
-
-    def test_get_delete_listener_internal_flow(self, mock_get_net_driver):
-        listener_flow = self.ListenerFlow.get_delete_listener_internal_flow(
-            'test-listener')
-
-        self.assertIsInstance(listener_flow, flow.Flow)
-
-        self.assertIn('test-listener', listener_flow.requires)
-        self.assertIn(constants.LOADBALANCER, listener_flow.requires)
-
-        self.assertEqual(2, len(listener_flow.requires))
-        self.assertEqual(0, len(listener_flow.provides))
-
-    def test_get_update_listener_flow(self, mock_get_net_driver):
-
-        listener_flow = self.ListenerFlow.get_update_listener_flow()
-
-        self.assertIsInstance(listener_flow, flow.Flow)
-
-        self.assertIn(constants.LISTENER, listener_flow.requires)
-        self.assertIn(constants.LOADBALANCER, listener_flow.requires)
-        self.assertIn(constants.UPDATE_DICT, listener_flow.requires)
-        self.assertIn(constants.LISTENERS, listener_flow.requires)
-
-        self.assertEqual(4, len(listener_flow.requires))
-        self.assertEqual(0, len(listener_flow.provides))
-
-    def test_get_create_all_listeners_flow(self, mock_get_net_driver):
-        listeners_flow = self.ListenerFlow.get_create_all_listeners_flow()
-        self.assertIsInstance(listeners_flow, flow.Flow)
-        self.assertIn(constants.LOADBALANCER, listeners_flow.requires)
-        self.assertIn(constants.LOADBALANCER_ID, listeners_flow.requires)
-        self.assertIn(constants.LOADBALANCER, listeners_flow.provides)
-        self.assertEqual(2, len(listeners_flow.requires))
-        self.assertEqual(2, len(listeners_flow.provides))
diff --git a/octavia/tests/unit/controller/worker/v1/flows/test_load_balancer_flows.py b/octavia/tests/unit/controller/worker/v1/flows/test_load_balancer_flows.py
deleted file mode 100644
index e3ec1d9b5a..0000000000
--- a/octavia/tests/unit/controller/worker/v1/flows/test_load_balancer_flows.py
+++ /dev/null
@@ -1,430 +0,0 @@
-# Copyright 2015 Hewlett-Packard Development Company, L.P.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-# -from unittest import mock - -from oslo_config import cfg -from oslo_config import fixture as oslo_fixture -from oslo_utils import uuidutils -from taskflow.patterns import linear_flow as flow - -from octavia.common import constants -from octavia.common import exceptions -from octavia.controller.worker.v1.flows import load_balancer_flows -import octavia.tests.unit.base as base - - -# NOTE: We patch the get_network_driver for all the calls so we don't -# inadvertently make real calls. -@mock.patch('octavia.common.utils.get_network_driver') -class TestLoadBalancerFlows(base.TestCase): - - def setUp(self): - super().setUp() - self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) - self.conf.config( - group="controller_worker", - amphora_driver='amphora_haproxy_rest_driver') - self.conf.config(group="nova", enable_anti_affinity=False) - self.LBFlow = load_balancer_flows.LoadBalancerFlows() - - def test_get_create_load_balancer_flow(self, mock_get_net_driver): - amp_flow = self.LBFlow.get_create_load_balancer_flow( - constants.TOPOLOGY_SINGLE) - self.assertIsInstance(amp_flow, flow.Flow) - self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires) - self.assertIn(constants.AMPHORA, amp_flow.provides) - self.assertIn(constants.AMPHORA_ID, amp_flow.provides) - self.assertIn(constants.COMPUTE_ID, amp_flow.provides) - self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides) - - def test_get_create_active_standby_load_balancer_flow( - self, mock_get_net_driver): - amp_flow = self.LBFlow.get_create_load_balancer_flow( - constants.TOPOLOGY_ACTIVE_STANDBY) - self.assertIsInstance(amp_flow, flow.Flow) - self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires) - self.assertIn(constants.AMPHORA, amp_flow.provides) - self.assertIn(constants.AMPHORA_ID, amp_flow.provides) - self.assertIn(constants.COMPUTE_ID, amp_flow.provides) - self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides) - - def test_get_create_anti_affinity_active_standby_load_balancer_flow( - self, mock_get_net_driver): - self.conf.config(group="nova", enable_anti_affinity=True) - - self._LBFlow = load_balancer_flows.LoadBalancerFlows() - amp_flow = self._LBFlow.get_create_load_balancer_flow( - constants.TOPOLOGY_ACTIVE_STANDBY) - self.assertIsInstance(amp_flow, flow.Flow) - self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires) - self.assertIn(constants.SERVER_GROUP_ID, amp_flow.provides) - self.assertIn(constants.AMPHORA, amp_flow.provides) - self.assertIn(constants.AMPHORA_ID, amp_flow.provides) - self.assertIn(constants.COMPUTE_ID, amp_flow.provides) - self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides) - self.conf.config(group="nova", enable_anti_affinity=False) - - def test_get_create_bogus_topology_load_balancer_flow( - self, mock_get_net_driver): - self.assertRaises(exceptions.InvalidTopology, - self.LBFlow.get_create_load_balancer_flow, - 'BOGUS') - - def test_get_delete_load_balancer_flow(self, mock_get_net_driver): - lb_mock = mock.Mock() - listener_mock = mock.Mock() - listener_mock.id = '123' - lb_mock.listeners = [listener_mock] - - lb_flow, store = self.LBFlow.get_delete_load_balancer_flow(lb_mock) - - self.assertIsInstance(lb_flow, flow.Flow) - - self.assertIn(constants.LOADBALANCER, lb_flow.requires) - self.assertIn(constants.SERVER_GROUP_ID, lb_flow.requires) - - self.assertEqual(0, len(lb_flow.provides)) - self.assertEqual(2, len(lb_flow.requires)) - - def test_get_delete_load_balancer_flow_cascade(self, mock_get_net_driver): - lb_mock = mock.Mock() - listener_mock = mock.Mock() - listener_mock.id = 
'123' - lb_mock.listeners = [listener_mock] - pool_mock = mock.Mock() - pool_mock.id = '345' - lb_mock.pools = [pool_mock] - l7_mock = mock.Mock() - l7_mock.id = '678' - listener_mock.l7policies = [l7_mock] - - lb_flow, store = self.LBFlow.get_cascade_delete_load_balancer_flow( - lb_mock) - - self.assertIsInstance(lb_flow, flow.Flow) - self.assertEqual({'listener_123': listener_mock, - 'pool345': pool_mock}, store) - - self.assertIn(constants.LOADBALANCER, lb_flow.requires) - - self.assertEqual(1, len(lb_flow.provides)) - self.assertEqual(4, len(lb_flow.requires)) - - def test_get_update_load_balancer_flow(self, mock_get_net_driver): - - lb_flow = self.LBFlow.get_update_load_balancer_flow() - - self.assertIsInstance(lb_flow, flow.Flow) - - self.assertIn(constants.LOADBALANCER, lb_flow.requires) - self.assertIn(constants.UPDATE_DICT, lb_flow.requires) - - self.assertEqual(0, len(lb_flow.provides)) - self.assertEqual(2, len(lb_flow.requires)) - - def test_get_post_lb_amp_association_flow(self, mock_get_net_driver): - amp_flow = self.LBFlow.get_post_lb_amp_association_flow( - '123', constants.TOPOLOGY_SINGLE) - - self.assertIsInstance(amp_flow, flow.Flow) - - self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires) - self.assertIn(constants.UPDATE_DICT, amp_flow.requires) - self.assertIn(constants.LOADBALANCER, amp_flow.provides) - - self.assertEqual(1, len(amp_flow.provides)) - self.assertEqual(2, len(amp_flow.requires)) - - # Test Active/Standby path - amp_flow = self.LBFlow.get_post_lb_amp_association_flow( - '123', constants.TOPOLOGY_ACTIVE_STANDBY) - - self.assertIsInstance(amp_flow, flow.Flow) - - self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires) - self.assertIn(constants.UPDATE_DICT, amp_flow.requires) - self.assertIn(constants.LOADBALANCER, amp_flow.provides) - - self.assertEqual(4, len(amp_flow.provides)) - self.assertEqual(2, len(amp_flow.requires)) - - amp_flow = self.LBFlow.get_post_lb_amp_association_flow( - '123', constants.TOPOLOGY_ACTIVE_STANDBY) - - self.assertIsInstance(amp_flow, flow.Flow) - - self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires) - self.assertIn(constants.UPDATE_DICT, amp_flow.requires) - self.assertIn(constants.LOADBALANCER, amp_flow.provides) - - self.assertEqual(4, len(amp_flow.provides)) - self.assertEqual(2, len(amp_flow.requires)) - - def test_get_create_load_balancer_flows_single_listeners( - self, mock_get_net_driver): - create_flow = ( - self.LBFlow.get_create_load_balancer_flow( - constants.TOPOLOGY_SINGLE, True - ) - ) - self.assertIsInstance(create_flow, flow.Flow) - self.assertIn(constants.LOADBALANCER_ID, create_flow.requires) - self.assertIn(constants.UPDATE_DICT, create_flow.requires) - self.assertIn(constants.BUILD_TYPE_PRIORITY, create_flow.requires) - self.assertIn(constants.FLAVOR, create_flow.requires) - self.assertIn(constants.AVAILABILITY_ZONE, create_flow.requires) - self.assertIn(constants.SERVER_GROUP_ID, create_flow.requires) - - self.assertIn(constants.LISTENERS, create_flow.provides) - self.assertIn(constants.SUBNET, create_flow.provides) - self.assertIn(constants.AMPHORA, create_flow.provides) - self.assertIn(constants.AMPHORA_ID, create_flow.provides) - self.assertIn(constants.COMPUTE_ID, create_flow.provides) - self.assertIn(constants.COMPUTE_OBJ, create_flow.provides) - self.assertIn(constants.LOADBALANCER, create_flow.provides) - self.assertIn(constants.DELTAS, create_flow.provides) - self.assertIn(constants.UPDATED_PORTS, create_flow.provides) - self.assertIn(constants.VIP, create_flow.provides) - 
self.assertIn(constants.AMP_DATA, create_flow.provides) - self.assertIn(constants.SERVER_PEM, create_flow.provides) - self.assertIn(constants.AMPHORA_NETWORK_CONFIG, create_flow.provides) - self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, create_flow.provides) - - self.assertEqual(6, len(create_flow.requires)) - self.assertEqual(14, len(create_flow.provides)) - - def test_get_create_load_balancer_flows_active_standby_listeners( - self, mock_get_net_driver): - create_flow = ( - self.LBFlow.get_create_load_balancer_flow( - constants.TOPOLOGY_ACTIVE_STANDBY, True - ) - ) - self.assertIsInstance(create_flow, flow.Flow) - self.assertIn(constants.LOADBALANCER_ID, create_flow.requires) - self.assertIn(constants.UPDATE_DICT, create_flow.requires) - - self.assertIn(constants.LISTENERS, create_flow.provides) - self.assertIn(constants.AMPHORA, create_flow.provides) - self.assertIn(constants.AMPHORA_ID, create_flow.provides) - self.assertIn(constants.COMPUTE_ID, create_flow.provides) - self.assertIn(constants.COMPUTE_OBJ, create_flow.provides) - self.assertIn(constants.LOADBALANCER, create_flow.provides) - self.assertIn(constants.DELTAS, create_flow.provides) - self.assertIn(constants.UPDATED_PORTS, create_flow.provides) - self.assertIn(constants.VIP, create_flow.provides) - self.assertIn(constants.AMP_DATA, create_flow.provides) - self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, - create_flow.provides) - - self.assertEqual(6, len(create_flow.requires)) - self.assertEqual(16, len(create_flow.provides), - create_flow.provides) - - def _test_get_failover_LB_flow_single(self, amphorae): - lb_mock = mock.MagicMock() - lb_mock.id = uuidutils.generate_uuid() - lb_mock.topology = constants.TOPOLOGY_SINGLE - - failover_flow = self.LBFlow.get_failover_LB_flow(amphorae, lb_mock) - - self.assertIsInstance(failover_flow, flow.Flow) - - self.assertIn(constants.AVAILABILITY_ZONE, failover_flow.requires) - self.assertIn(constants.BUILD_TYPE_PRIORITY, failover_flow.requires) - self.assertIn(constants.FLAVOR, failover_flow.requires) - self.assertIn(constants.LOADBALANCER, failover_flow.requires) - self.assertIn(constants.LOADBALANCER_ID, failover_flow.requires) - - self.assertIn(constants.UPDATED_PORTS, failover_flow.provides) - self.assertIn(constants.AMPHORA, failover_flow.provides) - self.assertIn(constants.AMPHORA_ID, failover_flow.provides) - self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, - failover_flow.provides) - self.assertIn(constants.BASE_PORT, failover_flow.provides) - self.assertIn(constants.COMPUTE_ID, failover_flow.provides) - self.assertIn(constants.COMPUTE_OBJ, failover_flow.provides) - self.assertIn(constants.DELTA, failover_flow.provides) - self.assertIn(constants.LOADBALANCER, failover_flow.provides) - self.assertIn(constants.SERVER_PEM, failover_flow.provides) - self.assertIn(constants.VIP, failover_flow.provides) - self.assertIn(constants.VIP_SG_ID, failover_flow.provides) - - self.assertEqual(6, len(failover_flow.requires), - failover_flow.requires) - self.assertEqual(12, len(failover_flow.provides), - failover_flow.provides) - - def test_get_failover_LB_flow_no_amps_single(self, mock_get_net_driver): - self._test_get_failover_LB_flow_single([]) - - def test_get_failover_LB_flow_one_amp_single(self, mock_get_net_driver): - amphora_mock = mock.MagicMock() - amphora_mock.role = constants.ROLE_STANDALONE - amphora_mock.lb_network_id = uuidutils.generate_uuid() - amphora_mock.compute_id = uuidutils.generate_uuid() - amphora_mock.vrrp_port_id = None - amphora_mock.vrrp_ip = None - - 
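        # NOTE(editor): illustrative aside -- unittest.mock.MagicMock
        # fabricates a truthy attribute on first access, so the fields the
        # failover flow builder branches on are pinned explicitly above.
        # A minimal demonstration:
        #
        #     from unittest import mock
        #
        #     m = mock.MagicMock()
        #     assert bool(m.vrrp_port_id)     # auto-created attribute, truthy
        #     m.vrrp_port_id = None           # pinned, so the builder takes
        #     assert m.vrrp_port_id is None   # the "no VRRP port" branch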
self._test_get_failover_LB_flow_single([amphora_mock]) - - def test_get_failover_LB_flow_one_bogus_amp_single(self, - mock_get_net_driver): - amphora_mock = mock.MagicMock() - amphora_mock.role = 'bogus' - amphora_mock.lb_network_id = uuidutils.generate_uuid() - amphora_mock.compute_id = uuidutils.generate_uuid() - amphora_mock.vrrp_port_id = None - amphora_mock.vrrp_ip = None - - self._test_get_failover_LB_flow_single([amphora_mock]) - - def test_get_failover_LB_flow_two_amp_single(self, mock_get_net_driver): - amphora_mock = mock.MagicMock() - amphora2_mock = mock.MagicMock() - amphora2_mock.role = constants.ROLE_STANDALONE - amphora2_mock.lb_network_id = uuidutils.generate_uuid() - amphora2_mock.compute_id = uuidutils.generate_uuid() - amphora2_mock.vrrp_port_id = None - amphora2_mock.vrrp_ip = None - - self._test_get_failover_LB_flow_single([amphora_mock, amphora2_mock]) - - def _test_get_failover_LB_flow_no_amps_act_stdby(self, amphorae): - lb_mock = mock.MagicMock() - lb_mock.id = uuidutils.generate_uuid() - lb_mock.topology = constants.TOPOLOGY_ACTIVE_STANDBY - - failover_flow = self.LBFlow.get_failover_LB_flow(amphorae, lb_mock) - - self.assertIsInstance(failover_flow, flow.Flow) - - self.assertIn(constants.AVAILABILITY_ZONE, failover_flow.requires) - self.assertIn(constants.BUILD_TYPE_PRIORITY, failover_flow.requires) - self.assertIn(constants.FLAVOR, failover_flow.requires) - self.assertIn(constants.LOADBALANCER, failover_flow.requires) - self.assertIn(constants.LOADBALANCER_ID, failover_flow.requires) - - self.assertIn(constants.UPDATED_PORTS, failover_flow.provides) - self.assertIn(constants.AMP_VRRP_INT, failover_flow.provides) - self.assertIn(constants.AMPHORA, failover_flow.provides) - self.assertIn(constants.AMPHORA_ID, failover_flow.provides) - self.assertIn(constants.AMPHORAE, failover_flow.provides) - self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, - failover_flow.provides) - self.assertIn(constants.BASE_PORT, failover_flow.provides) - self.assertIn(constants.COMPUTE_ID, failover_flow.provides) - self.assertIn(constants.COMPUTE_OBJ, failover_flow.provides) - self.assertIn(constants.DELTA, failover_flow.provides) - self.assertIn(constants.FIRST_AMP_NETWORK_CONFIGS, - failover_flow.provides) - self.assertIn(constants.FIRST_AMP_VRRP_INTERFACE, - failover_flow.provides) - self.assertIn(constants.LOADBALANCER, failover_flow.provides) - self.assertIn(constants.SERVER_PEM, failover_flow.provides) - self.assertIn(constants.VIP, failover_flow.provides) - self.assertIn(constants.VIP_SG_ID, failover_flow.provides) - - self.assertEqual(6, len(failover_flow.requires), - failover_flow.requires) - self.assertEqual(16, len(failover_flow.provides), - failover_flow.provides) - - def test_get_failover_LB_flow_no_amps_act_stdby(self, mock_get_net_driver): - self._test_get_failover_LB_flow_no_amps_act_stdby([]) - - def test_get_failover_LB_flow_one_amp_act_stdby(self, - mock_get_net_driver): - amphora_mock = mock.MagicMock() - amphora_mock.role = constants.ROLE_MASTER - amphora_mock.lb_network_id = uuidutils.generate_uuid() - amphora_mock.compute_id = uuidutils.generate_uuid() - amphora_mock.vrrp_port_id = None - amphora_mock.vrrp_ip = None - - self._test_get_failover_LB_flow_no_amps_act_stdby([amphora_mock]) - - def test_get_failover_LB_flow_two_amps_act_stdby(self, - mock_get_net_driver): - amphora_mock = mock.MagicMock() - amphora_mock.role = constants.ROLE_MASTER - amphora_mock.lb_network_id = uuidutils.generate_uuid() - amphora_mock.compute_id = uuidutils.generate_uuid() - 
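        # NOTE(editor): the VRRP addresses assigned below ('192.0.2.46' and
        # '2001:db8::46') come from the IPv4 and IPv6 documentation ranges
        # (RFC 5737 and RFC 3849), so the fixtures exercise both address
        # families without referencing routable networks.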
amphora_mock.vrrp_port_id = uuidutils.generate_uuid() - amphora_mock.vrrp_ip = '192.0.2.46' - amphora2_mock = mock.MagicMock() - amphora2_mock.role = constants.ROLE_BACKUP - amphora2_mock.lb_network_id = uuidutils.generate_uuid() - amphora2_mock.compute_id = uuidutils.generate_uuid() - amphora2_mock.vrrp_port_id = uuidutils.generate_uuid() - amphora2_mock.vrrp_ip = '2001:db8::46' - - self._test_get_failover_LB_flow_no_amps_act_stdby([amphora_mock, - amphora2_mock]) - - def test_get_failover_LB_flow_three_amps_act_stdby(self, - mock_get_net_driver): - amphora_mock = mock.MagicMock() - amphora_mock.role = constants.ROLE_MASTER - amphora_mock.lb_network_id = uuidutils.generate_uuid() - amphora_mock.compute_id = uuidutils.generate_uuid() - amphora_mock.vrrp_port_id = uuidutils.generate_uuid() - amphora_mock.vrrp_ip = '192.0.2.46' - amphora2_mock = mock.MagicMock() - amphora2_mock.role = constants.ROLE_BACKUP - amphora2_mock.lb_network_id = uuidutils.generate_uuid() - amphora2_mock.compute_id = uuidutils.generate_uuid() - amphora2_mock.vrrp_port_id = uuidutils.generate_uuid() - amphora2_mock.vrrp_ip = '2001:db8::46' - amphora3_mock = mock.MagicMock() - amphora3_mock.vrrp_ip = None - - self._test_get_failover_LB_flow_no_amps_act_stdby( - [amphora_mock, amphora2_mock, amphora3_mock]) - - def test_get_failover_LB_flow_two_amps_bogus_act_stdby( - self, mock_get_net_driver): - amphora_mock = mock.MagicMock() - amphora_mock.role = 'bogus' - amphora_mock.lb_network_id = uuidutils.generate_uuid() - amphora_mock.compute_id = uuidutils.generate_uuid() - amphora_mock.vrrp_port_id = uuidutils.generate_uuid() - amphora_mock.vrrp_ip = '192.0.2.46' - amphora2_mock = mock.MagicMock() - amphora2_mock.role = constants.ROLE_MASTER - amphora2_mock.lb_network_id = uuidutils.generate_uuid() - amphora2_mock.compute_id = uuidutils.generate_uuid() - amphora2_mock.vrrp_port_id = uuidutils.generate_uuid() - amphora2_mock.vrrp_ip = '2001:db8::46' - - self._test_get_failover_LB_flow_no_amps_act_stdby([amphora_mock, - amphora2_mock]) - - def test_get_failover_LB_flow_two_amps_standalone_act_stdby( - self, mock_get_net_driver): - amphora_mock = mock.MagicMock() - amphora_mock.role = constants.ROLE_STANDALONE - amphora_mock.lb_network_id = uuidutils.generate_uuid() - amphora_mock.compute_id = uuidutils.generate_uuid() - amphora_mock.vrrp_port_id = uuidutils.generate_uuid() - amphora_mock.vrrp_ip = '192.0.2.46' - amphora2_mock = mock.MagicMock() - amphora2_mock.role = constants.ROLE_MASTER - amphora2_mock.lb_network_id = uuidutils.generate_uuid() - amphora2_mock.compute_id = uuidutils.generate_uuid() - amphora2_mock.vrrp_port_id = uuidutils.generate_uuid() - amphora2_mock.vrrp_ip = '2001:db8::46' - - self._test_get_failover_LB_flow_no_amps_act_stdby([amphora_mock, - amphora2_mock]) diff --git a/octavia/tests/unit/controller/worker/v1/flows/test_member_flows.py b/octavia/tests/unit/controller/worker/v1/flows/test_member_flows.py deleted file mode 100644 index 68780355b6..0000000000 --- a/octavia/tests/unit/controller/worker/v1/flows/test_member_flows.py +++ /dev/null @@ -1,106 +0,0 @@ -# Copyright 2015 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -from unittest import mock - -from taskflow.patterns import linear_flow as flow - -from octavia.common import constants - -from octavia.controller.worker.v1.flows import member_flows -import octavia.tests.unit.base as base - - -# NOTE: We patch the get_network_driver for all the calls so we don't -# inadvertently make real calls. -@mock.patch('octavia.common.utils.get_network_driver') -class TestMemberFlows(base.TestCase): - - def setUp(self): - self.MemberFlow = member_flows.MemberFlows() - - super().setUp() - - def test_get_create_member_flow(self, mock_get_net_driver): - - member_flow = self.MemberFlow.get_create_member_flow() - - self.assertIsInstance(member_flow, flow.Flow) - - self.assertIn(constants.MEMBER, member_flow.requires) - self.assertIn(constants.LISTENERS, member_flow.requires) - self.assertIn(constants.LOADBALANCER, member_flow.requires) - self.assertIn(constants.POOL, member_flow.requires) - self.assertIn(constants.AVAILABILITY_ZONE, member_flow.requires) - - self.assertIn(constants.DELTAS, member_flow.provides) - self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, member_flow.provides) - self.assertIn(constants.UPDATED_PORTS, member_flow.provides) - - self.assertEqual(6, len(member_flow.requires)) - self.assertEqual(3, len(member_flow.provides)) - - def test_get_delete_member_flow(self, mock_get_net_driver): - - member_flow = self.MemberFlow.get_delete_member_flow() - - self.assertIsInstance(member_flow, flow.Flow) - - self.assertIn(constants.MEMBER, member_flow.requires) - self.assertIn(constants.LISTENERS, member_flow.requires) - self.assertIn(constants.LOADBALANCER, member_flow.requires) - self.assertIn(constants.LOADBALANCER_ID, member_flow.requires) - self.assertIn(constants.POOL, member_flow.requires) - self.assertIn(constants.AVAILABILITY_ZONE, member_flow.requires) - - self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, member_flow.provides) - self.assertIn(constants.DELTAS, member_flow.provides) - self.assertIn(constants.UPDATED_PORTS, member_flow.provides) - - self.assertEqual(6, len(member_flow.requires)) - self.assertEqual(3, len(member_flow.provides)) - - def test_get_update_member_flow(self, mock_get_net_driver): - - member_flow = self.MemberFlow.get_update_member_flow() - - self.assertIsInstance(member_flow, flow.Flow) - - self.assertIn(constants.MEMBER, member_flow.requires) - self.assertIn(constants.LISTENERS, member_flow.requires) - self.assertIn(constants.LOADBALANCER, member_flow.requires) - self.assertIn(constants.POOL, member_flow.requires) - self.assertIn(constants.UPDATE_DICT, member_flow.requires) - - self.assertEqual(5, len(member_flow.requires)) - self.assertEqual(0, len(member_flow.provides)) - - def test_get_batch_update_members_flow(self, mock_get_net_driver): - - member_flow = self.MemberFlow.get_batch_update_members_flow( - [], [], []) - - self.assertIsInstance(member_flow, flow.Flow) - - self.assertIn(constants.LISTENERS, member_flow.requires) - self.assertIn(constants.LOADBALANCER, member_flow.requires) - self.assertIn(constants.POOL, member_flow.requires) - 
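        # NOTE(editor): a hedged sketch, not the removed implementation --
        # batch member updates lend themselves to taskflow's unordered
        # pattern, which groups per-member subflows that have no ordering
        # dependency on each other (``UpdateMemberTask`` and ``members``
        # are hypothetical names here):
        #
        #     from taskflow.patterns import unordered_flow
        #
        #     batch = unordered_flow.Flow('batch-update-members')
        #     for member in members:
        #         batch.add(UpdateMemberTask(
        #             name='update-member-%s' % member.id,
        #             inject={'member': member}))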
self.assertIn(constants.AVAILABILITY_ZONE, member_flow.requires) - - self.assertIn(constants.DELTAS, member_flow.provides) - self.assertIn(constants.UPDATED_PORTS, member_flow.provides) - self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, member_flow.provides) - - self.assertEqual(5, len(member_flow.requires)) - self.assertEqual(3, len(member_flow.provides)) diff --git a/octavia/tests/unit/controller/worker/v1/flows/test_pool_flows.py b/octavia/tests/unit/controller/worker/v1/flows/test_pool_flows.py deleted file mode 100644 index 908a4b4f29..0000000000 --- a/octavia/tests/unit/controller/worker/v1/flows/test_pool_flows.py +++ /dev/null @@ -1,77 +0,0 @@ -# Copyright 2015 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from taskflow.patterns import linear_flow as flow - -from octavia.common import constants -from octavia.controller.worker.v1.flows import pool_flows -import octavia.tests.unit.base as base - - -class TestPoolFlows(base.TestCase): - - def setUp(self): - self.PoolFlow = pool_flows.PoolFlows() - - super().setUp() - - def test_get_create_pool_flow(self): - - pool_flow = self.PoolFlow.get_create_pool_flow() - - self.assertIsInstance(pool_flow, flow.Flow) - - self.assertIn(constants.LISTENERS, pool_flow.requires) - self.assertIn(constants.LOADBALANCER, pool_flow.requires) - - self.assertEqual(3, len(pool_flow.requires)) - self.assertEqual(0, len(pool_flow.provides)) - - def test_get_delete_pool_flow(self): - - pool_flow = self.PoolFlow.get_delete_pool_flow() - - self.assertIsInstance(pool_flow, flow.Flow) - - self.assertIn(constants.LISTENERS, pool_flow.requires) - self.assertIn(constants.LOADBALANCER, pool_flow.requires) - self.assertIn(constants.POOL, pool_flow.requires) - - self.assertEqual(3, len(pool_flow.requires)) - self.assertEqual(1, len(pool_flow.provides)) - - def test_get_delete_pool_flow_internal(self): - - pool_flow = self.PoolFlow.get_delete_pool_flow_internal('test') - - self.assertIsInstance(pool_flow, flow.Flow) - self.assertIn('test', pool_flow.requires) - - self.assertEqual(1, len(pool_flow.requires)) - self.assertEqual(1, len(pool_flow.provides)) - - def test_get_update_pool_flow(self): - - pool_flow = self.PoolFlow.get_update_pool_flow() - - self.assertIsInstance(pool_flow, flow.Flow) - - self.assertIn(constants.POOL, pool_flow.requires) - self.assertIn(constants.LISTENERS, pool_flow.requires) - self.assertIn(constants.LOADBALANCER, pool_flow.requires) - self.assertIn(constants.UPDATE_DICT, pool_flow.requires) - - self.assertEqual(4, len(pool_flow.requires)) - self.assertEqual(0, len(pool_flow.provides)) diff --git a/octavia/tests/unit/controller/worker/v1/tasks/__init__.py b/octavia/tests/unit/controller/worker/v1/tasks/__init__.py deleted file mode 100644 index 94e731d201..0000000000 --- a/octavia/tests/unit/controller/worker/v1/tasks/__init__.py +++ /dev/null @@ -1,11 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. diff --git a/octavia/tests/unit/controller/worker/v1/tasks/test_amphora_driver_tasks.py b/octavia/tests/unit/controller/worker/v1/tasks/test_amphora_driver_tasks.py deleted file mode 100644 index 6e7a4e0743..0000000000 --- a/octavia/tests/unit/controller/worker/v1/tasks/test_amphora_driver_tasks.py +++ /dev/null @@ -1,792 +0,0 @@ -# Copyright 2015 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -from unittest import mock - -from cryptography import fernet -from oslo_config import cfg -from oslo_config import fixture as oslo_fixture -from oslo_utils import uuidutils -from taskflow.types import failure - -from octavia.amphorae.driver_exceptions import exceptions as driver_except -from octavia.common import constants -from octavia.common import data_models -from octavia.common import utils -from octavia.controller.worker.v1.tasks import amphora_driver_tasks -from octavia.db import repositories as repo -import octavia.tests.unit.base as base - - -AMP_ID = uuidutils.generate_uuid() -COMPUTE_ID = uuidutils.generate_uuid() -LISTENER_ID = uuidutils.generate_uuid() -LB_ID = uuidutils.generate_uuid() -CONN_MAX_RETRIES = 10 -CONN_RETRY_INTERVAL = 6 -FAKE_CONFIG_FILE = 'fake config file' - -_amphora_mock = mock.MagicMock() -_amphora_mock.id = AMP_ID -_amphora_mock.status = constants.AMPHORA_ALLOCATED -_amphora_mock.vrrp_ip = '198.51.100.65' -_load_balancer_mock = mock.MagicMock() -_load_balancer_mock.id = LB_ID -_listener_mock = mock.MagicMock() -_listener_mock.id = LISTENER_ID -_load_balancer_mock.listeners = [_listener_mock] -_vip_mock = mock.MagicMock() -_load_balancer_mock.vip = _vip_mock -_LB_mock = mock.MagicMock() -_amphorae_mock = [_amphora_mock] -_amphora_network_config_mock = mock.MagicMock() -_amphorae_network_config_mock = { - _amphora_mock.id: _amphora_network_config_mock} -_network_mock = mock.MagicMock() -_port_mock = mock.MagicMock() -_ports_mock = [_port_mock] -_session_mock = mock.MagicMock() - - -@mock.patch('octavia.db.repositories.AmphoraRepository.update') -@mock.patch('octavia.db.repositories.ListenerRepository.update') -@mock.patch('octavia.db.repositories.ListenerRepository.get', - return_value=_listener_mock) -@mock.patch('octavia.db.api.get_session', return_value=_session_mock) -@mock.patch('octavia.controller.worker.v1.tasks.amphora_driver_tasks.LOG') -@mock.patch('oslo_utils.uuidutils.generate_uuid', return_value=AMP_ID) -@mock.patch('stevedore.driver.DriverManager.driver') -class TestAmphoraDriverTasks(base.TestCase): - - def setUp(self): - - _LB_mock.amphorae = [_amphora_mock] - _LB_mock.id = LB_ID - conf = 
oslo_fixture.Config(cfg.CONF) - conf.config(group="haproxy_amphora", - active_connection_max_retries=CONN_MAX_RETRIES) - conf.config(group="haproxy_amphora", - active_connection_retry_interval=CONN_RETRY_INTERVAL) - conf.config(group="controller_worker", - loadbalancer_topology=constants.TOPOLOGY_SINGLE) - self.timeout_dict = {constants.REQ_CONN_TIMEOUT: 1, - constants.REQ_READ_TIMEOUT: 2, - constants.CONN_MAX_RETRIES: 3, - constants.CONN_RETRY_INTERVAL: 4} - super().setUp() - - @mock.patch('octavia.db.repositories.LoadBalancerRepository.get') - def test_amp_listeners_update(self, - mock_lb_repo_get, - mock_driver, - mock_generate_uuid, - mock_log, - mock_get_session, - mock_listener_repo_get, - mock_listener_repo_update, - mock_amphora_repo_update): - - mock_lb_repo_get.return_value = _LB_mock - amp_list_update_obj = amphora_driver_tasks.AmpListenersUpdate() - amp_list_update_obj.execute(_load_balancer_mock, _amphora_mock, - self.timeout_dict) - - mock_driver.update_amphora_listeners.assert_called_once_with( - _LB_mock, _amphora_mock, self.timeout_dict) - - mock_driver.update_amphora_listeners.side_effect = Exception('boom') - - amp_list_update_obj.execute(_load_balancer_mock, _amphora_mock, - self.timeout_dict) - - mock_amphora_repo_update.assert_called_once_with( - _session_mock, AMP_ID, status=constants.ERROR) - - @mock.patch('octavia.db.repositories.LoadBalancerRepository.get') - def test_amphorae_listeners_update(self, - mock_lb_repo_get, - mock_driver, - mock_generate_uuid, - mock_log, - mock_get_session, - mock_listener_repo_get, - mock_listener_repo_update, - mock_amphora_repo_update): - - mock_lb_repo_get.return_value = _LB_mock - amp_list_update_obj = amphora_driver_tasks.AmphoraIndexListenerUpdate() - amp_list_update_obj.execute(_load_balancer_mock, 0, - [_amphora_mock], self.timeout_dict) - - mock_driver.update_amphora_listeners.assert_called_once_with( - _LB_mock, _amphora_mock, self.timeout_dict) - - mock_driver.update_amphora_listeners.side_effect = Exception('boom') - - amp_list_update_obj.execute(_load_balancer_mock, 0, - [_amphora_mock], self.timeout_dict) - - mock_amphora_repo_update.assert_called_once_with( - _session_mock, AMP_ID, status=constants.ERROR) - - def test_listener_update(self, - mock_driver, - mock_generate_uuid, - mock_log, - mock_get_session, - mock_listener_repo_get, - mock_listener_repo_update, - mock_amphora_repo_update): - - listener_update_obj = amphora_driver_tasks.ListenersUpdate() - listener_update_obj.execute(_load_balancer_mock) - - mock_driver.update.assert_called_once_with(_load_balancer_mock) - - # Test the revert - amp = listener_update_obj.revert(_load_balancer_mock) - repo.ListenerRepository.update.assert_called_once_with( - _session_mock, - id=LISTENER_ID, - provisioning_status=constants.ERROR) - self.assertIsNone(amp) - - # Test the revert with exception - repo.ListenerRepository.update.reset_mock() - mock_listener_repo_update.side_effect = Exception('fail') - amp = listener_update_obj.revert(_load_balancer_mock) - repo.ListenerRepository.update.assert_called_once_with( - _session_mock, - id=LISTENER_ID, - provisioning_status=constants.ERROR) - self.assertIsNone(amp) - - def test_listeners_update(self, - mock_driver, - mock_generate_uuid, - mock_log, - mock_get_session, - mock_listener_repo_get, - mock_listener_repo_update, - mock_amphora_repo_update): - listeners_update_obj = amphora_driver_tasks.ListenersUpdate() - listeners = [data_models.Listener(id='listener1'), - data_models.Listener(id='listener2')] - vip = 
data_models.Vip(ip_address='10.0.0.1') - lb = data_models.LoadBalancer(id='lb1', listeners=listeners, vip=vip) - listeners_update_obj.execute(lb) - mock_driver.update.assert_called_once_with(lb) - self.assertEqual(1, mock_driver.update.call_count) - - # Test the revert - amp = listeners_update_obj.revert(lb) - expected_db_calls = [mock.call(_session_mock, - id=listeners[0].id, - provisioning_status=constants.ERROR), - mock.call(_session_mock, - id=listeners[1].id, - provisioning_status=constants.ERROR)] - repo.ListenerRepository.update.assert_has_calls(expected_db_calls) - self.assertEqual(2, repo.ListenerRepository.update.call_count) - self.assertIsNone(amp) - - @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' - 'mark_listener_prov_status_error') - def test_amphora_index_listeners_reload( - self, mock_prov_status_error, mock_driver, mock_generate_uuid, - mock_log, mock_get_session, mock_listener_repo_get, - mock_listener_repo_update, mock_amphora_repo_update): - amphora_mock = mock.MagicMock() - listeners_reload_obj = ( - amphora_driver_tasks.AmphoraIndexListenersReload()) - mock_lb = mock.MagicMock() - mock_listener = mock.MagicMock() - mock_listener.id = '12345' - mock_driver.reload.side_effect = [mock.DEFAULT, Exception('boom')] - - # Test no listeners - mock_lb.listeners = None - listeners_reload_obj.execute(mock_lb, 0, None) - mock_driver.reload.assert_not_called() - - # Test with listeners - mock_driver.start.reset_mock() - mock_lb.listeners = [mock_listener] - listeners_reload_obj.execute(mock_lb, 0, [amphora_mock], - timeout_dict=self.timeout_dict) - mock_driver.reload.assert_called_once_with(mock_lb, amphora_mock, - self.timeout_dict) - - # Test with reload exception - mock_driver.reload.reset_mock() - listeners_reload_obj.execute(mock_lb, 0, [amphora_mock], - timeout_dict=self.timeout_dict) - mock_driver.reload.assert_called_once_with(mock_lb, amphora_mock, - self.timeout_dict) - mock_amphora_repo_update.assert_called_once_with( - _session_mock, amphora_mock.id, status=constants.ERROR) - - @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' 
- 'mark_listener_prov_status_error') - def test_listeners_start(self, - mock_prov_status_error, - mock_driver, - mock_generate_uuid, - mock_log, - mock_get_session, - mock_listener_repo_get, - mock_listener_repo_update, - mock_amphora_repo_update): - listeners_start_obj = amphora_driver_tasks.ListenersStart() - mock_lb = mock.MagicMock() - mock_listener = mock.MagicMock() - mock_listener.id = '12345' - - # Test no listeners - mock_lb.listeners = None - listeners_start_obj.execute(mock_lb) - mock_driver.start.assert_not_called() - - # Test with listeners - mock_driver.start.reset_mock() - mock_lb.listeners = [mock_listener] - listeners_start_obj.execute(mock_lb) - mock_driver.start.assert_called_once_with(mock_lb, None) - - # Test revert - mock_lb.listeners = [mock_listener] - listeners_start_obj.revert(mock_lb) - mock_prov_status_error.assert_called_once_with('12345') - - def test_listener_delete(self, - mock_driver, - mock_generate_uuid, - mock_log, - mock_get_session, - mock_listener_repo_get, - mock_listener_repo_update, - mock_amphora_repo_update): - - listener_delete_obj = amphora_driver_tasks.ListenerDelete() - listener_delete_obj.execute(_listener_mock) - - mock_driver.delete.assert_called_once_with(_listener_mock) - - # Test the revert - amp = listener_delete_obj.revert(_listener_mock) - repo.ListenerRepository.update.assert_called_once_with( - _session_mock, - id=LISTENER_ID, - provisioning_status=constants.ERROR) - self.assertIsNone(amp) - - # Test the revert with exception - repo.ListenerRepository.update.reset_mock() - mock_listener_repo_update.side_effect = Exception('fail') - amp = listener_delete_obj.revert(_listener_mock) - repo.ListenerRepository.update.assert_called_once_with( - _session_mock, - id=LISTENER_ID, - provisioning_status=constants.ERROR) - self.assertIsNone(amp) - - def test_amphora_get_info(self, - mock_driver, - mock_generate_uuid, - mock_log, - mock_get_session, - mock_listener_repo_get, - mock_listener_repo_update, - mock_amphora_repo_update): - - amphora_get_info_obj = amphora_driver_tasks.AmphoraGetInfo() - amphora_get_info_obj.execute(_amphora_mock) - - mock_driver.get_info.assert_called_once_with( - _amphora_mock) - - def test_amphora_get_diagnostics(self, - mock_driver, - mock_generate_uuid, - mock_log, - mock_get_session, - mock_listener_repo_get, - mock_listener_repo_update, - mock_amphora_repo_update): - - amphora_get_diagnostics_obj = (amphora_driver_tasks. 
- AmphoraGetDiagnostics()) - amphora_get_diagnostics_obj.execute(_amphora_mock) - - mock_driver.get_diagnostics.assert_called_once_with( - _amphora_mock) - - def test_amphora_finalize(self, - mock_driver, - mock_generate_uuid, - mock_log, - mock_get_session, - mock_listener_repo_get, - mock_listener_repo_update, - mock_amphora_repo_update): - - amphora_finalize_obj = amphora_driver_tasks.AmphoraFinalize() - amphora_finalize_obj.execute(_amphora_mock) - - mock_driver.finalize_amphora.assert_called_once_with( - _amphora_mock) - - # Test revert - amp = amphora_finalize_obj.revert(None, _amphora_mock) - repo.AmphoraRepository.update.assert_called_once_with( - _session_mock, - id=AMP_ID, - status=constants.ERROR) - self.assertIsNone(amp) - - # Test revert with exception - repo.AmphoraRepository.update.reset_mock() - mock_amphora_repo_update.side_effect = Exception('fail') - amp = amphora_finalize_obj.revert(None, _amphora_mock) - repo.AmphoraRepository.update.assert_called_once_with( - _session_mock, - id=AMP_ID, - status=constants.ERROR) - self.assertIsNone(amp) - - # Test revert when this task failed - repo.AmphoraRepository.update.reset_mock() - amp = amphora_finalize_obj.revert( - failure.Failure.from_exception(Exception('boom')), _amphora_mock) - repo.AmphoraRepository.update.assert_not_called() - - def test_amphora_post_network_plug(self, - mock_driver, - mock_generate_uuid, - mock_log, - mock_get_session, - mock_listener_repo_get, - mock_listener_repo_update, - mock_amphora_repo_update): - - amphora_post_network_plug_obj = (amphora_driver_tasks. - AmphoraPostNetworkPlug()) - amphora_post_network_plug_obj.execute(_amphora_mock, _ports_mock, - _amphora_network_config_mock) - - (mock_driver.post_network_plug. - assert_called_once_with)(_amphora_mock, _port_mock, - _amphora_network_config_mock) - - # Test revert - amp = amphora_post_network_plug_obj.revert(None, _amphora_mock) - repo.AmphoraRepository.update.assert_called_once_with( - _session_mock, - id=AMP_ID, - status=constants.ERROR) - - self.assertIsNone(amp) - - # Test revert with exception - repo.AmphoraRepository.update.reset_mock() - mock_amphora_repo_update.side_effect = Exception('fail') - amp = amphora_post_network_plug_obj.revert(None, _amphora_mock) - repo.AmphoraRepository.update.assert_called_once_with( - _session_mock, - id=AMP_ID, - status=constants.ERROR) - - self.assertIsNone(amp) - - # Test revert when this task failed - repo.AmphoraRepository.update.reset_mock() - amp = amphora_post_network_plug_obj.revert( - failure.Failure.from_exception(Exception('boom')), _amphora_mock) - repo.AmphoraRepository.update.assert_not_called() - - @mock.patch('octavia.db.repositories.AmphoraRepository.get_all') - def test_amphorae_post_network_plug(self, mock_amp_get_all, mock_driver, - mock_generate_uuid, - mock_log, - mock_get_session, - mock_listener_repo_get, - mock_listener_repo_update, - mock_amphora_repo_update): - mock_driver.get_network.return_value = _network_mock - _amphora_mock.id = AMP_ID - _amphora_mock.compute_id = COMPUTE_ID - mock_amp_get_all.return_value = [[_amphora_mock], None] - amphora_post_network_plug_obj = (amphora_driver_tasks. - AmphoraePostNetworkPlug()) - - port_mock = mock.Mock() - _deltas_mock = {_amphora_mock.id: [port_mock]} - - amphora_post_network_plug_obj.execute(_LB_mock, _deltas_mock, - _amphorae_network_config_mock) - - (mock_driver.post_network_plug. 
- assert_called_once_with(_amphora_mock, port_mock, - _amphora_network_config_mock)) - - # Test with no ports to plug - mock_driver.post_network_plug.reset_mock() - - _deltas_mock = {'0': [port_mock]} - - amphora_post_network_plug_obj.execute(_LB_mock, _deltas_mock, - _amphora_network_config_mock) - mock_driver.post_network_plug.assert_not_called() - - # Test revert - amp = amphora_post_network_plug_obj.revert(None, _LB_mock, - _deltas_mock) - repo.AmphoraRepository.update.assert_called_once_with( - _session_mock, - id=AMP_ID, - status=constants.ERROR) - - self.assertIsNone(amp) - - # Test revert with exception - repo.AmphoraRepository.update.reset_mock() - mock_amphora_repo_update.side_effect = Exception('fail') - amp = amphora_post_network_plug_obj.revert(None, _LB_mock, - _deltas_mock) - repo.AmphoraRepository.update.assert_called_once_with( - _session_mock, - id=AMP_ID, - status=constants.ERROR) - - self.assertIsNone(amp) - - # Test revert when this task failed - repo.AmphoraRepository.update.reset_mock() - amp = amphora_post_network_plug_obj.revert( - failure.Failure.from_exception(Exception('boom')), _amphora_mock, - None) - repo.AmphoraRepository.update.assert_not_called() - - @mock.patch('octavia.db.repositories.LoadBalancerRepository.update') - def test_amphora_post_vip_plug(self, - mock_loadbalancer_repo_update, - mock_driver, - mock_generate_uuid, - mock_log, - mock_get_session, - mock_listener_repo_get, - mock_listener_repo_update, - mock_amphora_repo_update): - - amphorae_net_config_mock = mock.Mock() - amphora_post_vip_plug_obj = amphora_driver_tasks.AmphoraPostVIPPlug() - amphora_post_vip_plug_obj.execute(_amphora_mock, - _LB_mock, - amphorae_net_config_mock) - - mock_driver.post_vip_plug.assert_called_once_with( - _amphora_mock, _LB_mock, amphorae_net_config_mock) - - # Test revert - amp = amphora_post_vip_plug_obj.revert(None, _amphora_mock, _LB_mock) - repo.AmphoraRepository.update.assert_called_once_with( - _session_mock, - id=AMP_ID, - status=constants.ERROR) - repo.LoadBalancerRepository.update.assert_not_called() - - self.assertIsNone(amp) - - # Test revert with repo exceptions - repo.AmphoraRepository.update.reset_mock() - repo.LoadBalancerRepository.update.reset_mock() - mock_amphora_repo_update.side_effect = Exception('fail') - mock_loadbalancer_repo_update.side_effect = Exception('fail') - amp = amphora_post_vip_plug_obj.revert(None, _amphora_mock, _LB_mock) - repo.AmphoraRepository.update.assert_called_once_with( - _session_mock, - id=AMP_ID, - status=constants.ERROR) - repo.LoadBalancerRepository.update.assert_not_called() - - self.assertIsNone(amp) - - # Test revert when this task failed - repo.AmphoraRepository.update.reset_mock() - amp = amphora_post_vip_plug_obj.revert( - failure.Failure.from_exception(Exception('boom')), _amphora_mock, - None) - repo.AmphoraRepository.update.assert_not_called() - - @mock.patch('octavia.db.repositories.LoadBalancerRepository.update') - def test_amphorae_post_vip_plug(self, - mock_loadbalancer_repo_update, - mock_driver, - mock_generate_uuid, - mock_log, - mock_get_session, - mock_listener_repo_get, - mock_listener_repo_update, - mock_amphora_repo_update): - - amphorae_net_config_mock = mock.Mock() - amphora_post_vip_plug_obj = amphora_driver_tasks.AmphoraePostVIPPlug() - amphora_post_vip_plug_obj.execute(_LB_mock, - amphorae_net_config_mock) - - mock_driver.post_vip_plug.assert_called_once_with( - _amphora_mock, _LB_mock, amphorae_net_config_mock) - - def test_amphora_cert_upload(self, - mock_driver, - mock_generate_uuid, - 
mock_log, - mock_get_session, - mock_listener_repo_get, - mock_listener_repo_update, - mock_amphora_repo_update): - key = utils.get_compatible_server_certs_key_passphrase() - fer = fernet.Fernet(key) - pem_file_mock = fer.encrypt( - utils.get_compatible_value('test-pem-file')) - amphora_cert_upload_mock = amphora_driver_tasks.AmphoraCertUpload() - amphora_cert_upload_mock.execute(_amphora_mock, pem_file_mock) - - mock_driver.upload_cert_amp.assert_called_once_with( - _amphora_mock, fer.decrypt(pem_file_mock)) - - def test_amphora_update_vrrp_interface(self, - mock_driver, - mock_generate_uuid, - mock_log, - mock_get_session, - mock_listener_repo_get, - mock_listener_repo_update, - mock_amphora_repo_update): - FAKE_INTERFACE = 'fake0' - _LB_mock.amphorae = _amphorae_mock - mock_driver.get_interface_from_ip.side_effect = [FAKE_INTERFACE, - Exception('boom')] - - timeout_dict = {constants.CONN_MAX_RETRIES: CONN_MAX_RETRIES, - constants.CONN_RETRY_INTERVAL: CONN_RETRY_INTERVAL} - - amphora_update_vrrp_interface_obj = ( - amphora_driver_tasks.AmphoraUpdateVRRPInterface()) - amphora_update_vrrp_interface_obj.execute(_amphora_mock, timeout_dict) - mock_driver.get_interface_from_ip.assert_called_once_with( - _amphora_mock, _amphora_mock.vrrp_ip, timeout_dict=timeout_dict) - mock_amphora_repo_update.assert_called_once_with( - _session_mock, _amphora_mock.id, vrrp_interface=FAKE_INTERFACE) - - # Test with an exception - mock_amphora_repo_update.reset_mock() - amphora_update_vrrp_interface_obj.execute(_amphora_mock, timeout_dict) - mock_amphora_repo_update.assert_called_once_with( - _session_mock, _amphora_mock.id, status=constants.ERROR) - - def test_amphora_index_update_vrrp_interface( - self, mock_driver, mock_generate_uuid, mock_log, mock_get_session, - mock_listener_repo_get, mock_listener_repo_update, - mock_amphora_repo_update): - FAKE_INTERFACE = 'fake0' - _LB_mock.amphorae = _amphorae_mock - mock_driver.get_interface_from_ip.side_effect = [FAKE_INTERFACE, - Exception('boom')] - - timeout_dict = {constants.CONN_MAX_RETRIES: CONN_MAX_RETRIES, - constants.CONN_RETRY_INTERVAL: CONN_RETRY_INTERVAL} - - amphora_update_vrrp_interface_obj = ( - amphora_driver_tasks.AmphoraIndexUpdateVRRPInterface()) - amphora_update_vrrp_interface_obj.execute( - 0, [_amphora_mock], timeout_dict) - mock_driver.get_interface_from_ip.assert_called_once_with( - _amphora_mock, _amphora_mock.vrrp_ip, timeout_dict=timeout_dict) - mock_amphora_repo_update.assert_called_once_with( - _session_mock, _amphora_mock.id, vrrp_interface=FAKE_INTERFACE) - - # Test with an exception - mock_amphora_repo_update.reset_mock() - amphora_update_vrrp_interface_obj.execute( - 0, [_amphora_mock], timeout_dict) - mock_amphora_repo_update.assert_called_once_with( - _session_mock, _amphora_mock.id, status=constants.ERROR) - - @mock.patch('octavia.db.repositories.LoadBalancerRepository.get') - def test_amphora_vrrp_update(self, - mock_lb_get, - mock_driver, - mock_generate_uuid, - mock_log, - mock_get_session, - mock_listener_repo_get, - mock_listener_repo_update, - mock_amphora_repo_update): - amphorae_network_config = mock.MagicMock() - mock_driver.update_vrrp_conf.side_effect = [mock.DEFAULT, - Exception('boom')] - mock_lb_get.return_value = _LB_mock - amphora_vrrp_update_obj = ( - amphora_driver_tasks.AmphoraVRRPUpdate()) - amphora_vrrp_update_obj.execute(_LB_mock.id, amphorae_network_config, - _amphora_mock, 'fakeint0') - mock_driver.update_vrrp_conf.assert_called_once_with( - _LB_mock, amphorae_network_config, _amphora_mock, None) - - # 
Test with an exception - mock_amphora_repo_update.reset_mock() - amphora_vrrp_update_obj.execute(_LB_mock.id, amphorae_network_config, - _amphora_mock, 'fakeint0') - mock_amphora_repo_update.assert_called_once_with( - _session_mock, _amphora_mock.id, status=constants.ERROR) - - @mock.patch('octavia.db.repositories.LoadBalancerRepository.get') - def test_amphora_index_vrrp_update(self, - mock_lb_get, - mock_driver, - mock_generate_uuid, - mock_log, - mock_get_session, - mock_listener_repo_get, - mock_listener_repo_update, - mock_amphora_repo_update): - amphorae_network_config = mock.MagicMock() - mock_driver.update_vrrp_conf.side_effect = [mock.DEFAULT, - Exception('boom')] - mock_lb_get.return_value = _LB_mock - amphora_vrrp_update_obj = ( - amphora_driver_tasks.AmphoraIndexVRRPUpdate()) - - amphora_vrrp_update_obj.execute(_LB_mock.id, amphorae_network_config, - 0, [_amphora_mock], 'fakeint0', - timeout_dict=self.timeout_dict) - mock_driver.update_vrrp_conf.assert_called_once_with( - _LB_mock, amphorae_network_config, _amphora_mock, - self.timeout_dict) - - # Test with an exception - mock_amphora_repo_update.reset_mock() - amphora_vrrp_update_obj.execute(_LB_mock.id, amphorae_network_config, - 0, [_amphora_mock], 'fakeint0') - mock_amphora_repo_update.assert_called_once_with( - _session_mock, _amphora_mock.id, status=constants.ERROR) - - def test_amphora_vrrp_start(self, - mock_driver, - mock_generate_uuid, - mock_log, - mock_get_session, - mock_listener_repo_get, - mock_listener_repo_update, - mock_amphora_repo_update): - amphora_vrrp_start_obj = ( - amphora_driver_tasks.AmphoraVRRPStart()) - amphora_vrrp_start_obj.execute(_amphora_mock, - timeout_dict=self.timeout_dict) - mock_driver.start_vrrp_service.assert_called_once_with( - _amphora_mock, self.timeout_dict) - - def test_amphora_index_vrrp_start(self, - mock_driver, - mock_generate_uuid, - mock_log, - mock_get_session, - mock_listener_repo_get, - mock_listener_repo_update, - mock_amphora_repo_update): - amphora_vrrp_start_obj = ( - amphora_driver_tasks.AmphoraIndexVRRPStart()) - mock_driver.start_vrrp_service.side_effect = [mock.DEFAULT, - Exception('boom')] - - amphora_vrrp_start_obj.execute(0, [_amphora_mock], - timeout_dict=self.timeout_dict) - mock_driver.start_vrrp_service.assert_called_once_with( - _amphora_mock, self.timeout_dict) - - # Test with a start exception - mock_driver.start_vrrp_service.reset_mock() - amphora_vrrp_start_obj.execute(0, [_amphora_mock], - timeout_dict=self.timeout_dict) - mock_driver.start_vrrp_service.assert_called_once_with( - _amphora_mock, self.timeout_dict) - mock_amphora_repo_update.assert_called_once_with( - _session_mock, _amphora_mock.id, status=constants.ERROR) - - def test_amphora_compute_connectivity_wait(self, - mock_driver, - mock_generate_uuid, - mock_log, - mock_get_session, - mock_listener_repo_get, - mock_listener_repo_update, - mock_amphora_repo_update): - amp_compute_conn_wait_obj = ( - amphora_driver_tasks.AmphoraComputeConnectivityWait()) - amp_compute_conn_wait_obj.execute(_amphora_mock) - mock_driver.get_info.assert_called_once_with(_amphora_mock) - - mock_driver.get_info.side_effect = driver_except.TimeOutException() - self.assertRaises(driver_except.TimeOutException, - amp_compute_conn_wait_obj.execute, _amphora_mock) - mock_amphora_repo_update.assert_called_once_with( - _session_mock, AMP_ID, status=constants.ERROR) - - @mock.patch('octavia.amphorae.backends.agent.agent_jinja_cfg.' 
- 'AgentJinjaTemplater.build_agent_config') - def test_amphora_config_update(self, - mock_build_config, - mock_driver, - mock_generate_uuid, - mock_log, - mock_get_session, - mock_listener_repo_get, - mock_listener_repo_update, - mock_amphora_repo_update): - mock_build_config.return_value = FAKE_CONFIG_FILE - amp_config_update_obj = amphora_driver_tasks.AmphoraConfigUpdate() - mock_driver.update_amphora_agent_config.side_effect = [ - None, None, driver_except.AmpDriverNotImplementedError, - driver_except.TimeOutException] - # With Flavor - flavor = {constants.LOADBALANCER_TOPOLOGY: - constants.TOPOLOGY_ACTIVE_STANDBY} - amp_config_update_obj.execute(_amphora_mock, flavor) - mock_build_config.assert_called_once_with( - _amphora_mock.id, constants.TOPOLOGY_ACTIVE_STANDBY) - mock_driver.update_amphora_agent_config.assert_called_once_with( - _amphora_mock, FAKE_CONFIG_FILE) - # With no Flavor - mock_driver.reset_mock() - mock_build_config.reset_mock() - amp_config_update_obj.execute(_amphora_mock, None) - mock_build_config.assert_called_once_with( - _amphora_mock.id, constants.TOPOLOGY_SINGLE) - mock_driver.update_amphora_agent_config.assert_called_once_with( - _amphora_mock, FAKE_CONFIG_FILE) - # With amphora that does not support config update - mock_driver.reset_mock() - mock_build_config.reset_mock() - amp_config_update_obj.execute(_amphora_mock, flavor) - mock_build_config.assert_called_once_with( - _amphora_mock.id, constants.TOPOLOGY_ACTIVE_STANDBY) - mock_driver.update_amphora_agent_config.assert_called_once_with( - _amphora_mock, FAKE_CONFIG_FILE) - # With an unknown exception - mock_driver.reset_mock() - mock_build_config.reset_mock() - self.assertRaises(driver_except.TimeOutException, - amp_config_update_obj.execute, - _amphora_mock, flavor) diff --git a/octavia/tests/unit/controller/worker/v1/tasks/test_cert_task.py b/octavia/tests/unit/controller/worker/v1/tasks/test_cert_task.py deleted file mode 100644 index 04d6242f52..0000000000 --- a/octavia/tests/unit/controller/worker/v1/tasks/test_cert_task.py +++ /dev/null @@ -1,46 +0,0 @@ -# Copyright 2015 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# -from unittest import mock - -from cryptography import fernet -from oslo_config import cfg - -from octavia.certificates.common import local -from octavia.common import utils -from octavia.controller.worker.v1.tasks import cert_task -import octavia.tests.unit.base as base - -CONF = cfg.CONF - - -class TestCertTasks(base.TestCase): - - @mock.patch('stevedore.driver.DriverManager.driver') - def test_execute(self, mock_driver): - key = utils.get_compatible_server_certs_key_passphrase() - fer = fernet.Fernet(key) - dummy_cert = local.LocalCert( - utils.get_compatible_value('test_cert'), - utils.get_compatible_value('test_key')) - mock_driver.generate_cert_key_pair.side_effect = [dummy_cert] - c = cert_task.GenerateServerPEMTask() - pem = c.execute('123') - self.assertEqual( - fer.decrypt(pem), - dummy_cert.get_certificate() + - dummy_cert.get_private_key() - ) - mock_driver.generate_cert_key_pair.assert_called_once_with( - cn='123', validity=CONF.certificates.cert_validity_time) diff --git a/octavia/tests/unit/controller/worker/v1/tasks/test_compute_tasks.py b/octavia/tests/unit/controller/worker/v1/tasks/test_compute_tasks.py deleted file mode 100644 index 2729276432..0000000000 --- a/octavia/tests/unit/controller/worker/v1/tasks/test_compute_tasks.py +++ /dev/null @@ -1,634 +0,0 @@ -# Copyright 2015 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# -from unittest import mock - -from cryptography import fernet -from oslo_config import cfg -from oslo_config import fixture as oslo_fixture -from oslo_utils import uuidutils -import tenacity - -from octavia.common import constants -from octavia.common import exceptions -from octavia.common import utils -from octavia.controller.worker.v1.tasks import compute_tasks -from octavia.tests.common import utils as test_utils -import octavia.tests.unit.base as base - - -AMP_FLAVOR_ID = '10' -AMP_IMAGE_TAG = 'glance_tag' -AMP_SSH_KEY_NAME = None -AMP_NET = [uuidutils.generate_uuid()] -AMP_SEC_GROUPS = [] -AMP_WAIT = 12 -AMPHORA_ID = uuidutils.generate_uuid() -COMPUTE_ID = uuidutils.generate_uuid() -LB_NET_IP = '192.0.2.1' -PORT_ID = uuidutils.generate_uuid() -SERVER_GRPOUP_ID = uuidutils.generate_uuid() - - -class TestException(Exception): - - def __init__(self, value): - self.value = value - - def __str__(self): - return repr(self.value) - - -_amphora_mock = mock.MagicMock() -_amphora_mock.id = AMPHORA_ID -_amphora_mock.compute_id = COMPUTE_ID -_load_balancer_mock = mock.MagicMock() -_load_balancer_mock.amphorae = [_amphora_mock] -_port = mock.MagicMock() -_port.id = PORT_ID - - -class TestComputeTasks(base.TestCase): - - def setUp(self): - self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) - self.conf.config( - group="controller_worker", amp_flavor_id=AMP_FLAVOR_ID) - self.conf.config( - group="controller_worker", amp_image_tag=AMP_IMAGE_TAG) - self.conf.config( - group="controller_worker", amp_ssh_key_name=AMP_SSH_KEY_NAME) - self.conf.config( - group="controller_worker", amp_boot_network_list=AMP_NET) - self.conf.config( - group="controller_worker", amp_active_wait_sec=AMP_WAIT) - self.conf.config( - group="controller_worker", amp_secgroup_list=AMP_SEC_GROUPS) - self.conf.config(group="controller_worker", amp_image_owner_id='') - - _amphora_mock.id = AMPHORA_ID - _amphora_mock.status = constants.AMPHORA_ALLOCATED - - logging_mock = mock.MagicMock() - compute_tasks.LOG = logging_mock - - super().setUp() - - @mock.patch('octavia.common.jinja.logging.logging_jinja_cfg.' - 'LoggingJinjaTemplater.build_logging_config') - @mock.patch('jinja2.Environment.get_template') - @mock.patch('octavia.amphorae.backends.agent.' - 'agent_jinja_cfg.AgentJinjaTemplater.' - 'build_agent_config', return_value='test_conf') - @mock.patch('octavia.common.jinja.' - 'user_data_jinja_cfg.UserDataJinjaCfg.' 
- 'build_user_data_config', return_value='user_data_conf') - @mock.patch('stevedore.driver.DriverManager.driver') - def test_compute_create(self, mock_driver, mock_ud_conf, - mock_conf, mock_jinja, mock_log_cfg): - - image_owner_id = uuidutils.generate_uuid() - self.conf.config( - group="controller_worker", amp_image_owner_id=image_owner_id) - mock_log_cfg.return_value = 'FAKE CFG' - - createcompute = compute_tasks.ComputeCreate() - - mock_driver.build.return_value = COMPUTE_ID - # Test execute() - compute_id = createcompute.execute(_amphora_mock.id, ports=[_port], - server_group_id=SERVER_GRPOUP_ID) - - # Validate that the build method was called properly - mock_driver.build.assert_called_once_with( - name="amphora-" + _amphora_mock.id, - amphora_flavor=AMP_FLAVOR_ID, - image_tag=AMP_IMAGE_TAG, - image_owner=image_owner_id, - key_name=AMP_SSH_KEY_NAME, - sec_groups=AMP_SEC_GROUPS, - network_ids=AMP_NET, - port_ids=[PORT_ID], - config_drive_files={'/etc/octavia/' - 'amphora-agent.conf': 'test_conf', - '/etc/rsyslog.d/10-rsyslog.conf': 'FAKE CFG'}, - user_data='user_data_conf', - server_group_id=SERVER_GRPOUP_ID, - availability_zone=None) - - # Make sure it returns the expected compute_id - self.assertEqual(COMPUTE_ID, compute_id) - - # Test that a build exception is raised - createcompute = compute_tasks.ComputeCreate() - - self.assertRaises(TypeError, - createcompute.execute, - _amphora_mock, config_drive_files='test_cert') - - # Test revert() - - _amphora_mock.compute_id = COMPUTE_ID - - createcompute = compute_tasks.ComputeCreate() - createcompute.revert(compute_id, _amphora_mock.id) - - # Validate that the delete method was called properly - mock_driver.delete.assert_called_once_with( - COMPUTE_ID) - - # Test that a delete exception is not raised - - createcompute.revert(COMPUTE_ID, _amphora_mock.id) - - @mock.patch('jinja2.Environment.get_template') - @mock.patch('octavia.amphorae.backends.agent.' - 'agent_jinja_cfg.AgentJinjaTemplater.' - 'build_agent_config', return_value='test_conf') - @mock.patch('octavia.common.jinja.' - 'user_data_jinja_cfg.UserDataJinjaCfg.' 
- 'build_user_data_config', return_value='user_data_conf') - @mock.patch('stevedore.driver.DriverManager.driver') - def test_compute_create_user_data(self, mock_driver, - mock_ud_conf, mock_conf, mock_jinja): - - self.conf.config( - group="controller_worker", user_data_config_drive=True) - createcompute = compute_tasks.ComputeCreate() - - mock_driver.build.return_value = COMPUTE_ID - # Test execute() - compute_id = createcompute.execute(_amphora_mock.id, ports=[_port], - server_group_id=None) - - # Validate that the build method was called properly - mock_driver.build.assert_called_once_with( - name="amphora-" + _amphora_mock.id, - amphora_flavor=AMP_FLAVOR_ID, - image_tag=AMP_IMAGE_TAG, - image_owner='', - key_name=AMP_SSH_KEY_NAME, - sec_groups=AMP_SEC_GROUPS, - network_ids=AMP_NET, - port_ids=[PORT_ID], - config_drive_files=None, - user_data='user_data_conf', - server_group_id=None, - availability_zone=None) - - # Make sure it returns the expected compute_id - self.assertEqual(COMPUTE_ID, compute_id) - - # Test that a build exception is raised - createcompute = compute_tasks.ComputeCreate() - - self.assertRaises(TypeError, - createcompute.execute, - _amphora_mock, config_drive_files='test_cert') - - # Test revert() - - _amphora_mock.compute_id = COMPUTE_ID - - createcompute = compute_tasks.ComputeCreate() - createcompute.revert(compute_id, _amphora_mock.id) - - # Validate that the delete method was called properly - mock_driver.delete.assert_called_once_with( - COMPUTE_ID) - - # Test that a delete exception is not raised - - createcompute.revert(COMPUTE_ID, _amphora_mock.id) - - @mock.patch('octavia.common.jinja.logging.logging_jinja_cfg.' - 'LoggingJinjaTemplater.build_logging_config') - @mock.patch('jinja2.Environment.get_template') - @mock.patch('octavia.amphorae.backends.agent.' - 'agent_jinja_cfg.AgentJinjaTemplater.' - 'build_agent_config', return_value='test_conf') - @mock.patch('octavia.common.jinja.' - 'user_data_jinja_cfg.UserDataJinjaCfg.' 
- 'build_user_data_config', return_value='user_data_conf') - @mock.patch('stevedore.driver.DriverManager.driver') - def test_compute_create_availability_zone(self, mock_driver, mock_ud_conf, - mock_conf, mock_jinja, - mock_log_cfg): - - image_owner_id = uuidutils.generate_uuid() - compute_zone = uuidutils.generate_uuid() - az_dict = {constants.COMPUTE_ZONE: compute_zone} - - self.conf.config( - group="controller_worker", amp_image_owner_id=image_owner_id) - mock_log_cfg.return_value = 'FAKE CFG' - - createcompute = compute_tasks.ComputeCreate() - - mock_driver.build.return_value = COMPUTE_ID - # Test execute() - compute_id = createcompute.execute(_amphora_mock.id, ports=[_port], - server_group_id=SERVER_GRPOUP_ID, - availability_zone=az_dict) - - # Validate that the build method was called properly - mock_driver.build.assert_called_once_with( - name="amphora-" + _amphora_mock.id, - amphora_flavor=AMP_FLAVOR_ID, - image_tag=AMP_IMAGE_TAG, - image_owner=image_owner_id, - key_name=AMP_SSH_KEY_NAME, - sec_groups=AMP_SEC_GROUPS, - network_ids=AMP_NET, - port_ids=[PORT_ID], - config_drive_files={'/etc/octavia/' - 'amphora-agent.conf': 'test_conf', - '/etc/rsyslog.d/10-rsyslog.conf': 'FAKE CFG'}, - user_data='user_data_conf', - server_group_id=SERVER_GRPOUP_ID, - availability_zone=compute_zone) - - # Make sure it returns the expected compute_id - self.assertEqual(COMPUTE_ID, compute_id) - - # Test that a build exception is raised - createcompute = compute_tasks.ComputeCreate() - - self.assertRaises(TypeError, - createcompute.execute, - _amphora_mock, config_drive_files='test_cert') - - # Test revert() - - _amphora_mock.compute_id = COMPUTE_ID - - createcompute = compute_tasks.ComputeCreate() - createcompute.revert(compute_id, _amphora_mock.id) - - # Validate that the delete method was called properly - mock_driver.delete.assert_called_once_with( - COMPUTE_ID) - - # Test that a delete exception is not raised - - createcompute.revert(COMPUTE_ID, _amphora_mock.id) - - @mock.patch('octavia.common.jinja.logging.logging_jinja_cfg.' - 'LoggingJinjaTemplater.build_logging_config') - @mock.patch('jinja2.Environment.get_template') - @mock.patch('octavia.amphorae.backends.agent.' - 'agent_jinja_cfg.AgentJinjaTemplater.' - 'build_agent_config', return_value='test_conf') - @mock.patch('octavia.common.jinja.' - 'user_data_jinja_cfg.UserDataJinjaCfg.' 
- 'build_user_data_config', return_value='user_data_conf') - @mock.patch('stevedore.driver.DriverManager.driver') - def test_compute_create_without_ssh_access( - self, mock_driver, mock_user_data_config, - mock_conf, mock_jinja, mock_log_cfg): - - createcompute = compute_tasks.ComputeCreate() - - mock_driver.build.return_value = COMPUTE_ID - self.conf.config( - group="controller_worker", user_data_config_drive=False) - mock_log_cfg.return_value = 'FAKE CFG' - - # Test execute() - compute_id = createcompute.execute(_amphora_mock.id, ports=[_port], - server_group_id=SERVER_GRPOUP_ID) - - # Validate that the build method was called properly - mock_driver.build.assert_called_once_with( - name="amphora-" + _amphora_mock.id, - amphora_flavor=AMP_FLAVOR_ID, - image_tag=AMP_IMAGE_TAG, - image_owner='', - key_name=None, - sec_groups=AMP_SEC_GROUPS, - network_ids=AMP_NET, - port_ids=[PORT_ID], - config_drive_files={'/etc/octavia/' - 'amphora-agent.conf': 'test_conf', - '/etc/rsyslog.d/10-rsyslog.conf': 'FAKE CFG'}, - user_data='user_data_conf', - server_group_id=SERVER_GRPOUP_ID, - availability_zone=None) - - self.assertEqual(COMPUTE_ID, compute_id) - - # Test that a build exception is raised - createcompute = compute_tasks.ComputeCreate() - - self.assertRaises(TypeError, - createcompute.execute, - _amphora_mock, config_drive_files='test_cert') - - # Test revert() - - _amphora_mock.compute_id = COMPUTE_ID - - createcompute = compute_tasks.ComputeCreate() - createcompute.revert(compute_id, _amphora_mock.id) - - # Validate that the delete method was called properly - mock_driver.delete.assert_called_once_with( - COMPUTE_ID) - - # Test that a delete exception is not raised - - createcompute.revert(COMPUTE_ID, _amphora_mock.id) - - @mock.patch('octavia.common.jinja.logging.logging_jinja_cfg.' - 'LoggingJinjaTemplater.build_logging_config') - @mock.patch('jinja2.Environment.get_template') - @mock.patch('octavia.amphorae.backends.agent.' - 'agent_jinja_cfg.AgentJinjaTemplater.' - 'build_agent_config', return_value='test_conf') - @mock.patch('octavia.common.jinja.' - 'user_data_jinja_cfg.UserDataJinjaCfg.' 
-    @mock.patch('octavia.common.jinja.logging.logging_jinja_cfg.'
-                'LoggingJinjaTemplater.build_logging_config')
-    @mock.patch('jinja2.Environment.get_template')
-    @mock.patch('octavia.amphorae.backends.agent.'
-                'agent_jinja_cfg.AgentJinjaTemplater.'
-                'build_agent_config', return_value='test_conf')
-    @mock.patch('octavia.common.jinja.'
-                'user_data_jinja_cfg.UserDataJinjaCfg.'
-                'build_user_data_config', return_value='user_data_conf')
-    @mock.patch('stevedore.driver.DriverManager.driver')
-    def test_compute_create_cert(self, mock_driver, mock_ud_conf,
-                                 mock_conf, mock_jinja, mock_log_cfg):
-        createcompute = compute_tasks.CertComputeCreate()
-        key = utils.get_compatible_server_certs_key_passphrase()
-        fer = fernet.Fernet(key)
-        mock_log_cfg.return_value = 'FAKE CFG'
-
-        mock_driver.build.return_value = COMPUTE_ID
-        path = '/etc/octavia/certs/ca_01.pem'
-        self.useFixture(test_utils.OpenFixture(path, 'test'))
-        # Test execute()
-        test_cert = fer.encrypt(
-            utils.get_compatible_value('test_cert')
-        )
-        compute_id = createcompute.execute(_amphora_mock.id, test_cert,
-                                           server_group_id=SERVER_GRPOUP_ID
-                                           )
-
-        # Validate that the build method was called properly
-        mock_driver.build.assert_called_once_with(
-            name="amphora-" + _amphora_mock.id,
-            amphora_flavor=AMP_FLAVOR_ID,
-            image_tag=AMP_IMAGE_TAG,
-            image_owner='',
-            key_name=AMP_SSH_KEY_NAME,
-            sec_groups=AMP_SEC_GROUPS,
-            network_ids=AMP_NET,
-            port_ids=[],
-            user_data='user_data_conf',
-            config_drive_files={
-                '/etc/rsyslog.d/10-rsyslog.conf': 'FAKE CFG',
-                '/etc/octavia/certs/server.pem': fer.decrypt(
-                    test_cert).decode('utf-8'),
-                '/etc/octavia/certs/client_ca.pem': 'test',
-                '/etc/octavia/amphora-agent.conf': 'test_conf'},
-            server_group_id=SERVER_GRPOUP_ID,
-            availability_zone=None)
-
-        self.assertEqual(COMPUTE_ID, compute_id)
-
-        # Test that a build exception is raised
-        self.useFixture(test_utils.OpenFixture(path, 'test'))
-
-        createcompute = compute_tasks.ComputeCreate()
-        self.assertRaises(TypeError,
-                          createcompute.execute,
-                          _amphora_mock,
-                          config_drive_files=test_cert)
-
-        # Test revert()
-
-        _amphora_mock.compute_id = COMPUTE_ID
-
-        createcompute = compute_tasks.ComputeCreate()
-        createcompute.revert(compute_id, _amphora_mock.id)
-
-        # Validate that the delete method was called properly
-        mock_driver.delete.assert_called_once_with(COMPUTE_ID)
-
-        # Test that a delete exception is not raised
-
-        createcompute.revert(COMPUTE_ID, _amphora_mock.id)
-
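The cert test round-trips the PEM payload through Fernet before handing it to the task, mirroring how the flow keeps the certificate encrypted in transit and only decrypts it into the config drive. A self-contained sketch of that round trip (key handling is simplified relative to Octavia's utils helpers):

    from cryptography import fernet

    key = fernet.Fernet.generate_key()  # Octavia derives this from config
    fer = fernet.Fernet(key)

    token = fer.encrypt(b'test_cert')          # what the flow passes around
    assert fer.decrypt(token) == b'test_cert'  # what lands in server.pem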
-    @mock.patch('octavia.controller.worker.amphora_rate_limit'
-                '.AmphoraBuildRateLimit.remove_from_build_req_queue')
-    @mock.patch('stevedore.driver.DriverManager.driver')
-    @mock.patch('time.sleep')
-    def test_compute_wait(self,
-                          mock_time_sleep,
-                          mock_driver,
-                          mock_remove_from_build_queue):
-
-        self.conf.config(group='haproxy_amphora', build_rate_limit=5)
-        _amphora_mock.compute_id = COMPUTE_ID
-        _amphora_mock.status = constants.ACTIVE
-        _amphora_mock.lb_network_ip = LB_NET_IP
-
-        mock_driver.get_amphora.return_value = _amphora_mock, None
-
-        computewait = compute_tasks.ComputeActiveWait()
-
-        # Test with no AZ
-        computewait.execute(COMPUTE_ID, AMPHORA_ID, None)
-        mock_driver.get_amphora.assert_called_once_with(COMPUTE_ID, None)
-
-        # Test with AZ
-        mock_driver.reset_mock()
-        az = {constants.MANAGEMENT_NETWORK: uuidutils.generate_uuid()}
-        computewait.execute(COMPUTE_ID, AMPHORA_ID, az)
-        mock_driver.get_amphora.assert_called_once_with(
-            COMPUTE_ID, az[constants.MANAGEMENT_NETWORK])
-
-        # Test with deleted amp
-        _amphora_mock.status = constants.DELETED
-        self.assertRaises(exceptions.ComputeWaitTimeoutException,
-                          computewait.execute,
-                          _amphora_mock, AMPHORA_ID, None)
-
-    @mock.patch('octavia.controller.worker.amphora_rate_limit'
-                '.AmphoraBuildRateLimit.remove_from_build_req_queue')
-    @mock.patch('stevedore.driver.DriverManager.driver')
-    @mock.patch('time.sleep')
-    def test_compute_wait_error_status(self,
-                                       mock_time_sleep,
-                                       mock_driver,
-                                       mock_remove_from_build_queue):
-
-        self.conf.config(group='haproxy_amphora', build_rate_limit=5)
-        _amphora_mock.compute_id = COMPUTE_ID
-        _amphora_mock.status = constants.ACTIVE
-        _amphora_mock.lb_network_ip = LB_NET_IP
-
-        mock_driver.get_amphora.return_value = _amphora_mock, None
-
-        computewait = compute_tasks.ComputeActiveWait()
-        computewait.execute(COMPUTE_ID, AMPHORA_ID, None)
-
-        mock_driver.get_amphora.assert_called_once_with(COMPUTE_ID, None)
-
-        _amphora_mock.status = constants.ERROR
-
-        self.assertRaises(exceptions.ComputeBuildException,
-                          computewait.execute,
-                          _amphora_mock, AMPHORA_ID, None)
-
-    @mock.patch('octavia.controller.worker.amphora_rate_limit'
-                '.AmphoraBuildRateLimit.remove_from_build_req_queue')
-    @mock.patch('stevedore.driver.DriverManager.driver')
-    @mock.patch('time.sleep')
-    def test_compute_wait_skipped(self,
-                                  mock_time_sleep,
-                                  mock_driver,
-                                  mock_remove_from_build_queue):
-        _amphora_mock.compute_id = COMPUTE_ID
-        _amphora_mock.status = constants.ACTIVE
-        _amphora_mock.lb_network_ip = LB_NET_IP
-
-        mock_driver.get_amphora.return_value = _amphora_mock, None
-
-        computewait = compute_tasks.ComputeActiveWait()
-        computewait.execute(COMPUTE_ID, AMPHORA_ID, None)
-
-        mock_driver.get_amphora.assert_called_once_with(COMPUTE_ID, None)
-        mock_remove_from_build_queue.assert_not_called()
-
-    @mock.patch('stevedore.driver.DriverManager.driver')
-    def test_delete_amphorae_on_load_balancer(self, mock_driver):
-
-        mock_driver.delete.side_effect = [mock.DEFAULT,
-                                          exceptions.OctaviaException('boom')]
-
-        delete_amps = compute_tasks.DeleteAmphoraeOnLoadBalancer()
-
-        delete_amps.execute(_load_balancer_mock)
-
-        mock_driver.delete.assert_called_once_with(COMPUTE_ID)
-
-        # Test compute driver exception is raised
-        self.assertRaises(exceptions.OctaviaException, delete_amps.execute,
-                          _load_balancer_mock)
-
-    @mock.patch('stevedore.driver.DriverManager.driver')
-    def test_compute_delete(self, mock_driver):
-        mock_driver.delete.side_effect = [
-            mock.DEFAULT, exceptions.OctaviaException('boom'),
-            mock.DEFAULT, exceptions.OctaviaException('boom'),
-            exceptions.OctaviaException('boom'),
-            exceptions.OctaviaException('boom'),
-            exceptions.OctaviaException('boom')]
-
-        delete_compute = compute_tasks.ComputeDelete()
-
-        # Limit the retry attempts for the test run to save time
-        delete_compute.execute.retry.stop = tenacity.stop_after_attempt(2)
-
-        delete_compute.execute(_amphora_mock)
-
-        mock_driver.delete.assert_called_once_with(COMPUTE_ID)
-
-        # Test retry after a compute exception
-        mock_driver.reset_mock()
-        delete_compute.execute(_amphora_mock)
-        mock_driver.delete.assert_has_calls([mock.call(COMPUTE_ID),
-                                             mock.call(COMPUTE_ID)])
-
-        # Test passive failure
-        mock_driver.reset_mock()
-        delete_compute.execute(_amphora_mock, passive_failure=True)
-        mock_driver.delete.assert_has_calls([mock.call(COMPUTE_ID),
-                                             mock.call(COMPUTE_ID)])
-
-        # Test non-passive failure
-        mock_driver.reset_mock()
-        self.assertRaises(exceptions.OctaviaException, delete_compute.execute,
-                          _amphora_mock, passive_failure=False)
-
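test_compute_delete caps a tenacity-decorated task at two attempts by reassigning the stop condition on the wrapped function's retry controller, which keeps the unit test fast without touching the production decorator. A minimal sketch of the same trick (flaky() is invented for illustration):

    import tenacity

    attempts = {'count': 0}

    @tenacity.retry(stop=tenacity.stop_after_attempt(10),
                    wait=tenacity.wait_none())
    def flaky():
        attempts['count'] += 1
        raise RuntimeError('boom')

    # Tighten the stop condition at test time instead of running 10 tries.
    flaky.retry.stop = tenacity.stop_after_attempt(2)

    try:
        flaky()
    except tenacity.RetryError:
        pass
    assert attempts['count'] == 2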
-    @mock.patch('stevedore.driver.DriverManager.driver')
-    def test_nova_server_group_create(self, mock_driver):
-        nova_sever_group_obj = compute_tasks.NovaServerGroupCreate()
-
-        server_group_test_id = '6789'
-        fake_server_group = mock.MagicMock()
-        fake_server_group.id = server_group_test_id
-        fake_server_group.policy = 'anti-affinity'
-        mock_driver.create_server_group.return_value = fake_server_group
-
-        # Test execute()
-        sg_id = nova_sever_group_obj.execute('123')
-
-        # Validate that the build method was called properly
-        mock_driver.create_server_group.assert_called_once_with(
-            'octavia-lb-123', 'anti-affinity')
-
-        # Make sure it returns the expected server group_id
-        self.assertEqual(server_group_test_id, sg_id)
-
-        # Test revert()
-        nova_sever_group_obj.revert(sg_id)
-
-        # Validate that the delete_server_group method was called properly
-        mock_driver.delete_server_group.assert_called_once_with(sg_id)
-
-        # Test revert with exception
-        mock_driver.reset_mock()
-        mock_driver.delete_server_group.side_effect = Exception('DelSGExcept')
-        nova_sever_group_obj.revert(sg_id)
-        mock_driver.delete_server_group.assert_called_once_with(sg_id)
-
-    @mock.patch('stevedore.driver.DriverManager.driver')
-    def test_nova_server_group_delete_with_sever_group_id(self, mock_driver):
-        nova_sever_group_obj = compute_tasks.NovaServerGroupDelete()
-        sg_id = '6789'
-        nova_sever_group_obj.execute(sg_id)
-        mock_driver.delete_server_group.assert_called_once_with(sg_id)
-
-    @mock.patch('stevedore.driver.DriverManager.driver')
-    def test_nova_server_group_delete_with_None(self, mock_driver):
-        nova_sever_group_obj = compute_tasks.NovaServerGroupDelete()
-        sg_id = None
-        nova_sever_group_obj.execute(sg_id)
-        self.assertFalse(mock_driver.delete_server_group.called, sg_id)
-
-    @mock.patch('stevedore.driver.DriverManager.driver')
-    def test_attach_port(self, mock_driver):
-        COMPUTE_ID = uuidutils.generate_uuid()
-        PORT_ID = uuidutils.generate_uuid()
-        amphora_mock = mock.MagicMock()
-        port_mock = mock.MagicMock()
-        amphora_mock.compute_id = COMPUTE_ID
-        port_mock.id = PORT_ID
-
-        attach_port_obj = compute_tasks.AttachPort()
-
-        # Test execute
-        attach_port_obj.execute(amphora_mock, port_mock)
-
-        mock_driver.attach_network_or_port.assert_called_once_with(
-            COMPUTE_ID, port_id=PORT_ID)
-
-        # Test revert
-        mock_driver.reset_mock()
-
-        attach_port_obj.revert(amphora_mock, port_mock)
-
-        mock_driver.detach_port.assert_called_once_with(COMPUTE_ID, PORT_ID)
-
-        # Test revert exception
-        mock_driver.reset_mock()
-        mock_driver.detach_port.side_effect = [Exception('boom')]
-
-        # should not raise
-        attach_port_obj.revert(amphora_mock, port_mock)
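AttachPort.revert() above must not raise: a cleanup failure during rollback would otherwise mask the error that started the revert. A sketch of that taskflow pattern; DetachPortTask and the driver stub are illustrative, not Octavia code:

    import logging

    from taskflow import task

    LOG = logging.getLogger(__name__)


    def detach(compute_id, port_id):
        # Stand-in for a compute driver call that fails during rollback.
        raise RuntimeError('boom')


    class DetachPortTask(task.Task):
        def execute(self, compute_id, port_id):
            pass  # the attach happens here in the real task

        def revert(self, compute_id, port_id, **kwargs):
            try:
                detach(compute_id, port_id)
            except Exception as e:
                # Log and swallow; the flow is already reverting.
                LOG.error('Failed to detach port %s: %s', port_id, e)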
diff --git a/octavia/tests/unit/controller/worker/v1/tasks/test_database_tasks.py b/octavia/tests/unit/controller/worker/v1/tasks/test_database_tasks.py
deleted file mode 100644
index 412db38a18..0000000000
--- a/octavia/tests/unit/controller/worker/v1/tasks/test_database_tasks.py
+++ /dev/null
@@ -1,2617 +0,0 @@
-# Copyright 2015 Hewlett-Packard Development Company, L.P.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-import random
-from unittest import mock
-
-from cryptography import fernet
-from oslo_db import exception as odb_exceptions
-from oslo_utils import uuidutils
-from sqlalchemy.orm import exc
-from taskflow.types import failure
-
-from octavia.common import constants
-from octavia.common import data_models
-from octavia.common import utils
-from octavia.controller.worker.v1.tasks import database_tasks
-from octavia.db import repositories as repo
-import octavia.tests.unit.base as base
-
-
-AMP_ID = uuidutils.generate_uuid()
-COMPUTE_ID = uuidutils.generate_uuid()
-LB_ID = uuidutils.generate_uuid()
-SERVER_GROUP_ID = uuidutils.generate_uuid()
-LB_NET_IP = '192.0.2.2'
-LISTENER_ID = uuidutils.generate_uuid()
-POOL_ID = uuidutils.generate_uuid()
-HM_ID = uuidutils.generate_uuid()
-MEMBER_ID = uuidutils.generate_uuid()
-PORT_ID = uuidutils.generate_uuid()
-SUBNET_ID = uuidutils.generate_uuid()
-VRRP_PORT_ID = uuidutils.generate_uuid()
-HA_PORT_ID = uuidutils.generate_uuid()
-L7POLICY_ID = uuidutils.generate_uuid()
-L7RULE_ID = uuidutils.generate_uuid()
-VIP_IP = '192.0.5.2'
-VRRP_IP = '192.0.5.3'
-HA_IP = '192.0.5.4'
-AMP_ROLE = 'FAKE_ROLE'
-VRRP_ID = 1
-VRRP_PRIORITY = random.randrange(100)
-CACHED_ZONE = 'zone1'
-IMAGE_ID = uuidutils.generate_uuid()
-COMPUTE_FLAVOR = uuidutils.generate_uuid()
-
-_amphora_mock = mock.MagicMock()
-_amphora_mock.id = AMP_ID
-_amphora_mock.compute_id = COMPUTE_ID
-_amphora_mock.lb_network_ip = LB_NET_IP
-_amphora_mock.vrrp_ip = VRRP_IP
-_amphora_mock.ha_ip = HA_IP
-_amphora_mock.ha_port_id = HA_PORT_ID
-_amphora_mock.vrrp_port_id = VRRP_PORT_ID
-_amphora_mock.role = AMP_ROLE
-_amphora_mock.vrrp_id = VRRP_ID
-_amphora_mock.vrrp_priority = VRRP_PRIORITY
-_amphorae = [_amphora_mock]
-_loadbalancer_mock = mock.MagicMock()
-_loadbalancer_mock.id = LB_ID
-_loadbalancer_mock.amphorae = [_amphora_mock]
-_pool_mock = mock.MagicMock()
-_pool_mock.id = POOL_ID
-_l7policy_mock = mock.MagicMock()
-_l7policy_mock.id = L7POLICY_ID
-_l7rule_mock = mock.MagicMock()
-_l7rule_mock.id = L7RULE_ID
-_listener_mock = mock.MagicMock()
-_listener_mock.id = LISTENER_ID
-_tf_failure_mock = mock.Mock(spec=failure.Failure)
-_vip_mock = mock.MagicMock()
-_vip_mock.port_id = PORT_ID
-_vip_mock.subnet_id = SUBNET_ID
-_vip_mock.ip_address = VIP_IP
-_vrrp_group_mock = mock.MagicMock()
-_cert_mock = mock.MagicMock()
-_compute_mock = mock.MagicMock()
-_compute_mock.lb_network_ip = LB_NET_IP
-_compute_mock.cached_zone = CACHED_ZONE
-_compute_mock.image_id = IMAGE_ID
-_compute_mock.compute_flavor = COMPUTE_FLAVOR
-
-
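The test class that follows stacks seven mock.patch decorators on the TestCase itself, so every test method receives the same tail of mock arguments (in reverse decorator order) after any method-level patches. A compact sketch of class-level patching with an invented target:

    import unittest
    import uuid
    from unittest import mock


    @mock.patch('uuid.uuid4', return_value='fixed-id')  # applies to all tests
    class TestWithClassPatch(unittest.TestCase):

        def test_patched_everywhere(self, mock_uuid4):
            self.assertEqual('fixed-id', uuid.uuid4())
            mock_uuid4.assert_called_once_with()


    if __name__ == '__main__':
        unittest.main()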
-@mock.patch('octavia.db.repositories.AmphoraRepository.delete')
-@mock.patch('octavia.db.repositories.AmphoraRepository.update')
-@mock.patch('octavia.db.repositories.ListenerRepository.update')
-@mock.patch('octavia.db.repositories.LoadBalancerRepository.update')
-@mock.patch('octavia.db.api.get_session', return_value='TEST')
-@mock.patch('octavia.controller.worker.v1.tasks.database_tasks.LOG')
-@mock.patch('oslo_utils.uuidutils.generate_uuid', return_value=AMP_ID)
-class TestDatabaseTasks(base.TestCase):
-
-    def setUp(self):
-
-        self.health_mon_mock = mock.MagicMock()
-        self.health_mon_mock.id = HM_ID
-        self.health_mon_mock.pool_id = POOL_ID
-
-        self.listener_mock = mock.MagicMock()
-        self.listener_mock.id = LISTENER_ID
-
-        self.loadbalancer_mock = mock.MagicMock()
-        self.loadbalancer_mock.id = LB_ID
-
-        self.member_mock = mock.MagicMock()
-        self.member_mock.id = MEMBER_ID
-
-        self.pool_mock = mock.MagicMock()
-        self.pool_mock.id = POOL_ID
-        self.pool_mock.health_monitor = self.health_mon_mock
-
-        self.l7policy_mock = mock.MagicMock()
-        self.l7policy_mock.id = L7POLICY_ID
-
-        self.l7rule_mock = mock.MagicMock()
-        self.l7rule_mock.id = L7RULE_ID
-        self.l7rule_mock.l7policy = self.l7policy_mock
-
-        super().setUp()
-
-    @mock.patch('octavia.db.repositories.AmphoraRepository.create',
-                return_value=_amphora_mock)
-    def test_create_amphora_in_db(self,
-                                  mock_create,
-                                  mock_generate_uuid,
-                                  mock_LOG,
-                                  mock_get_session,
-                                  mock_loadbalancer_repo_update,
-                                  mock_listener_repo_update,
-                                  mock_amphora_repo_update,
-                                  mock_amphora_repo_delete):
-
-        create_amp_in_db = database_tasks.CreateAmphoraInDB()
-        amp_id = create_amp_in_db.execute()
-
-        repo.AmphoraRepository.create.assert_called_once_with(
-            'TEST',
-            id=AMP_ID,
-            load_balancer_id=None,
-            status=constants.PENDING_CREATE,
-            cert_busy=False)
-
-        self.assertEqual(_amphora_mock.id, amp_id)
-
-        # Test the revert
-        create_amp_in_db.revert(_tf_failure_mock)
-        self.assertFalse(mock_amphora_repo_delete.called)
-
-        mock_amphora_repo_delete.reset_mock()
-        create_amp_in_db.revert(result='AMP')
-        self.assertTrue(mock_amphora_repo_delete.called)
-        mock_amphora_repo_delete.assert_called_once_with(
-            'TEST',
-            id='AMP')
-
-        # Test revert with exception
-        mock_amphora_repo_delete.reset_mock()
-        mock_amphora_repo_delete.side_effect = Exception('fail')
-        create_amp_in_db.revert(result='AMP')
-        self.assertTrue(mock_amphora_repo_delete.called)
-        mock_amphora_repo_delete.assert_called_once_with(
-            'TEST',
-            id='AMP')
-
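CreateAmphoraInDB.revert() receives either the task's own result or a taskflow Failure when execute() never completed, and the test builds a Mock specced to failure.Failure so the task's isinstance() check takes the no-op branch. A sketch of the idiom (the revert() function here is a stand-in, not the real task):

    from unittest import mock

    from taskflow.types import failure

    fake_failure = mock.Mock(spec=failure.Failure)
    # spec= makes the mock satisfy the same isinstance() test the task uses.
    assert isinstance(fake_failure, failure.Failure)

    def revert(result):
        if isinstance(result, failure.Failure):
            return 'nothing to clean up'  # execute() never created the row
        return 'delete %s' % result

    assert revert(fake_failure) == 'nothing to clean up'
    assert revert('AMP') == 'delete AMP'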
-    @mock.patch('octavia.db.repositories.ListenerRepository.delete')
-    def test_delete_listener_in_db(self,
-                                   mock_listener_repo_delete,
-                                   mock_generate_uuid,
-                                   mock_LOG,
-                                   mock_get_session,
-                                   mock_loadbalancer_repo_update,
-                                   mock_listener_repo_update,
-                                   mock_amphora_repo_update,
-                                   mock_amphora_repo_delete):
-
-        delete_listener = database_tasks.DeleteListenerInDB()
-        delete_listener.execute(_listener_mock)
-
-        repo.ListenerRepository.delete.assert_called_once_with(
-            'TEST',
-            id=LISTENER_ID)
-
-    @mock.patch('octavia.db.repositories.HealthMonitorRepository.update')
-    @mock.patch('octavia.db.repositories.HealthMonitorRepository.delete')
-    def test_delete_health_monitor_in_db(self,
-                                         mock_health_mon_repo_delete,
-                                         mock_health_mon_repo_update,
-                                         mock_generate_uuid,
-                                         mock_LOG,
-                                         mock_get_session,
-                                         mock_loadbalancer_repo_update,
-                                         mock_listener_repo_update,
-                                         mock_amphora_repo_update,
-                                         mock_amphora_repo_delete):
-
-        delete_health_mon = database_tasks.DeleteHealthMonitorInDB()
-        delete_health_mon.execute(self.health_mon_mock)
-
-        repo.HealthMonitorRepository.delete.assert_called_once_with(
-            'TEST', id=HM_ID)
-
-        # Test the revert
-        mock_health_mon_repo_delete.reset_mock()
-        delete_health_mon.revert(self.health_mon_mock)
-
-        repo.HealthMonitorRepository.update.assert_called_once_with(
-            'TEST', id=HM_ID, provisioning_status=constants.ERROR)
-
-        # Test Not Found Exception
-        mock_health_mon_repo_delete.reset_mock()
-        mock_health_mon_repo_delete.side_effect = [exc.NoResultFound()]
-        delete_health_mon.execute(self.health_mon_mock)
-
-        repo.HealthMonitorRepository.delete.assert_called_once_with(
-            'TEST', id=HM_ID)
-
-    @mock.patch('octavia.db.repositories.HealthMonitorRepository.update')
-    @mock.patch('octavia.db.repositories.HealthMonitorRepository.delete')
-    def test_delete_health_monitor_in_db_by_pool(self,
-                                                 mock_health_mon_repo_delete,
-                                                 mock_health_mon_repo_update,
-                                                 mock_generate_uuid,
-                                                 mock_LOG,
-                                                 mock_get_session,
-                                                 mock_loadbalancer_repo_update,
-                                                 mock_listener_repo_update,
-                                                 mock_amphora_repo_update,
-                                                 mock_amphora_repo_delete):
-
-        delete_health_mon = database_tasks.DeleteHealthMonitorInDBByPool()
-        delete_health_mon.execute(self.pool_mock)
-
-        repo.HealthMonitorRepository.delete.assert_called_once_with(
-            'TEST',
-            id=HM_ID)
-
-        # Test the revert
-        mock_health_mon_repo_delete.reset_mock()
-        delete_health_mon.revert(self.pool_mock)
-
-        repo.HealthMonitorRepository.update.assert_called_once_with(
-            'TEST', id=HM_ID, provisioning_status=constants.ERROR)
-
-# TODO(johnsom) fix once provisioning status added
-#        repo.HealthMonitorRepository.update.assert_called_once_with(
-#            'TEST',
-#            POOL_ID,
-#            provisioning_status=constants.ERROR)
-
-    @mock.patch('octavia.db.repositories.MemberRepository.delete')
-    def test_delete_member_in_db(self,
-                                 mock_member_repo_delete,
-                                 mock_generate_uuid,
-                                 mock_LOG,
-                                 mock_get_session,
-                                 mock_loadbalancer_repo_update,
-                                 mock_listener_repo_update,
-                                 mock_amphora_repo_update,
-                                 mock_amphora_repo_delete):
-
-        delete_member = database_tasks.DeleteMemberInDB()
-        delete_member.execute(self.member_mock)
-
-        repo.MemberRepository.delete.assert_called_once_with(
-            'TEST',
-            id=MEMBER_ID)
-
-        # Test the revert
-
-        mock_member_repo_delete.reset_mock()
-        delete_member.revert(self.member_mock)
-
-# TODO(johnsom) Fix
-#        repo.MemberRepository.delete.assert_called_once_with(
-#            'TEST',
-#            MEMBER_ID)
-
-    @mock.patch('octavia.db.repositories.PoolRepository.delete')
-    def test_delete_pool_in_db(self,
-                               mock_pool_repo_delete,
-                               mock_generate_uuid,
-                               mock_LOG,
-                               mock_get_session,
-                               mock_loadbalancer_repo_update,
-                               mock_listener_repo_update,
-                               mock_amphora_repo_update,
-                               mock_amphora_repo_delete):
-
-        delete_pool = database_tasks.DeletePoolInDB()
-        delete_pool.execute(_pool_mock)
-
-        repo.PoolRepository.delete.assert_called_once_with(
-            'TEST',
-            id=POOL_ID)
-
-        # Test the revert
-
-        mock_pool_repo_delete.reset_mock()
-        delete_pool.revert(_pool_mock)
-
-# TODO(johnsom) Fix
-#        repo.PoolRepository.update.assert_called_once_with(
-#            'TEST',
-#            POOL_ID,
-#            operating_status=constants.ERROR)
-
-    @mock.patch('octavia.db.repositories.L7PolicyRepository.delete')
-    def test_delete_l7policy_in_db(self,
-                                   mock_l7policy_repo_delete,
-                                   mock_generate_uuid,
-                                   mock_LOG,
-                                   mock_get_session,
-                                   mock_loadbalancer_repo_update,
-                                   mock_listener_repo_update,
-                                   mock_amphora_repo_update,
-                                   mock_amphora_repo_delete):
-
-        delete_l7policy = database_tasks.DeleteL7PolicyInDB()
-        delete_l7policy.execute(_l7policy_mock)
-
-        repo.L7PolicyRepository.delete.assert_called_once_with(
-            'TEST',
-            id=L7POLICY_ID)
-
-        # Test the revert
-
-        mock_l7policy_repo_delete.reset_mock()
-        delete_l7policy.revert(_l7policy_mock)
-
-# TODO(sbalukoff) Fix
-#        repo.ListenerRepository.update.assert_called_once_with(
-#            'TEST',
-#            LISTENER_ID,
-#            operating_status=constants.ERROR)
-
-    @mock.patch('octavia.db.repositories.L7RuleRepository.delete')
-    def test_delete_l7rule_in_db(self,
-                                 mock_l7rule_repo_delete,
-                                 mock_generate_uuid,
-                                 mock_LOG,
-                                 mock_get_session,
-                                 mock_loadbalancer_repo_update,
-                                 mock_listener_repo_update,
-                                 mock_amphora_repo_update,
-                                 mock_amphora_repo_delete):
-
-        delete_l7rule = database_tasks.DeleteL7RuleInDB()
-        delete_l7rule.execute(_l7rule_mock)
-
-        repo.L7RuleRepository.delete.assert_called_once_with(
-            'TEST',
-            id=L7RULE_ID)
-
-        # Test the revert
-
-        mock_l7rule_repo_delete.reset_mock()
-        delete_l7rule.revert(_l7rule_mock)
-
-# TODO(sbalukoff) Fix
-#        repo.ListenerRepository.update.assert_called_once_with(
-#            'TEST',
-#            LISTENER_ID,
-#            operating_status=constants.ERROR)
-
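The delete tests above also feed the repository a sqlalchemy.orm.exc.NoResultFound side effect and expect execute() to succeed anyway: a missing row is treated as already deleted, which keeps delete flows idempotent on retry. A sketch of that guard (delete_row() stands in for the repository call):

    from sqlalchemy.orm import exc


    def delete_row(row_id):
        # Stand-in for HealthMonitorRepository.delete(); the row is gone.
        raise exc.NoResultFound()


    def idempotent_delete(row_id):
        try:
            delete_row(row_id)
        except exc.NoResultFound:
            pass  # already deleted; a second delete should succeed quietly


    idempotent_delete('hm-1')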
-    @mock.patch('octavia.db.repositories.AmphoraRepository.get',
-                return_value=_amphora_mock)
-    def test_reload_amphora(self,
-                            mock_amp_get,
-                            mock_generate_uuid,
-                            mock_LOG,
-                            mock_get_session,
-                            mock_loadbalancer_repo_update,
-                            mock_listener_repo_update,
-                            mock_amphora_repo_update,
-                            mock_amphora_repo_delete):
-
-        reload_amp = database_tasks.ReloadAmphora()
-        amp = reload_amp.execute(AMP_ID)
-
-        repo.AmphoraRepository.get.assert_called_once_with(
-            'TEST',
-            id=AMP_ID)
-
-        self.assertEqual(_amphora_mock, amp)
-
-    @mock.patch('octavia.db.repositories.LoadBalancerRepository.get',
-                return_value=_loadbalancer_mock)
-    def test_reload_load_balancer(self,
-                                  mock_lb_get,
-                                  mock_generate_uuid,
-                                  mock_LOG,
-                                  mock_get_session,
-                                  mock_loadbalancer_repo_update,
-                                  mock_listener_repo_update,
-                                  mock_amphora_repo_update,
-                                  mock_amphora_repo_delete):
-
-        reload_lb = database_tasks.ReloadLoadBalancer()
-        lb = reload_lb.execute(LB_ID)
-
-        repo.LoadBalancerRepository.get.assert_called_once_with(
-            'TEST',
-            id=LB_ID)
-
-        self.assertEqual(_loadbalancer_mock, lb)
-
-    @mock.patch('octavia.db.repositories.LoadBalancerRepository.get',
-                return_value=_loadbalancer_mock)
-    @mock.patch('octavia.db.repositories.VipRepository.update')
-    def test_update_vip_after_allocation(self,
-                                         mock_vip_update,
-                                         mock_loadbalancer_get,
-                                         mock_generate_uuid,
-                                         mock_LOG,
-                                         mock_get_session,
-                                         mock_loadbalancer_repo_update,
-                                         mock_listener_repo_update,
-                                         mock_amphora_repo_update,
-                                         mock_amphora_repo_delete):
-
-        update_vip = database_tasks.UpdateVIPAfterAllocation()
-        loadbalancer = update_vip.execute(LB_ID, _vip_mock)
-
-        self.assertEqual(_loadbalancer_mock, loadbalancer)
-        mock_vip_update.assert_called_once_with('TEST',
-                                                LB_ID,
-                                                port_id=PORT_ID,
-                                                subnet_id=SUBNET_ID,
-                                                ip_address=VIP_IP)
-        mock_loadbalancer_get.assert_called_once_with('TEST',
-                                                      id=LB_ID)
-
-    def test_update_amphora_vip_data(self,
-                                     mock_generate_uuid,
-                                     mock_LOG,
-                                     mock_get_session,
-                                     mock_loadbalancer_repo_update,
-                                     mock_listener_repo_update,
-                                     mock_amphora_repo_update,
-                                     mock_amphora_repo_delete):
-
-        update_amp_vip_data = database_tasks.UpdateAmphoraeVIPData()
-        update_amp_vip_data.execute(_amphorae)
-
-        mock_amphora_repo_update.assert_called_once_with(
-            'TEST',
-            AMP_ID,
-            vrrp_ip=VRRP_IP,
-            ha_ip=HA_IP,
-            vrrp_port_id=VRRP_PORT_ID,
-            ha_port_id=HA_PORT_ID,
-            vrrp_id=1)
-
-    def test_update_amphora_vip_data2(self,
-                                      mock_generate_uuid,
-                                      mock_LOG,
-                                      mock_get_session,
-                                      mock_loadbalancer_repo_update,
-                                      mock_listener_repo_update,
-                                      mock_amphora_repo_update,
-                                      mock_amphora_repo_delete):
-        update_amp_vip_data2 = database_tasks.UpdateAmphoraVIPData()
-        update_amp_vip_data2.execute(_amphorae[0])
-
-        mock_amphora_repo_update.assert_called_once_with(
-            'TEST',
-            AMP_ID,
-            vrrp_ip=VRRP_IP,
-            ha_ip=HA_IP,
-            vrrp_port_id=VRRP_PORT_ID,
-            ha_port_id=HA_PORT_ID,
-            vrrp_id=1)
-
-    def test_update_amp_failover_details(self,
-                                         mock_generate_uuid,
-                                         mock_LOG,
-                                         mock_get_session,
-                                         mock_loadbalancer_repo_update,
-                                         mock_listener_repo_update,
-                                         mock_amphora_repo_update,
-                                         mock_amphora_repo_delete):
-        mock_base_port = mock.MagicMock()
-        mock_base_port.id = VRRP_PORT_ID
-        mock_fixed_ip = mock.MagicMock()
-        mock_fixed_ip.ip_address = VRRP_IP
-        mock_base_port.fixed_ips = [mock_fixed_ip]
-        mock_vip = mock.MagicMock()
-        mock_vip.ip_address = HA_IP
-        mock_vip.port_id = HA_PORT_ID
-
-        update_amp_fo_details = database_tasks.UpdateAmpFailoverDetails()
-        update_amp_fo_details.execute(_amphora_mock, mock_vip, mock_base_port)
-
-        mock_amphora_repo_update.assert_called_once_with(
-            'TEST',
-            AMP_ID,
-            vrrp_ip=VRRP_IP,
-            ha_ip=HA_IP,
-            vrrp_port_id=VRRP_PORT_ID,
-            ha_port_id=HA_PORT_ID,
-            vrrp_id=VRRP_ID)
-
-    @mock.patch('octavia.db.repositories.AmphoraRepository.associate')
-    def test_associate_failover_amphora_with_lb_id(
-            self,
-            mock_associate,
-            mock_generate_uuid,
-            mock_LOG,
-            mock_get_session,
-            mock_loadbalancer_repo_update,
-            mock_listener_repo_update,
-            mock_amphora_repo_update,
-            mock_amphora_repo_delete):
-
-        assoc_fo_amp_lb_id = database_tasks.AssociateFailoverAmphoraWithLBID()
-        assoc_fo_amp_lb_id.execute(AMP_ID, LB_ID)
-
-        mock_associate.assert_called_once_with('TEST',
-                                               load_balancer_id=LB_ID,
-                                               amphora_id=AMP_ID)
-
-        # Test revert
-        assoc_fo_amp_lb_id.revert(AMP_ID)
-
-        mock_amphora_repo_update.assert_called_once_with('TEST',
-                                                         AMP_ID,
-                                                         loadbalancer_id=None)
-
-        # Test revert with exception
-        mock_amphora_repo_update.reset_mock()
-        mock_amphora_repo_update.side_effect = Exception('fail')
-
-        assoc_fo_amp_lb_id.revert(AMP_ID)
-
-        mock_amphora_repo_update.assert_called_once_with('TEST',
-                                                         AMP_ID,
-                                                         loadbalancer_id=None)
-
-    @mock.patch('octavia.db.repositories.AmphoraRepository.get',
-                return_value=_amphora_mock)
-    @mock.patch('octavia.db.repositories.LoadBalancerRepository.get',
-                return_value=_loadbalancer_mock)
-    def test_mark_lb_amphorae_deleted_in_db(self,
-                                            mock_loadbalancer_repo_get,
-                                            mock_amphora_repo_get,
-                                            mock_generate_uuid,
-                                            mock_LOG,
-                                            mock_get_session,
-                                            mock_loadbalancer_repo_update,
-                                            mock_listener_repo_update,
-                                            mock_amphora_repo_update,
-                                            mock_amphora_repo_delete):
-
-        mark_amp_deleted_in_db = (database_tasks.
-                                  MarkLBAmphoraeDeletedInDB())
-        mark_amp_deleted_in_db.execute(_loadbalancer_mock)
-
-        repo.AmphoraRepository.update.assert_called_once_with(
-            'TEST',
-            id=AMP_ID,
-            status=constants.DELETED)
-
-    @mock.patch('octavia.db.repositories.AmphoraRepository.get',
-                return_value=_amphora_mock)
-    @mock.patch('octavia.db.repositories.LoadBalancerRepository.get',
-                return_value=_loadbalancer_mock)
-    def test_mark_amphora_allocated_in_db(self,
-                                          mock_loadbalancer_repo_get,
-                                          mock_amphora_repo_get,
-                                          mock_generate_uuid,
-                                          mock_LOG,
-                                          mock_get_session,
-                                          mock_loadbalancer_repo_update,
-                                          mock_listener_repo_update,
-                                          mock_amphora_repo_update,
-                                          mock_amphora_repo_delete):
-
-        mark_amp_allocated_in_db = (database_tasks.
-                                    MarkAmphoraAllocatedInDB())
-        mark_amp_allocated_in_db.execute(_amphora_mock,
-                                         self.loadbalancer_mock.id)
-
-        repo.AmphoraRepository.update.assert_called_once_with(
-            'TEST',
-            AMP_ID,
-            status=constants.AMPHORA_ALLOCATED,
-            compute_id=COMPUTE_ID,
-            lb_network_ip=LB_NET_IP,
-            load_balancer_id=LB_ID)
-
-        # Test the revert
-
-        mock_amphora_repo_update.reset_mock()
-        mark_amp_allocated_in_db.revert(None, _amphora_mock,
-                                        self.loadbalancer_mock.id)
-
-        repo.AmphoraRepository.update.assert_called_once_with(
-            'TEST',
-            id=AMP_ID,
-            status=constants.ERROR)
-
-        # Test the revert with exception
-
-        mock_amphora_repo_update.reset_mock()
-        mock_amphora_repo_update.side_effect = Exception('fail')
-        mark_amp_allocated_in_db.revert(None, _amphora_mock,
-                                        self.loadbalancer_mock.id)
-
-        repo.AmphoraRepository.update.assert_called_once_with(
-            'TEST',
-            id=AMP_ID,
-            status=constants.ERROR)
-
-    def test_mark_amphora_booting_in_db(self,
-                                        mock_generate_uuid,
-                                        mock_LOG,
-                                        mock_get_session,
-                                        mock_loadbalancer_repo_update,
-                                        mock_listener_repo_update,
-                                        mock_amphora_repo_update,
-                                        mock_amphora_repo_delete):
-
-        mark_amp_booting_in_db = database_tasks.MarkAmphoraBootingInDB()
-        mark_amp_booting_in_db.execute(_amphora_mock.id,
-                                       _amphora_mock.compute_id)
-
-        repo.AmphoraRepository.update.assert_called_once_with(
-            'TEST',
-            AMP_ID,
-            status=constants.AMPHORA_BOOTING,
-            compute_id=COMPUTE_ID)
-
-        # Test the revert
-
-        mock_amphora_repo_update.reset_mock()
-        mark_amp_booting_in_db.revert(None, _amphora_mock.id,
-                                      _amphora_mock.compute_id)
-
-        repo.AmphoraRepository.update.assert_called_once_with(
-            'TEST',
-            AMP_ID,
-            status=constants.ERROR,
-            compute_id=COMPUTE_ID)
-
-        # Test the revert with exception
-
-        mock_amphora_repo_update.reset_mock()
-        mock_amphora_repo_update.side_effect = Exception('fail')
-        mark_amp_booting_in_db.revert(None, _amphora_mock.id,
-                                      _amphora_mock.compute_id)
-
-        repo.AmphoraRepository.update.assert_called_once_with(
-            'TEST',
-            AMP_ID,
-            status=constants.ERROR,
-            compute_id=COMPUTE_ID)
-
-    def test_mark_amphora_deleted_in_db(self,
-                                        mock_generate_uuid,
-                                        mock_LOG,
-                                        mock_get_session,
-                                        mock_loadbalancer_repo_update,
-                                        mock_listener_repo_update,
-                                        mock_amphora_repo_update,
-                                        mock_amphora_repo_delete):
-
-        mark_amp_deleted_in_db = database_tasks.MarkAmphoraDeletedInDB()
-        mark_amp_deleted_in_db.execute(_amphora_mock)
-
-        repo.AmphoraRepository.update.assert_called_once_with(
-            'TEST',
-            AMP_ID,
-            status=constants.DELETED)
-
-        # Test the revert
-        mock_amphora_repo_update.reset_mock()
-        mark_amp_deleted_in_db.revert(_amphora_mock)
-
-        repo.AmphoraRepository.update.assert_called_once_with(
-            'TEST',
-            id=AMP_ID,
-            status=constants.ERROR)
-
-        # Test the revert with exception
-        mock_amphora_repo_update.reset_mock()
-        mock_amphora_repo_update.side_effect = Exception('fail')
-        mark_amp_deleted_in_db.revert(_amphora_mock)
-
-        repo.AmphoraRepository.update.assert_called_once_with(
-            'TEST',
-            id=AMP_ID,
-            status=constants.ERROR)
-
-    def test_mark_amphora_pending_delete_in_db(self,
-                                               mock_generate_uuid,
-                                               mock_LOG,
-                                               mock_get_session,
-                                               mock_loadbalancer_repo_update,
-                                               mock_listener_repo_update,
-                                               mock_amphora_repo_update,
-                                               mock_amphora_repo_delete):
-
-        mark_amp_pending_delete_in_db = (database_tasks.
-                                         MarkAmphoraPendingDeleteInDB())
-        mark_amp_pending_delete_in_db.execute(_amphora_mock)
-
-        repo.AmphoraRepository.update.assert_called_once_with(
-            'TEST',
-            AMP_ID,
-            status=constants.PENDING_DELETE)
-
-        # Test the revert
-        mock_amphora_repo_update.reset_mock()
-        mark_amp_pending_delete_in_db.revert(_amphora_mock)
-
-        repo.AmphoraRepository.update.assert_called_once_with(
-            'TEST',
-            id=AMP_ID,
-            status=constants.ERROR)
-
-        # Test the revert with exception
-        mock_amphora_repo_update.reset_mock()
-        mock_amphora_repo_update.side_effect = Exception('fail')
-
-        mark_amp_pending_delete_in_db.revert(_amphora_mock)
-
-        repo.AmphoraRepository.update.assert_called_once_with(
-            'TEST',
-            id=AMP_ID,
-            status=constants.ERROR)
-
-    def test_mark_amphora_pending_update_in_db(self,
-                                               mock_generate_uuid,
-                                               mock_LOG,
-                                               mock_get_session,
-                                               mock_loadbalancer_repo_update,
-                                               mock_listener_repo_update,
-                                               mock_amphora_repo_update,
-                                               mock_amphora_repo_delete):
-
-        mark_amp_pending_update_in_db = (database_tasks.
-                                         MarkAmphoraPendingUpdateInDB())
-        mark_amp_pending_update_in_db.execute(_amphora_mock)
-
-        repo.AmphoraRepository.update.assert_called_once_with(
-            'TEST',
-            AMP_ID,
-            status=constants.PENDING_UPDATE)
-
-        # Test the revert
-        mock_amphora_repo_update.reset_mock()
-        mark_amp_pending_update_in_db.revert(_amphora_mock)
-
-        repo.AmphoraRepository.update.assert_called_once_with(
-            'TEST',
-            id=AMP_ID,
-            status=constants.ERROR)
-
-        # Test the revert with exception
-        mock_amphora_repo_update.reset_mock()
-        mock_amphora_repo_update.side_effect = Exception('fail')
-        mark_amp_pending_update_in_db.revert(_amphora_mock)
-
-        repo.AmphoraRepository.update.assert_called_once_with(
-            'TEST',
-            id=AMP_ID,
-            status=constants.ERROR)
-
-    def test_mark_amphora_ready_in_db(self,
-                                      mock_generate_uuid,
-                                      mock_LOG,
-                                      mock_get_session,
-                                      mock_loadbalancer_repo_update,
-                                      mock_listener_repo_update,
-                                      mock_amphora_repo_update,
-                                      mock_amphora_repo_delete):
-
-        _amphora_mock.lb_network_ip = LB_NET_IP
-
-        mark_amp_ready_in_db = database_tasks.MarkAmphoraReadyInDB()
-        mark_amp_ready_in_db.execute(_amphora_mock)
-
-        repo.AmphoraRepository.update.assert_called_once_with(
-            'TEST',
-            AMP_ID,
-            status=constants.AMPHORA_READY,
-            compute_id=COMPUTE_ID,
-            lb_network_ip=LB_NET_IP)
-
-        # Test the revert
-
-        mock_amphora_repo_update.reset_mock()
-        mark_amp_ready_in_db.revert(_amphora_mock)
-
-        repo.AmphoraRepository.update.assert_called_once_with(
-            'TEST',
-            AMP_ID,
-            status=constants.ERROR,
-            compute_id=COMPUTE_ID,
-            lb_network_ip=LB_NET_IP)
-
-        # Test the revert with exception
-
-        mock_amphora_repo_update.reset_mock()
-        mock_amphora_repo_update.side_effect = Exception('fail')
-        mark_amp_ready_in_db.revert(_amphora_mock)
-
-        repo.AmphoraRepository.update.assert_called_once_with(
-            'TEST',
-            AMP_ID,
-            status=constants.ERROR,
-            compute_id=COMPUTE_ID,
-            lb_network_ip=LB_NET_IP)
-
-    @mock.patch('octavia.db.repositories.AmphoraRepository.get')
-    def test_update_amphora_info(self,
-                                 mock_amphora_repo_get,
-                                 mock_generate_uuid,
-                                 mock_LOG,
-                                 mock_get_session,
-                                 mock_loadbalancer_repo_update,
-                                 mock_listener_repo_update,
-                                 mock_amphora_repo_update,
-                                 mock_amphora_repo_delete):
-
-        update_amphora_info = database_tasks.UpdateAmphoraInfo()
-        update_amphora_info.execute(AMP_ID, _compute_mock)
-
-        repo.AmphoraRepository.update.assert_called_once_with(
-            'TEST',
-            AMP_ID,
-            lb_network_ip=LB_NET_IP,
-            cached_zone=CACHED_ZONE,
-            image_id=IMAGE_ID,
-            compute_flavor=COMPUTE_FLAVOR)
-
-        repo.AmphoraRepository.get.assert_called_once_with(
-            'TEST',
-            id=AMP_ID)
-
-    def test_mark_listener_deleted_in_db(self,
-                                         mock_generate_uuid,
-                                         mock_LOG,
-                                         mock_get_session,
-                                         mock_loadbalancer_repo_update,
-                                         mock_listener_repo_update,
-                                         mock_amphora_repo_update,
-                                         mock_amphora_repo_delete):
-
-        mark_listener_deleted = database_tasks.MarkListenerDeletedInDB()
-        mark_listener_deleted.execute(self.listener_mock)
-
-        repo.ListenerRepository.update.assert_called_once_with(
-            'TEST',
-            LISTENER_ID,
-            provisioning_status=constants.DELETED)
-
-        # Test the revert
-        mock_listener_repo_update.reset_mock()
-        mark_listener_deleted.revert(self.listener_mock)
-
-        repo.ListenerRepository.update.assert_called_once_with(
-            'TEST',
-            id=LISTENER_ID,
-            provisioning_status=constants.ERROR)
-
-        # Test the revert with exception
-        mock_listener_repo_update.reset_mock()
-        mock_listener_repo_update.side_effect = Exception('fail')
-        mark_listener_deleted.revert(self.listener_mock)
-
-        repo.ListenerRepository.update.assert_called_once_with(
-            'TEST',
-            id=LISTENER_ID,
-            provisioning_status=constants.ERROR)
-
-    def test_mark_listener_pending_deleted_in_db(self,
-                                                 mock_generate_uuid,
-                                                 mock_LOG,
-                                                 mock_get_session,
-                                                 mock_loadbalancer_repo_update,
-                                                 mock_listener_repo_update,
-                                                 mock_amphora_repo_update,
-                                                 mock_amphora_repo_delete):
-
-        mark_listener_pending_delete = (database_tasks.
-                                        MarkListenerPendingDeleteInDB())
-        mark_listener_pending_delete.execute(self.listener_mock)
-
-        repo.ListenerRepository.update.assert_called_once_with(
-            'TEST',
-            LISTENER_ID,
-            provisioning_status=constants.PENDING_DELETE)
-
-        # Test the revert
-        mock_listener_repo_update.reset_mock()
-        mark_listener_pending_delete.revert(self.listener_mock)
-
-        repo.ListenerRepository.update.assert_called_once_with(
-            'TEST',
-            id=LISTENER_ID,
-            provisioning_status=constants.ERROR)
-
-        # Test the revert with exception
-        mock_listener_repo_update.reset_mock()
-        mock_listener_repo_update.side_effect = Exception('fail')
-        mark_listener_pending_delete.revert(self.listener_mock)
-
-        repo.ListenerRepository.update.assert_called_once_with(
-            'TEST',
-            id=LISTENER_ID,
-            provisioning_status=constants.ERROR)
-
-    @mock.patch('octavia.db.repositories.ListenerRepository.'
-                'prov_status_active_if_not_error')
-    def test_mark_lb_and_listeners_active_in_db(self,
-                                                mock_list_not_error,
-                                                mock_generate_uuid,
-                                                mock_LOG,
-                                                mock_get_session,
-                                                mock_loadbalancer_repo_update,
-                                                mock_listener_repo_update,
-                                                mock_amphora_repo_update,
-                                                mock_amphora_repo_delete):
-
-        mark_lb_and_listeners_active = (database_tasks.
-                                        MarkLBAndListenersActiveInDB())
-        mark_lb_and_listeners_active.execute(self.loadbalancer_mock,
-                                             [self.listener_mock])
-
-        mock_list_not_error.assert_called_once_with('TEST', LISTENER_ID)
-        repo.LoadBalancerRepository.update.assert_called_once_with(
-            'TEST',
-            LB_ID,
-            provisioning_status=constants.ACTIVE)
-
-        # Test the revert
-        mock_loadbalancer_repo_update.reset_mock()
-        mock_listener_repo_update.reset_mock()
-
-        mark_lb_and_listeners_active.revert(self.loadbalancer_mock,
-                                            [self.listener_mock])
-
-        repo.ListenerRepository.update.assert_called_once_with(
-            'TEST',
-            id=LISTENER_ID,
-            provisioning_status=constants.ERROR)
-        repo.LoadBalancerRepository.update.assert_not_called()
-
-        # Test the revert with exceptions
-        mock_loadbalancer_repo_update.reset_mock()
-        mock_loadbalancer_repo_update.side_effect = Exception('fail')
-        mock_listener_repo_update.reset_mock()
-        mock_listener_repo_update.side_effect = Exception('fail')
-
-        mark_lb_and_listeners_active.revert(self.loadbalancer_mock,
-                                            [self.listener_mock])
-
-        repo.ListenerRepository.update.assert_called_once_with(
-            'TEST',
-            id=LISTENER_ID,
-            provisioning_status=constants.ERROR)
-        repo.LoadBalancerRepository.update.assert_not_called()
-
-    @mock.patch('octavia.common.tls_utils.cert_parser.get_cert_expiration',
-                return_value=_cert_mock)
-    def test_update_amphora_db_cert_exp(self,
-                                        mock_generate_uuid,
-                                        mock_LOG,
-                                        mock_get_session,
-                                        mock_loadbalancer_repo_update,
-                                        mock_listener_repo_update,
-                                        mock_amphora_repo_update,
-                                        mock_amphora_repo_delete,
-                                        mock_get_cert_exp):
-
-        update_amp_cert = database_tasks.UpdateAmphoraDBCertExpiration()
-        key = utils.get_compatible_server_certs_key_passphrase()
-        fer = fernet.Fernet(key)
-        _pem_mock = fer.encrypt(
-            utils.get_compatible_value('test_cert')
-        )
-        update_amp_cert.execute(_amphora_mock.id, _pem_mock)
-
-        repo.AmphoraRepository.update.assert_called_once_with(
-            'TEST',
-            AMP_ID,
-            cert_expiration=_cert_mock)
-
-    def test_update_amphora_cert_busy_to_false(self,
-                                               mock_generate_uuid,
-                                               mock_LOG,
-                                               mock_get_session,
-                                               mock_loadbalancer_repo_update,
-                                               mock_listener_repo_update,
-                                               mock_amphora_repo_update,
-                                               mock_amphora_repo_delete):
-        amp_cert_busy_to_F = database_tasks.UpdateAmphoraCertBusyToFalse()
-        amp_cert_busy_to_F.execute(_amphora_mock)
-        repo.AmphoraRepository.update.assert_called_once_with(
-            'TEST',
-            AMP_ID,
-            cert_busy=False)
-
-    def test_mark_LB_active_in_db(self,
-                                  mock_generate_uuid,
-                                  mock_LOG,
-                                  mock_get_session,
-                                  mock_loadbalancer_repo_update,
-                                  mock_listener_repo_update,
-                                  mock_amphora_repo_update,
-                                  mock_amphora_repo_delete):
-
-        mark_loadbalancer_active = database_tasks.MarkLBActiveInDB()
-        mark_loadbalancer_active.execute(self.loadbalancer_mock)
-
-        repo.LoadBalancerRepository.update.assert_called_once_with(
-            'TEST',
-            LB_ID,
-            provisioning_status=constants.ACTIVE)
-        self.assertEqual(0, repo.ListenerRepository.update.call_count)
-
-        # Test the revert
-        mock_loadbalancer_repo_update.reset_mock()
-        mark_loadbalancer_active.revert(self.loadbalancer_mock)
-
-        repo.LoadBalancerRepository.update.assert_not_called()
-        self.assertEqual(0, repo.ListenerRepository.update.call_count)
-
-        # Test the revert with exception
-        mock_loadbalancer_repo_update.reset_mock()
-        mock_loadbalancer_repo_update.side_effect = Exception('fail')
-        mark_loadbalancer_active.revert(self.loadbalancer_mock)
-
-        repo.LoadBalancerRepository.update.assert_not_called()
-        self.assertEqual(0, repo.ListenerRepository.update.call_count)
-
-    def test_mark_LB_active_in_db_and_listeners(self,
-                                                mock_generate_uuid,
-                                                mock_LOG,
-                                                mock_get_session,
-                                                mock_loadbalancer_repo_update,
-                                                mock_listener_repo_update,
-                                                mock_amphora_repo_update,
-                                                mock_amphora_repo_delete):
-        listeners = [data_models.Listener(id='listener1'),
-                     data_models.Listener(id='listener2')]
-        lb = data_models.LoadBalancer(id=LB_ID, listeners=listeners)
-        mark_lb_active = database_tasks.MarkLBActiveInDB(mark_subobjects=True)
-        mark_lb_active.execute(lb)
-
-        repo.LoadBalancerRepository.update.assert_called_once_with(
-            'TEST',
-            lb.id,
-            provisioning_status=constants.ACTIVE)
-        self.assertEqual(2, repo.ListenerRepository.update.call_count)
-        repo.ListenerRepository.update.assert_has_calls(
-            [mock.call('TEST', listeners[0].id,
-                       provisioning_status=constants.ACTIVE),
-             mock.call('TEST', listeners[1].id,
-                       provisioning_status=constants.ACTIVE)])
-
-        mock_loadbalancer_repo_update.reset_mock()
-        mock_listener_repo_update.reset_mock()
-        mark_lb_active.revert(lb)
-
-        repo.LoadBalancerRepository.update.assert_not_called()
-        self.assertEqual(2, repo.ListenerRepository.update.call_count)
-        repo.ListenerRepository.update.assert_has_calls(
-            [mock.call('TEST', listeners[0].id,
-                       provisioning_status=constants.ERROR),
-             mock.call('TEST', listeners[1].id,
-                       provisioning_status=constants.ERROR)])
-
-    @mock.patch('octavia.db.repositories.PoolRepository.update')
-    @mock.patch('octavia.db.repositories.MemberRepository.update')
-    @mock.patch('octavia.db.repositories.HealthMonitorRepository.update')
-    @mock.patch('octavia.db.repositories.L7PolicyRepository.update')
-    @mock.patch('octavia.db.repositories.L7RuleRepository.update')
-    def test_mark_LB_active_in_db_full_graph(self,
-                                             mock_l7r_repo_update,
-                                             mock_l7p_repo_update,
-                                             mock_hm_repo_update,
-                                             mock_member_repo_update,
-                                             mock_pool_repo_update,
-                                             mock_generate_uuid,
-                                             mock_LOG,
-                                             mock_get_session,
-                                             mock_loadbalancer_repo_update,
-                                             mock_listener_repo_update,
-                                             mock_amphora_repo_update,
-                                             mock_amphora_repo_delete):
-        unused_pool = data_models.Pool(id='unused_pool')
-        members1 = [data_models.Member(id='member1'),
-                    data_models.Member(id='member2')]
-        health_monitor = data_models.HealthMonitor(id='hm1')
-        default_pool = data_models.Pool(id='default_pool',
-                                        members=members1,
-                                        health_monitor=health_monitor)
-        listener1 = data_models.Listener(id='listener1',
-                                         default_pool=default_pool)
-        members2 = [data_models.Member(id='member3'),
-                    data_models.Member(id='member4')]
-        redirect_pool = data_models.Pool(id='redirect_pool',
-                                         members=members2)
-        l7rules = [data_models.L7Rule(id='rule1')]
-        redirect_policy = data_models.L7Policy(id='redirect_policy',
-                                               redirect_pool=redirect_pool,
-                                               l7rules=l7rules)
-        l7policies = [redirect_policy]
-        listener2 = data_models.Listener(id='listener2',
-                                         l7policies=l7policies)
-        listener2.l7policies = l7policies
-        listeners = [listener1, listener2]
-        pools = [default_pool, redirect_pool, unused_pool]
-
-        lb = data_models.LoadBalancer(id=LB_ID, listeners=listeners,
-                                      pools=pools)
-        mark_lb_active = database_tasks.MarkLBActiveInDB(mark_subobjects=True)
-        mark_lb_active.execute(lb)
-
-        repo.LoadBalancerRepository.update.assert_called_once_with(
-            'TEST',
-            lb.id,
-            provisioning_status=constants.ACTIVE)
-        self.assertEqual(2, repo.ListenerRepository.update.call_count)
-        repo.ListenerRepository.update.assert_has_calls(
-            [mock.call('TEST', listeners[0].id,
-                       provisioning_status=constants.ACTIVE),
-             mock.call('TEST', listeners[1].id,
-                       provisioning_status=constants.ACTIVE)])
-        self.assertEqual(5, repo.PoolRepository.update.call_count)
-        repo.PoolRepository.update.assert_has_calls(
-            [mock.call('TEST', default_pool.id,
-                       provisioning_status=constants.ACTIVE),
-             mock.call('TEST', redirect_pool.id,
-                       provisioning_status=constants.ACTIVE),
-             mock.call('TEST', unused_pool.id,
-                       provisioning_status=constants.ACTIVE)])
-        self.assertEqual(8, repo.MemberRepository.update.call_count)
-        repo.MemberRepository.update.has_calls(
-            [mock.call('TEST', members1[0].id,
-                       provisioning_status=constants.ACTIVE),
-             mock.call('TEST', members1[1].id,
-                       provisioning_status=constants.ACTIVE),
-             mock.call('TEST', members2[0].id,
-                       provisioning_status=constants.ACTIVE),
-             mock.call('TEST', members2[1].id,
-                       provisioning_status=constants.ACTIVE)])
-        self.assertEqual(2, repo.HealthMonitorRepository.update.call_count)
-        repo.HealthMonitorRepository.update.has_calls(
-            [mock.call('TEST', health_monitor.id,
-                       provisioning_status=constants.ACTIVE)])
-        self.assertEqual(1, repo.L7PolicyRepository.update.call_count)
-        repo.L7PolicyRepository.update.assert_has_calls(
-            [mock.call('TEST', l7policies[0].id,
-                       provisioning_status=constants.ACTIVE)])
-        self.assertEqual(1, repo.L7RuleRepository.update.call_count)
-        repo.L7RuleRepository.update.assert_has_calls(
-            [mock.call('TEST', l7rules[0].id,
-                       provisioning_status=constants.ACTIVE)])
-
-        mock_loadbalancer_repo_update.reset_mock()
-        mock_listener_repo_update.reset_mock()
-        mock_pool_repo_update.reset_mock()
-        mock_member_repo_update.reset_mock()
-        mock_hm_repo_update.reset_mock()
-        mock_l7p_repo_update.reset_mock()
-        mock_l7r_repo_update.reset_mock()
-        mark_lb_active.revert(lb)
-
-        repo.LoadBalancerRepository.update.assert_not_called()
-        self.assertEqual(2, repo.ListenerRepository.update.call_count)
-        repo.ListenerRepository.update.assert_has_calls(
-            [mock.call('TEST', listeners[0].id,
-                       provisioning_status=constants.ERROR),
-             mock.call('TEST', listeners[1].id,
-                       provisioning_status=constants.ERROR)])
-        self.assertEqual(5, repo.PoolRepository.update.call_count)
-        repo.PoolRepository.update.has_calls(
-            [mock.call('TEST', default_pool.id,
-                       provisioning_status=constants.ERROR),
-             mock.call('TEST', redirect_pool.id,
-                       provisioning_status=constants.ERROR)])
-        self.assertEqual(8, repo.MemberRepository.update.call_count)
-        repo.MemberRepository.update.has_calls(
-            [mock.call('TEST', members1[0].id,
-                       provisioning_status=constants.ERROR),
-             mock.call('TEST', members1[1].id,
-                       provisioning_status=constants.ERROR),
-             mock.call('TEST', members2[0].id,
-                       provisioning_status=constants.ERROR),
-             mock.call('TEST', members2[1].id,
-                       provisioning_status=constants.ERROR)])
-        self.assertEqual(2, repo.HealthMonitorRepository.update.call_count)
-        repo.HealthMonitorRepository.update.has_calls(
-            [mock.call('TEST', health_monitor.id,
-                       provisioning_status=constants.ERROR)])
-        self.assertEqual(1, repo.L7PolicyRepository.update.call_count)
-        repo.L7PolicyRepository.update.assert_has_calls(
-            [mock.call('TEST', l7policies[0].id,
-                       provisioning_status=constants.ERROR)])
-        self.assertEqual(1, repo.L7RuleRepository.update.call_count)
-        repo.L7RuleRepository.update.assert_has_calls(
-            [mock.call('TEST', l7rules[0].id,
-                       provisioning_status=constants.ERROR)])
-
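Note that several checks in the test above call Mock.has_calls() rather than assert_has_calls(). On a Mock, has_calls is just an auto-created child attribute, so those lines pass regardless of the recorded calls; only the call_count assertions next to them actually verify anything. The difference in one sketch:

    from unittest import mock

    m = mock.MagicMock()
    m('a')

    # Passes silently: has_calls is an auto-created mock, not an assertion.
    m.has_calls([mock.call('b')])

    try:
        m.assert_has_calls([mock.call('b')])
    except AssertionError:
        print('assert_has_calls catches the missing call')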
-    def test_mark_LB_deleted_in_db(self,
-                                   mock_generate_uuid,
-                                   mock_LOG,
-                                   mock_get_session,
-                                   mock_loadbalancer_repo_update,
-                                   mock_listener_repo_update,
-                                   mock_amphora_repo_update,
-                                   mock_amphora_repo_delete):
-
-        mark_loadbalancer_deleted = database_tasks.MarkLBDeletedInDB()
-        mark_loadbalancer_deleted.execute(self.loadbalancer_mock)
-
-        repo.LoadBalancerRepository.update.assert_called_once_with(
-            'TEST',
-            LB_ID,
-            provisioning_status=constants.DELETED)
-
-        # Test the revert
-        mock_loadbalancer_repo_update.reset_mock()
-        mark_loadbalancer_deleted.revert(self.loadbalancer_mock)
-
-        repo.LoadBalancerRepository.update.assert_not_called()
-
-        # Test the revert with exception
-        mock_loadbalancer_repo_update.reset_mock()
-        mock_loadbalancer_repo_update.side_effect = Exception('fail')
-        mark_loadbalancer_deleted.revert(self.loadbalancer_mock)
-
-        repo.LoadBalancerRepository.update.assert_not_called()
-
-    def test_mark_LB_pending_deleted_in_db(self,
-                                           mock_generate_uuid,
-                                           mock_LOG,
-                                           mock_get_session,
-                                           mock_loadbalancer_repo_update,
-                                           mock_listener_repo_update,
-                                           mock_amphora_repo_update,
-                                           mock_amphora_repo_delete):
-
-        mark_loadbalancer_pending_delete = (database_tasks.
-                                            MarkLBPendingDeleteInDB())
-        mark_loadbalancer_pending_delete.execute(self.loadbalancer_mock)
-
-        repo.LoadBalancerRepository.update.assert_called_once_with(
-            'TEST',
-            LB_ID,
-            provisioning_status=constants.PENDING_DELETE)
-
-    @mock.patch('octavia.db.repositories.HealthMonitorRepository.update')
-    def test_update_health_monitor_in_db(self,
-                                         mock_health_mon_repo_update,
-                                         mock_generate_uuid,
-                                         mock_LOG,
-                                         mock_get_session,
-                                         mock_loadbalancer_repo_update,
-                                         mock_listener_repo_update,
-                                         mock_amphora_repo_update,
-                                         mock_amphora_repo_delete):
-
-        update_health_mon = database_tasks.UpdateHealthMonInDB()
-        update_health_mon.execute(self.health_mon_mock,
-                                  {'delay': 1, 'timeout': 2})
-
-        repo.HealthMonitorRepository.update.assert_called_once_with(
-            'TEST',
-            HM_ID,
-            delay=1, timeout=2)
-
-        # Test the revert
-        mock_health_mon_repo_update.reset_mock()
-        update_health_mon.revert(self.health_mon_mock)
-
-        repo.HealthMonitorRepository.update.assert_called_once_with(
-            'TEST',
-            HM_ID,
-            provisioning_status=constants.ERROR)
-
-        # Test the revert with exception
-        mock_health_mon_repo_update.reset_mock()
-        mock_health_mon_repo_update.side_effect = Exception('fail')
-        update_health_mon.revert(self.health_mon_mock)
-
-        repo.HealthMonitorRepository.update.assert_called_once_with(
-            'TEST',
-            HM_ID,
-            provisioning_status=constants.ERROR)
-
-    def test_update_load_balancer_in_db(self,
-                                        mock_generate_uuid,
-                                        mock_LOG,
-                                        mock_get_session,
-                                        mock_loadbalancer_repo_update,
-                                        mock_listener_repo_update,
-                                        mock_amphora_repo_update,
-                                        mock_amphora_repo_delete):
-
-        update_load_balancer = database_tasks.UpdateLoadbalancerInDB()
-        update_load_balancer.execute(self.loadbalancer_mock,
-                                     {'name': 'test', 'description': 'test2'})
-
-        repo.LoadBalancerRepository.update.assert_called_once_with(
-            'TEST',
-            LB_ID,
-            name='test', description='test2')
-
-    @mock.patch('octavia.db.repositories.VipRepository.update')
-    def test_update_vip_in_db_during_update_loadbalancer(self,
-                                                         mock_vip_update,
-                                                         mock_generate_uuid,
-                                                         mock_LOG,
-                                                         mock_get_session,
-                                                         mock_lb_update,
-                                                         mock_listener_update,
-                                                         mock_amphora_update,
-                                                         mock_amphora_delete):
-
-        self.loadbalancer_mock.vip.load_balancer_id = LB_ID
-        update_load_balancer = database_tasks.UpdateLoadbalancerInDB()
-        update_load_balancer.execute(self.loadbalancer_mock,
-                                     {'name': 'test',
-                                      'description': 'test2',
-                                      'vip': {'qos_policy_id': 'fool'}})
-
-        repo.LoadBalancerRepository.update.assert_called_once_with(
-            'TEST',
-            LB_ID,
-            name='test', description='test2')
-
-        repo.VipRepository.update.assert_called_once_with('TEST', LB_ID,
-                                                          qos_policy_id='fool')
-
-    def test_update_listener_in_db(self,
-                                   mock_generate_uuid,
-                                   mock_LOG,
-                                   mock_get_session,
-                                   mock_loadbalancer_repo_update,
-                                   mock_listener_repo_update,
-                                   mock_amphora_repo_update,
-                                   mock_amphora_repo_delete):
-
-        update_listener = database_tasks.UpdateListenerInDB()
-        update_listener.execute(self.listener_mock,
-                                {'name': 'test', 'description': 'test2'})
-
-        repo.ListenerRepository.update.assert_called_once_with(
-            'TEST',
-            LISTENER_ID,
-            name='test', description='test2')
-
-        # Test the revert
-        mock_listener_repo_update.reset_mock()
-        update_listener.revert(self.listener_mock)
-        repo.ListenerRepository.update.assert_called_once_with(
-            'TEST',
-            id=LISTENER_ID,
-            provisioning_status=constants.ERROR)
-
-        # Test the revert
-        mock_listener_repo_update.reset_mock()
-        mock_listener_repo_update.side_effect = Exception('fail')
-        update_listener.revert(self.listener_mock)
-        repo.ListenerRepository.update.assert_called_once_with(
-            'TEST',
-            id=LISTENER_ID,
-            provisioning_status=constants.ERROR)
-
-    @mock.patch('octavia.db.repositories.MemberRepository.update')
-    def test_update_member_in_db(self,
-                                 mock_member_repo_update,
-                                 mock_generate_uuid,
-                                 mock_LOG,
-                                 mock_get_session,
-                                 mock_loadbalancer_repo_update,
-                                 mock_listener_repo_update,
-                                 mock_amphora_repo_update,
-                                 mock_amphora_repo_delete):
-
-        update_member = database_tasks.UpdateMemberInDB()
-        update_member.execute(self.member_mock,
-                              {'weight': 1, 'ip_address': '10.1.0.0'})
-
-        repo.MemberRepository.update.assert_called_once_with(
-            'TEST',
-            MEMBER_ID,
-            weight=1, ip_address='10.1.0.0')
-
-        # Test the revert
-        mock_member_repo_update.reset_mock()
-        update_member.revert(self.member_mock)
-
-        repo.MemberRepository.update.assert_called_once_with(
-            'TEST',
-            MEMBER_ID,
-            provisioning_status=constants.ERROR)
-
-        # Test the revert
-        mock_member_repo_update.reset_mock()
-        mock_member_repo_update.side_effect = Exception('fail')
-        update_member.revert(self.member_mock)
-
-        repo.MemberRepository.update.assert_called_once_with(
-            'TEST',
-            MEMBER_ID,
-            provisioning_status=constants.ERROR)
-
-    @mock.patch(
-        'octavia.db.repositories.Repositories.update_pool_and_sp')
-    def test_update_pool_in_db(self,
-                               mock_repos_pool_update,
-                               mock_generate_uuid,
-                               mock_LOG,
-                               mock_get_session,
-                               mock_loadbalancer_repo_update,
-                               mock_listener_repo_update,
-                               mock_amphora_repo_update,
-                               mock_amphora_repo_delete):
-
-        sp_dict = {'type': 'SOURCE_IP', 'cookie_name': None}
-        update_dict = {'name': 'test', 'description': 'test2',
-                       'session_persistence': sp_dict}
-        update_pool = database_tasks.UpdatePoolInDB()
-        update_pool.execute(self.pool_mock,
-                            update_dict)
-
-        repo.Repositories.update_pool_and_sp.assert_called_once_with(
-            'TEST',
-            POOL_ID,
-            update_dict)
-
-        # Test the revert
-        mock_repos_pool_update.reset_mock()
-        update_pool.revert(self.pool_mock)
-
-        repo.Repositories.update_pool_and_sp.assert_called_once_with(
-            'TEST',
-            POOL_ID,
-            {'provisioning_status': constants.ERROR})
-
-        # Test the revert with exception
-        mock_repos_pool_update.reset_mock()
-        mock_repos_pool_update.side_effect = Exception('fail')
-        update_pool.revert(self.pool_mock)
-
-        repo.Repositories.update_pool_and_sp.assert_called_once_with(
-            'TEST',
-            POOL_ID,
-            {'provisioning_status': constants.ERROR})
-
-    @mock.patch('octavia.db.repositories.L7PolicyRepository.update')
-    def test_update_l7policy_in_db(self,
-                                   mock_l7policy_repo_update,
-                                   mock_generate_uuid,
-                                   mock_LOG,
-                                   mock_get_session,
-                                   mock_loadbalancer_repo_update,
-                                   mock_listener_repo_update,
-                                   mock_amphora_repo_update,
-                                   mock_amphora_repo_delete):
-
-        update_l7policy = database_tasks.UpdateL7PolicyInDB()
-        update_l7policy.execute(self.l7policy_mock,
-                                {'action': constants.L7POLICY_ACTION_REJECT})
-
-        repo.L7PolicyRepository.update.assert_called_once_with(
-            'TEST',
-            L7POLICY_ID,
-            action=constants.L7POLICY_ACTION_REJECT)
-
-        # Test the revert
-        mock_l7policy_repo_update.reset_mock()
-        update_l7policy.revert(self.l7policy_mock)
-
-        repo.L7PolicyRepository.update.assert_called_once_with(
-            'TEST',
-            L7POLICY_ID,
-            provisioning_status=constants.ERROR)
-
-        # Test the revert
-        mock_l7policy_repo_update.reset_mock()
-        mock_l7policy_repo_update.side_effect = Exception('fail')
-        update_l7policy.revert(self.l7policy_mock)
-
-        repo.L7PolicyRepository.update.assert_called_once_with(
-            'TEST',
-            L7POLICY_ID,
-            provisioning_status=constants.ERROR)
-
-    @mock.patch('octavia.db.repositories.L7RuleRepository.update')
-    @mock.patch('octavia.db.repositories.L7PolicyRepository.update')
-    def test_update_l7rule_in_db(self,
-                                 mock_l7rule_repo_update,
-                                 mock_l7policy_repo_update,
-                                 mock_generate_uuid,
-                                 mock_LOG,
-                                 mock_get_session,
-                                 mock_loadbalancer_repo_update,
-                                 mock_listener_repo_update,
-                                 mock_amphora_repo_update,
-                                 mock_amphora_repo_delete):
-
-        update_l7rule = database_tasks.UpdateL7RuleInDB()
-        update_l7rule.execute(
-            self.l7rule_mock,
-            {'type': constants.L7RULE_TYPE_PATH,
-             'compare_type': constants.L7RULE_COMPARE_TYPE_STARTS_WITH,
-             'value': '/api'})
-
-        repo.L7RuleRepository.update.assert_called_once_with(
-            'TEST',
-            L7RULE_ID,
-            type=constants.L7RULE_TYPE_PATH,
-            compare_type=constants.L7RULE_COMPARE_TYPE_STARTS_WITH,
-            value='/api')
-
-        # Test the revert
-        mock_l7rule_repo_update.reset_mock()
-        update_l7rule.revert(self.l7rule_mock)
-
-        repo.L7PolicyRepository.update.assert_called_once_with(
-            'TEST',
-            L7POLICY_ID,
-            provisioning_status=constants.ERROR)
-
-        # Test the revert
-        mock_l7rule_repo_update.reset_mock()
-        mock_l7rule_repo_update.side_effect = Exception('fail')
-        update_l7rule.revert(self.l7rule_mock)
-
-        repo.L7PolicyRepository.update.assert_called_once_with(
-            'TEST',
-            L7POLICY_ID,
-            provisioning_status=constants.ERROR)
-
-    def test_get_amphora_details(self,
-                                 mock_generate_uuid,
-                                 mock_LOG,
-                                 mock_get_session,
-                                 mock_loadbalancer_repo_update,
-                                 mock_listener_repo_update,
-                                 mock_amphora_repo_update,
-                                 mock_amphora_repo_delete):
-
-        get_amp_details = database_tasks.GetAmphoraDetails()
-        new_amp = get_amp_details.execute(_amphora_mock)
-
-        self.assertEqual(AMP_ID, new_amp.id)
-        self.assertEqual(VRRP_IP, new_amp.vrrp_ip)
-        self.assertEqual(HA_IP, new_amp.ha_ip)
-        self.assertEqual(VRRP_PORT_ID, new_amp.vrrp_port_id)
-        self.assertEqual(AMP_ROLE, new_amp.role)
-        self.assertEqual(VRRP_ID, new_amp.vrrp_id)
-        self.assertEqual(VRRP_PRIORITY, new_amp.vrrp_priority)
-
_amphora_mock) - self.assertFalse(repo.AmphoraRepository.update.called) - - mock_amphora_repo_update.reset_mock() - - mark_amp_backup_indb = database_tasks.MarkAmphoraBackupInDB() - mark_amp_backup_indb.execute(_amphora_mock) - repo.AmphoraRepository.update.assert_called_once_with( - 'TEST', AMP_ID, role='BACKUP', - vrrp_priority=constants.ROLE_BACKUP_PRIORITY) - - mock_amphora_repo_update.reset_mock() - - mark_amp_backup_indb.revert("BADRESULT", _amphora_mock) - repo.AmphoraRepository.update.assert_called_once_with( - 'TEST', AMP_ID, role=None, vrrp_priority=None) - - mock_amphora_repo_update.reset_mock() - - mark_amp_standalone_indb = database_tasks.MarkAmphoraStandAloneInDB() - mark_amp_standalone_indb.execute(_amphora_mock) - repo.AmphoraRepository.update.assert_called_once_with( - 'TEST', AMP_ID, role='STANDALONE', - vrrp_priority=None) - - mock_amphora_repo_update.reset_mock() - - mark_amp_standalone_indb.revert("BADRESULT", _amphora_mock) - repo.AmphoraRepository.update.assert_called_once_with( - 'TEST', AMP_ID, role=None, vrrp_priority=None) - - # Test revert with exception - mock_amphora_repo_update.reset_mock() - mock_amphora_repo_update.side_effect = Exception('fail') - mark_amp_standalone_indb.revert("BADRESULT", _amphora_mock) - repo.AmphoraRepository.update.assert_called_once_with( - 'TEST', AMP_ID, role=None, vrrp_priority=None) - - @mock.patch('octavia.db.repositories.LoadBalancerRepository.get') - @mock.patch('octavia.db.repositories.AmphoraRepository.get') - def test_get_amphorae_from_loadbalancer(self, - mock_amphora_get, - mock_lb_get, - mock_generate_uuid, - mock_LOG, - mock_get_session, - mock_loadbalancer_repo_update, - mock_listener_repo_update, - mock_amphora_repo_update, - mock_amphora_repo_delete): - amp1 = mock.MagicMock() - amp1.id = uuidutils.generate_uuid() - amp2 = mock.MagicMock() - amp2.id = uuidutils.generate_uuid() - lb = mock.MagicMock() - lb.amphorae = [amp1, amp2] - mock_lb_get.return_value = lb - - mock_amphora_get.side_effect = [_amphora_mock, None] - - get_amps_from_lb_obj = database_tasks.GetAmphoraeFromLoadbalancer() - result = get_amps_from_lb_obj.execute(lb) - self.assertEqual([_amphora_mock], result) - - @mock.patch('octavia.db.repositories.ListenerRepository.get') - def test_get_listeners_from_loadbalancer(self, - mock_listener_get, - mock_generate_uuid, - mock_LOG, - mock_get_session, - mock_loadbalancer_repo_update, - mock_listener_repo_update, - mock_amphora_repo_update, - mock_amphora_repo_delete): - mock_listener_get.return_value = _listener_mock - _loadbalancer_mock.listeners = [_listener_mock] - get_list_from_lb_obj = database_tasks.GetListenersFromLoadbalancer() - result = get_list_from_lb_obj.execute(_loadbalancer_mock) - mock_listener_get.assert_called_once_with('TEST', id=_listener_mock.id) - self.assertEqual([_listener_mock], result) - - @mock.patch('octavia.db.repositories.LoadBalancerRepository.get') - def test_get_loadbalancer(self, mock_lb_get, mock_generate_uuid, mock_LOG, - mock_get_session, mock_loadbalancer_repo_update, - mock_listener_repo_update, - mock_amphora_repo_update, - mock_amphora_repo_delete): - FAKE_LB = 'fake LB' - LB_ID = uuidutils.generate_uuid() - get_loadbalancer_obj = database_tasks.GetLoadBalancer() - - mock_lb_get.return_value = FAKE_LB - - result = get_loadbalancer_obj.execute(LB_ID) - - self.assertEqual(FAKE_LB, result) - mock_lb_get.assert_called_once_with('TEST', id=LB_ID) - - def test_get_vip_from_loadbalancer(self, - mock_generate_uuid, - mock_LOG, - mock_get_session, - 
mock_loadbalancer_repo_update, - mock_listener_repo_update, - mock_amphora_repo_update, - mock_amphora_repo_delete): - _loadbalancer_mock.vip = _vip_mock - get_vip_from_lb_obj = database_tasks.GetVipFromLoadbalancer() - result = get_vip_from_lb_obj.execute(_loadbalancer_mock) - self.assertEqual(_vip_mock, result) - - @mock.patch('octavia.db.repositories.VRRPGroupRepository.create') - def test_create_vrrp_group_for_lb(self, - mock_vrrp_group_create, - mock_generate_uuid, - mock_LOG, - mock_get_session, - mock_loadbalancer_repo_update, - mock_listener_repo_update, - mock_amphora_repo_update, - mock_amphora_repo_delete): - - mock_get_session.side_effect = ['TEST', - odb_exceptions.DBDuplicateEntry] - create_vrrp_group = database_tasks.CreateVRRPGroupForLB() - create_vrrp_group.execute(_loadbalancer_mock.id) - mock_vrrp_group_create.assert_called_once_with( - 'TEST', load_balancer_id=LB_ID, - vrrp_group_name=LB_ID.replace('-', ''), - vrrp_auth_type=constants.VRRP_AUTH_DEFAULT, - vrrp_auth_pass=mock_generate_uuid.return_value.replace('-', - '')[0:7], - advert_int=1) - create_vrrp_group.execute(_loadbalancer_mock) - - @mock.patch('octavia.db.repositories.AmphoraHealthRepository.delete') - def test_disable_amphora_health_monitoring(self, - mock_amp_health_repo_delete, - mock_generate_uuid, - mock_LOG, - mock_get_session, - mock_loadbalancer_repo_update, - mock_listener_repo_update, - mock_amphora_repo_update, - mock_amphora_repo_delete): - disable_amp_health = database_tasks.DisableAmphoraHealthMonitoring() - disable_amp_health.execute(_amphora_mock) - mock_amp_health_repo_delete.assert_called_once_with( - 'TEST', amphora_id=AMP_ID) - - @mock.patch('octavia.db.repositories.AmphoraHealthRepository.delete') - def test_disable_lb_amphorae_health_monitoring( - self, - mock_amp_health_repo_delete, - mock_generate_uuid, - mock_LOG, - mock_get_session, - mock_loadbalancer_repo_update, - mock_listener_repo_update, - mock_amphora_repo_update, - mock_amphora_repo_delete): - disable_amp_health = ( - database_tasks.DisableLBAmphoraeHealthMonitoring()) - disable_amp_health.execute(_loadbalancer_mock) - mock_amp_health_repo_delete.assert_called_once_with( - 'TEST', amphora_id=AMP_ID) - - @mock.patch('octavia.db.repositories.AmphoraHealthRepository.update') - def test_mark_amphora_health_monitoring_busy(self, - mock_amp_health_repo_update, - mock_generate_uuid, - mock_LOG, - mock_get_session, - mock_loadbalancer_repo_update, - mock_listener_repo_update, - mock_amphora_repo_update, - mock_amphora_repo_delete): - mark_busy = database_tasks.MarkAmphoraHealthBusy() - mark_busy.execute(_amphora_mock) - mock_amp_health_repo_update.assert_called_once_with( - 'TEST', amphora_id=AMP_ID, busy=True) - - @mock.patch('octavia.db.repositories.AmphoraHealthRepository.update') - def test_mark_lb_amphorae_health_monitoring_busy( - self, - mock_amp_health_repo_update, - mock_generate_uuid, - mock_LOG, - mock_get_session, - mock_loadbalancer_repo_update, - mock_listener_repo_update, - mock_amphora_repo_update, - mock_amphora_repo_delete): - mark_busy = ( - database_tasks.MarkLBAmphoraeHealthBusy()) - mark_busy.execute(_loadbalancer_mock) - mock_amp_health_repo_update.assert_called_once_with( - 'TEST', amphora_id=AMP_ID, busy=True) - - def test_update_lb_server_group_in_db(self, - mock_generate_uuid, - mock_LOG, - mock_get_session, - mock_loadbalancer_repo_update, - mock_listener_repo_update, - mock_amphora_repo_update, - mock_amphora_repo_delete): - - update_server_group_info = database_tasks.UpdateLBServerGroupInDB() - 
update_server_group_info.execute(LB_ID, SERVER_GROUP_ID) - - repo.LoadBalancerRepository.update.assert_called_once_with( - 'TEST', - id=LB_ID, - server_group_id=SERVER_GROUP_ID) - - # Test the revert - mock_listener_repo_update.reset_mock() - update_server_group_info.revert(LB_ID, SERVER_GROUP_ID) - - # Test the revert with exception - mock_listener_repo_update.reset_mock() - mock_loadbalancer_repo_update.side_effect = Exception('fail') - update_server_group_info.revert(LB_ID, SERVER_GROUP_ID) - - @mock.patch('octavia.db.repositories.HealthMonitorRepository.update') - def test_mark_health_mon_active_in_db(self, - mock_health_mon_repo_update, - mock_generate_uuid, - mock_LOG, - mock_get_session, - mock_loadbalancer_repo_update, - mock_listener_repo_update, - mock_amphora_repo_update, - mock_amphora_repo_delete): - - mark_health_mon_active = (database_tasks.MarkHealthMonitorActiveInDB()) - mark_health_mon_active.execute(self.health_mon_mock) - - mock_health_mon_repo_update.assert_called_once_with( - 'TEST', - HM_ID, - operating_status=constants.ONLINE, - provisioning_status=constants.ACTIVE) - - # Test the revert - mock_health_mon_repo_update.reset_mock() - mark_health_mon_active.revert(self.health_mon_mock) - - mock_health_mon_repo_update.assert_called_once_with( - 'TEST', - id=HM_ID, - provisioning_status=constants.ERROR) - - # Test the revert with exception - mock_health_mon_repo_update.reset_mock() - mock_health_mon_repo_update.side_effect = Exception('fail') - mark_health_mon_active.revert(self.health_mon_mock) - - mock_health_mon_repo_update.assert_called_once_with( - 'TEST', - id=HM_ID, - provisioning_status=constants.ERROR) - - @mock.patch('octavia.db.repositories.HealthMonitorRepository.update') - def test_mark_health_mon_pending_create_in_db( - self, - mock_health_mon_repo_update, - mock_generate_uuid, - mock_LOG, - mock_get_session, - mock_loadbalancer_repo_update, - mock_listener_repo_update, - mock_amphora_repo_update, - mock_amphora_repo_delete): - - mark_health_mon_pending_create = (database_tasks. - MarkHealthMonitorPendingCreateInDB()) - mark_health_mon_pending_create.execute(self.health_mon_mock) - - mock_health_mon_repo_update.assert_called_once_with( - 'TEST', - HM_ID, - provisioning_status=constants.PENDING_CREATE) - - # Test the revert - mock_health_mon_repo_update.reset_mock() - mark_health_mon_pending_create.revert(self.health_mon_mock) - - mock_health_mon_repo_update.assert_called_once_with( - 'TEST', - id=HM_ID, - provisioning_status=constants.ERROR) - - # Test the revert with exception - mock_health_mon_repo_update.reset_mock() - mock_health_mon_repo_update.side_effect = Exception('fail') - mark_health_mon_pending_create.revert(self.health_mon_mock) - - mock_health_mon_repo_update.assert_called_once_with( - 'TEST', - id=HM_ID, - provisioning_status=constants.ERROR) - - @mock.patch('octavia.db.repositories.HealthMonitorRepository.update') - def test_mark_health_mon_pending_delete_in_db( - self, - mock_health_mon_repo_update, - mock_generate_uuid, - mock_LOG, - mock_get_session, - mock_loadbalancer_repo_update, - mock_listener_repo_update, - mock_amphora_repo_update, - mock_amphora_repo_delete): - - mark_health_mon_pending_delete = (database_tasks. 
- MarkHealthMonitorPendingDeleteInDB()) - mark_health_mon_pending_delete.execute(self.health_mon_mock) - - mock_health_mon_repo_update.assert_called_once_with( - 'TEST', - HM_ID, - provisioning_status=constants.PENDING_DELETE) - - # Test the revert - mock_health_mon_repo_update.reset_mock() - mark_health_mon_pending_delete.revert(self.health_mon_mock) - - mock_health_mon_repo_update.assert_called_once_with( - 'TEST', - id=HM_ID, - provisioning_status=constants.ERROR) - - # Test the revert with exception - mock_health_mon_repo_update.reset_mock() - mock_health_mon_repo_update.side_effect = Exception('fail') - mark_health_mon_pending_delete.revert(self.health_mon_mock) - - mock_health_mon_repo_update.assert_called_once_with( - 'TEST', - id=HM_ID, - provisioning_status=constants.ERROR) - - @mock.patch('octavia.db.repositories.HealthMonitorRepository.update') - def test_mark_health_mon_pending_update_in_db( - self, - mock_health_mon_repo_update, - mock_generate_uuid, - mock_LOG, - mock_get_session, - mock_loadbalancer_repo_update, - mock_listener_repo_update, - mock_amphora_repo_update, - mock_amphora_repo_delete): - - mark_health_mon_pending_update = (database_tasks. - MarkHealthMonitorPendingUpdateInDB()) - mark_health_mon_pending_update.execute(self.health_mon_mock) - - mock_health_mon_repo_update.assert_called_once_with( - 'TEST', - HM_ID, - provisioning_status=constants.PENDING_UPDATE) - - # Test the revert - mock_health_mon_repo_update.reset_mock() - mark_health_mon_pending_update.revert(self.health_mon_mock) - - mock_health_mon_repo_update.assert_called_once_with( - 'TEST', - id=HM_ID, - provisioning_status=constants.ERROR) - - # Test the revert with exception - mock_health_mon_repo_update.reset_mock() - mock_health_mon_repo_update.side_effect = Exception('fail') - mark_health_mon_pending_update.revert(self.health_mon_mock) - - mock_health_mon_repo_update.assert_called_once_with( - 'TEST', - id=HM_ID, - provisioning_status=constants.ERROR) - - @mock.patch('octavia.db.repositories.L7PolicyRepository.update') - def test_mark_l7policy_active_in_db(self, - mock_l7policy_repo_update, - mock_generate_uuid, - mock_LOG, - mock_get_session, - mock_loadbalancer_repo_update, - mock_listener_repo_update, - mock_amphora_repo_update, - mock_amphora_repo_delete): - - mark_l7policy_active = (database_tasks.MarkL7PolicyActiveInDB()) - mark_l7policy_active.execute(self.l7policy_mock) - - mock_l7policy_repo_update.assert_called_once_with( - 'TEST', - L7POLICY_ID, - provisioning_status=constants.ACTIVE, - operating_status=constants.ONLINE) - - # Test the revert - mock_l7policy_repo_update.reset_mock() - mark_l7policy_active.revert(self.l7policy_mock) - - mock_l7policy_repo_update.assert_called_once_with( - 'TEST', - id=L7POLICY_ID, - provisioning_status=constants.ERROR) - - # Test the revert with exception - mock_l7policy_repo_update.reset_mock() - mock_l7policy_repo_update.side_effect = Exception('fail') - mark_l7policy_active.revert(self.l7policy_mock) - - mock_l7policy_repo_update.assert_called_once_with( - 'TEST', - id=L7POLICY_ID, - provisioning_status=constants.ERROR) - - @mock.patch('octavia.db.repositories.L7PolicyRepository.update') - def test_mark_l7policy_pending_create_in_db(self, - mock_l7policy_repo_update, - mock_generate_uuid, - mock_LOG, - mock_get_session, - mock_loadbalancer_repo_update, - mock_listener_repo_update, - mock_amphora_repo_update, - mock_amphora_repo_delete): - - mark_l7policy_pending_create = (database_tasks. 
- MarkL7PolicyPendingCreateInDB()) - mark_l7policy_pending_create.execute(self.l7policy_mock) - - mock_l7policy_repo_update.assert_called_once_with( - 'TEST', - L7POLICY_ID, - provisioning_status=constants.PENDING_CREATE) - - # Test the revert - mock_l7policy_repo_update.reset_mock() - mark_l7policy_pending_create.revert(self.l7policy_mock) - - mock_l7policy_repo_update.assert_called_once_with( - 'TEST', - id=L7POLICY_ID, - provisioning_status=constants.ERROR) - - # Test the revert with exception - mock_l7policy_repo_update.reset_mock() - mock_l7policy_repo_update.side_effect = Exception('fail') - mark_l7policy_pending_create.revert(self.l7policy_mock) - - mock_l7policy_repo_update.assert_called_once_with( - 'TEST', - id=L7POLICY_ID, - provisioning_status=constants.ERROR) - - @mock.patch('octavia.db.repositories.L7PolicyRepository.update') - def test_mark_l7policy_pending_delete_in_db(self, - mock_l7policy_repo_update, - mock_generate_uuid, - mock_LOG, - mock_get_session, - mock_loadbalancer_repo_update, - mock_listener_repo_update, - mock_amphora_repo_update, - mock_amphora_repo_delete): - - mark_l7policy_pending_delete = (database_tasks. - MarkL7PolicyPendingDeleteInDB()) - mark_l7policy_pending_delete.execute(self.l7policy_mock) - - mock_l7policy_repo_update.assert_called_once_with( - 'TEST', - L7POLICY_ID, - provisioning_status=constants.PENDING_DELETE) - - # Test the revert - mock_l7policy_repo_update.reset_mock() - mark_l7policy_pending_delete.revert(self.l7policy_mock) - - mock_l7policy_repo_update.assert_called_once_with( - 'TEST', - id=L7POLICY_ID, - provisioning_status=constants.ERROR) - - # Test the revert with exception - mock_l7policy_repo_update.reset_mock() - mock_l7policy_repo_update.side_effect = Exception('fail') - mark_l7policy_pending_delete.revert(self.l7policy_mock) - - mock_l7policy_repo_update.assert_called_once_with( - 'TEST', - id=L7POLICY_ID, - provisioning_status=constants.ERROR) - - @mock.patch('octavia.db.repositories.L7PolicyRepository.update') - def test_mark_l7policy_pending_update_in_db(self, - mock_l7policy_repo_update, - mock_generate_uuid, - mock_LOG, - mock_get_session, - mock_loadbalancer_repo_update, - mock_listener_repo_update, - mock_amphora_repo_update, - mock_amphora_repo_delete): - - mark_l7policy_pending_update = (database_tasks. 
- MarkL7PolicyPendingUpdateInDB()) - mark_l7policy_pending_update.execute(self.l7policy_mock) - - mock_l7policy_repo_update.assert_called_once_with( - 'TEST', - L7POLICY_ID, - provisioning_status=constants.PENDING_UPDATE) - - # Test the revert - mock_l7policy_repo_update.reset_mock() - mark_l7policy_pending_update.revert(self.l7policy_mock) - - mock_l7policy_repo_update.assert_called_once_with( - 'TEST', - id=L7POLICY_ID, - provisioning_status=constants.ERROR) - - # Test the revert with exception - mock_l7policy_repo_update.reset_mock() - mock_l7policy_repo_update.side_effect = Exception('fail') - mark_l7policy_pending_update.revert(self.l7policy_mock) - - mock_l7policy_repo_update.assert_called_once_with( - 'TEST', - id=L7POLICY_ID, - provisioning_status=constants.ERROR) - - @mock.patch('octavia.db.repositories.L7RuleRepository.update') - def test_mark_l7rule_active_in_db(self, - mock_l7rule_repo_update, - mock_generate_uuid, - mock_LOG, - mock_get_session, - mock_loadbalancer_repo_update, - mock_listener_repo_update, - mock_amphora_repo_update, - mock_amphora_repo_delete): - - mark_l7rule_active = (database_tasks.MarkL7RuleActiveInDB()) - mark_l7rule_active.execute(self.l7rule_mock) - - mock_l7rule_repo_update.assert_called_once_with( - 'TEST', - L7RULE_ID, - provisioning_status=constants.ACTIVE, - operating_status=constants.ONLINE) - - # Test the revert - mock_l7rule_repo_update.reset_mock() - mark_l7rule_active.revert(self.l7rule_mock) - - mock_l7rule_repo_update.assert_called_once_with( - 'TEST', - id=L7RULE_ID, - provisioning_status=constants.ERROR) - - # Test the revert with exception - mock_l7rule_repo_update.reset_mock() - mock_l7rule_repo_update.side_effect = Exception('fail') - mark_l7rule_active.revert(self.l7rule_mock) - - mock_l7rule_repo_update.assert_called_once_with( - 'TEST', - id=L7RULE_ID, - provisioning_status=constants.ERROR) - - @mock.patch('octavia.db.repositories.L7RuleRepository.update') - def test_mark_l7rule_pending_create_in_db(self, - mock_l7rule_repo_update, - mock_generate_uuid, - mock_LOG, - mock_get_session, - mock_loadbalancer_repo_update, - mock_listener_repo_update, - mock_amphora_repo_update, - mock_amphora_repo_delete): - - mark_l7rule_pending_create = (database_tasks. - MarkL7RulePendingCreateInDB()) - mark_l7rule_pending_create.execute(self.l7rule_mock) - - mock_l7rule_repo_update.assert_called_once_with( - 'TEST', - L7RULE_ID, - provisioning_status=constants.PENDING_CREATE) - - # Test the revert - mock_l7rule_repo_update.reset_mock() - mark_l7rule_pending_create.revert(self.l7rule_mock) - - mock_l7rule_repo_update.assert_called_once_with( - 'TEST', - id=L7RULE_ID, - provisioning_status=constants.ERROR) - - # Test the revert with exception - mock_l7rule_repo_update.reset_mock() - mock_l7rule_repo_update.side_effect = Exception('fail') - mark_l7rule_pending_create.revert(self.l7rule_mock) - - mock_l7rule_repo_update.assert_called_once_with( - 'TEST', - id=L7RULE_ID, - provisioning_status=constants.ERROR) - - @mock.patch('octavia.db.repositories.L7RuleRepository.update') - def test_mark_l7rule_pending_delete_in_db(self, - mock_l7rule_repo_update, - mock_generate_uuid, - mock_LOG, - mock_get_session, - mock_loadbalancer_repo_update, - mock_listener_repo_update, - mock_amphora_repo_update, - mock_amphora_repo_delete): - - mark_l7rule_pending_delete = (database_tasks. 
- MarkL7RulePendingDeleteInDB()) - mark_l7rule_pending_delete.execute(self.l7rule_mock) - - mock_l7rule_repo_update.assert_called_once_with( - 'TEST', - L7RULE_ID, - provisioning_status=constants.PENDING_DELETE) - - # Test the revert - mock_l7rule_repo_update.reset_mock() - mark_l7rule_pending_delete.revert(self.l7rule_mock) - - mock_l7rule_repo_update.assert_called_once_with( - 'TEST', - id=L7RULE_ID, - provisioning_status=constants.ERROR) - - # Test the revert with exception - mock_l7rule_repo_update.reset_mock() - mock_l7rule_repo_update.side_effect = Exception('fail') - mark_l7rule_pending_delete.revert(self.l7rule_mock) - - mock_l7rule_repo_update.assert_called_once_with( - 'TEST', - id=L7RULE_ID, - provisioning_status=constants.ERROR) - - @mock.patch('octavia.db.repositories.L7RuleRepository.update') - def test_mark_l7rule_pending_update_in_db(self, - mock_l7rule_repo_update, - mock_generate_uuid, - mock_LOG, - mock_get_session, - mock_loadbalancer_repo_update, - mock_listener_repo_update, - mock_amphora_repo_update, - mock_amphora_repo_delete): - - mark_l7rule_pending_update = (database_tasks. - MarkL7RulePendingUpdateInDB()) - mark_l7rule_pending_update.execute(self.l7rule_mock) - - mock_l7rule_repo_update.assert_called_once_with( - 'TEST', - L7RULE_ID, - provisioning_status=constants.PENDING_UPDATE) - - # Test the revert - mock_l7rule_repo_update.reset_mock() - mark_l7rule_pending_update.revert(self.l7rule_mock) - - mock_l7rule_repo_update.assert_called_once_with( - 'TEST', - id=L7RULE_ID, - provisioning_status=constants.ERROR) - - # Test the revert with exception - mock_l7rule_repo_update.reset_mock() - mock_l7rule_repo_update.side_effect = Exception('fail') - mark_l7rule_pending_update.revert(self.l7rule_mock) - - mock_l7rule_repo_update.assert_called_once_with( - 'TEST', - id=L7RULE_ID, - provisioning_status=constants.ERROR) - - @mock.patch('octavia.db.repositories.MemberRepository.update') - def test_mark_member_active_in_db(self, - mock_member_repo_update, - mock_generate_uuid, - mock_LOG, - mock_get_session, - mock_loadbalancer_repo_update, - mock_listener_repo_update, - mock_amphora_repo_update, - mock_amphora_repo_delete): - - mark_member_active = (database_tasks.MarkMemberActiveInDB()) - mark_member_active.execute(self.member_mock) - - mock_member_repo_update.assert_called_once_with( - 'TEST', - MEMBER_ID, - provisioning_status=constants.ACTIVE) - - # Test the revert - mock_member_repo_update.reset_mock() - mark_member_active.revert(self.member_mock) - - mock_member_repo_update.assert_called_once_with( - 'TEST', - id=MEMBER_ID, - provisioning_status=constants.ERROR) - - # Test the revert with exception - mock_member_repo_update.reset_mock() - mock_member_repo_update.side_effect = Exception('fail') - mark_member_active.revert(self.member_mock) - - mock_member_repo_update.assert_called_once_with( - 'TEST', - id=MEMBER_ID, - provisioning_status=constants.ERROR) - - @mock.patch('octavia.db.repositories.MemberRepository.update') - def test_mark_member_pending_create_in_db(self, - mock_member_repo_update, - mock_generate_uuid, - mock_LOG, - mock_get_session, - mock_loadbalancer_repo_update, - mock_listener_repo_update, - mock_amphora_repo_update, - mock_amphora_repo_delete): - - mark_member_pending_create = (database_tasks. 
- MarkMemberPendingCreateInDB()) - mark_member_pending_create.execute(self.member_mock) - - mock_member_repo_update.assert_called_once_with( - 'TEST', - MEMBER_ID, - provisioning_status=constants.PENDING_CREATE) - - # Test the revert - mock_member_repo_update.reset_mock() - mark_member_pending_create.revert(self.member_mock) - - mock_member_repo_update.assert_called_once_with( - 'TEST', - id=MEMBER_ID, - provisioning_status=constants.ERROR) - - # Test the revert with exception - mock_member_repo_update.reset_mock() - mock_member_repo_update.side_effect = Exception('fail') - mark_member_pending_create.revert(self.member_mock) - - mock_member_repo_update.assert_called_once_with( - 'TEST', - id=MEMBER_ID, - provisioning_status=constants.ERROR) - - @mock.patch('octavia.db.repositories.MemberRepository.update') - def test_mark_member_pending_delete_in_db(self, - mock_member_repo_update, - mock_generate_uuid, - mock_LOG, - mock_get_session, - mock_loadbalancer_repo_update, - mock_listener_repo_update, - mock_amphora_repo_update, - mock_amphora_repo_delete): - - mark_member_pending_delete = (database_tasks. - MarkMemberPendingDeleteInDB()) - mark_member_pending_delete.execute(self.member_mock) - - mock_member_repo_update.assert_called_once_with( - 'TEST', - MEMBER_ID, - provisioning_status=constants.PENDING_DELETE) - - # Test the revert - mock_member_repo_update.reset_mock() - mark_member_pending_delete.revert(self.member_mock) - - mock_member_repo_update.assert_called_once_with( - 'TEST', - id=MEMBER_ID, - provisioning_status=constants.ERROR) - - # Test the revert with exception - mock_member_repo_update.reset_mock() - mock_member_repo_update.side_effect = Exception('fail') - mark_member_pending_delete.revert(self.member_mock) - - mock_member_repo_update.assert_called_once_with( - 'TEST', - id=MEMBER_ID, - provisioning_status=constants.ERROR) - - @mock.patch('octavia.db.repositories.MemberRepository.update') - def test_mark_member_pending_update_in_db(self, - mock_member_repo_update, - mock_generate_uuid, - mock_LOG, - mock_get_session, - mock_loadbalancer_repo_update, - mock_listener_repo_update, - mock_amphora_repo_update, - mock_amphora_repo_delete): - - mark_member_pending_update = (database_tasks. 
- MarkMemberPendingUpdateInDB()) - mark_member_pending_update.execute(self.member_mock) - - mock_member_repo_update.assert_called_once_with( - 'TEST', - MEMBER_ID, - provisioning_status=constants.PENDING_UPDATE) - - # Test the revert - mock_member_repo_update.reset_mock() - mark_member_pending_update.revert(self.member_mock) - - mock_member_repo_update.assert_called_once_with( - 'TEST', - id=MEMBER_ID, - provisioning_status=constants.ERROR) - - # Test the revert with exception - mock_member_repo_update.reset_mock() - mock_member_repo_update.side_effect = Exception('fail') - mark_member_pending_update.revert(self.member_mock) - - mock_member_repo_update.assert_called_once_with( - 'TEST', - id=MEMBER_ID, - provisioning_status=constants.ERROR) - - @mock.patch('octavia.db.repositories.PoolRepository.update') - def test_mark_pool_active_in_db(self, - mock_pool_repo_update, - mock_generate_uuid, - mock_LOG, - mock_get_session, - mock_loadbalancer_repo_update, - mock_listener_repo_update, - mock_amphora_repo_update, - mock_amphora_repo_delete): - - mark_pool_active = (database_tasks.MarkPoolActiveInDB()) - mark_pool_active.execute(self.pool_mock) - - mock_pool_repo_update.assert_called_once_with( - 'TEST', - POOL_ID, - provisioning_status=constants.ACTIVE) - - # Test the revert - mock_pool_repo_update.reset_mock() - mark_pool_active.revert(self.pool_mock) - - mock_pool_repo_update.assert_called_once_with( - 'TEST', - id=POOL_ID, - provisioning_status=constants.ERROR) - - # Test the revert with exception - mock_pool_repo_update.reset_mock() - mock_pool_repo_update.side_effect = Exception('fail') - mark_pool_active.revert(self.pool_mock) - - mock_pool_repo_update.assert_called_once_with( - 'TEST', - id=POOL_ID, - provisioning_status=constants.ERROR) - - @mock.patch('octavia.db.repositories.PoolRepository.update') - def test_mark_pool_pending_create_in_db(self, - mock_pool_repo_update, - mock_generate_uuid, - mock_LOG, - mock_get_session, - mock_loadbalancer_repo_update, - mock_listener_repo_update, - mock_amphora_repo_update, - mock_amphora_repo_delete): - - mark_pool_pending_create = (database_tasks.MarkPoolPendingCreateInDB()) - mark_pool_pending_create.execute(self.pool_mock) - - mock_pool_repo_update.assert_called_once_with( - 'TEST', - POOL_ID, - provisioning_status=constants.PENDING_CREATE) - - # Test the revert - mock_pool_repo_update.reset_mock() - mark_pool_pending_create.revert(self.pool_mock) - - mock_pool_repo_update.assert_called_once_with( - 'TEST', - id=POOL_ID, - provisioning_status=constants.ERROR) - - # Test the revert with exception - mock_pool_repo_update.reset_mock() - mock_pool_repo_update.side_effect = Exception('fail') - mark_pool_pending_create.revert(self.pool_mock) - - mock_pool_repo_update.assert_called_once_with( - 'TEST', - id=POOL_ID, - provisioning_status=constants.ERROR) - - @mock.patch('octavia.db.repositories.PoolRepository.update') - def test_mark_pool_pending_delete_in_db(self, - mock_pool_repo_update, - mock_generate_uuid, - mock_LOG, - mock_get_session, - mock_loadbalancer_repo_update, - mock_listener_repo_update, - mock_amphora_repo_update, - mock_amphora_repo_delete): - - mark_pool_pending_delete = (database_tasks.MarkPoolPendingDeleteInDB()) - mark_pool_pending_delete.execute(self.pool_mock) - - mock_pool_repo_update.assert_called_once_with( - 'TEST', - POOL_ID, - provisioning_status=constants.PENDING_DELETE) - - # Test the revert - mock_pool_repo_update.reset_mock() - mark_pool_pending_delete.revert(self.pool_mock) - - 
mock_pool_repo_update.assert_called_once_with( - 'TEST', - id=POOL_ID, - provisioning_status=constants.ERROR) - - # Test the revert with exception - mock_pool_repo_update.reset_mock() - mock_pool_repo_update.side_effect = Exception('fail') - mark_pool_pending_delete.revert(self.pool_mock) - - mock_pool_repo_update.assert_called_once_with( - 'TEST', - id=POOL_ID, - provisioning_status=constants.ERROR) - - @mock.patch('octavia.db.repositories.PoolRepository.update') - def test_mark_pool_pending_update_in_db(self, - mock_pool_repo_update, - mock_generate_uuid, - mock_LOG, - mock_get_session, - mock_loadbalancer_repo_update, - mock_listener_repo_update, - mock_amphora_repo_update, - mock_amphora_repo_delete): - - mark_pool_pending_update = (database_tasks. - MarkPoolPendingUpdateInDB()) - mark_pool_pending_update.execute(self.pool_mock) - - mock_pool_repo_update.assert_called_once_with( - 'TEST', - POOL_ID, - provisioning_status=constants.PENDING_UPDATE) - - # Test the revert - mock_pool_repo_update.reset_mock() - mark_pool_pending_update.revert(self.pool_mock) - - mock_pool_repo_update.assert_called_once_with( - 'TEST', - id=POOL_ID, - provisioning_status=constants.ERROR) - - # Test the revert with exception - mock_pool_repo_update.reset_mock() - mock_pool_repo_update.side_effect = Exception('fail') - mark_pool_pending_update.revert(self.pool_mock) - - mock_pool_repo_update.assert_called_once_with( - 'TEST', - id=POOL_ID, - provisioning_status=constants.ERROR) - - @mock.patch('octavia.db.repositories.MemberRepository.update_pool_members') - def test_update_pool_members_operating_status_in_db( - self, - mock_member_repo_update_pool_members, - mock_generate_uuid, - mock_LOG, - mock_get_session, - mock_loadbalancer_repo_update, - mock_listener_repo_update, - mock_amphora_repo_update, - mock_amphora_repo_delete): - - update_members = database_tasks.UpdatePoolMembersOperatingStatusInDB() - update_members.execute(self.pool_mock, constants.ONLINE) - - mock_member_repo_update_pool_members.assert_called_once_with( - 'TEST', - POOL_ID, - operating_status=constants.ONLINE) diff --git a/octavia/tests/unit/controller/worker/v1/tasks/test_database_tasks_quota.py b/octavia/tests/unit/controller/worker/v1/tasks/test_database_tasks_quota.py deleted file mode 100644 index fc02ad7d3a..0000000000 --- a/octavia/tests/unit/controller/worker/v1/tasks/test_database_tasks_quota.py +++ /dev/null @@ -1,415 +0,0 @@ -# Copyright 2017 Rackspace, US Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
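Worth pinning down before the quota tests: every Mark*InDB and Update*InDB case removed above asserts the same two-part contract. On execute the task writes the requested provisioning status through the repository; on revert it forces the object to ERROR, and a repository failure during revert is swallowed so the revert chain can keep unwinding. A minimal sketch of a task honoring that contract, using stand-in names for the repository and session helpers (illustrative only, not the removed implementation):

    from unittest import mock

    from taskflow import task

    # Stand-ins for octavia.db.repositories.ListenerRepository and
    # octavia.db.api.get_session; the removed tests mocked these too.
    listener_repo = mock.MagicMock()
    get_session = mock.MagicMock(return_value='TEST')


    class MarkListenerPendingUpdateSketch(task.Task):

        def execute(self, listener):
            # Flag the object so concurrent operations are rejected.
            listener_repo.update(get_session(), listener.id,
                                 provisioning_status='PENDING_UPDATE')

        def revert(self, listener, *args, **kwargs):
            # The flow failed somewhere: park the object in ERROR. A
            # revert must never raise, so repository errors are eaten.
            try:
                listener_repo.update(get_session(), id=listener.id,
                                     provisioning_status='ERROR')
            except Exception:
                pass

The small asymmetry the tests check for (positional id on execute, keyword id= on revert) mirrors how the removed tasks actually called the repository.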
-# -from unittest import mock - -from oslo_utils import uuidutils -from taskflow.types import failure - -from octavia.common import data_models -from octavia.common import exceptions -from octavia.controller.worker.v1.tasks import database_tasks -import octavia.tests.unit.base as base - - -class TestDatabaseTasksQuota(base.TestCase): - - def setUp(self): - - self._tf_failure_mock = mock.Mock(spec=failure.Failure) - self.zero_pool_child_count = {'HM': 0, 'member': 0} - - super().setUp() - - @mock.patch('octavia.db.api.get_session', return_value='TEST') - @mock.patch('octavia.db.repositories.Repositories.decrement_quota') - @mock.patch('octavia.db.repositories.Repositories.check_quota_met') - def _test_decrement_quota(self, - task, - data_model, - mock_check_quota_met, - mock_decrement_quota, - mock_get_session): - - project_id = uuidutils.generate_uuid() - test_object = mock.MagicMock() - test_object.project_id = project_id - - # execute without exception - mock_decrement_quota.reset_mock() - with mock.patch('octavia.db.api.' - 'get_session') as mock_get_session_local: - mock_session = mock.MagicMock() - mock_get_session_local.return_value = mock_session - - if data_model == data_models.Pool: - task.execute(test_object, self.zero_pool_child_count) - else: - if data_model == data_models.L7Policy: - test_object.l7rules = [] - task.execute(test_object) - - mock_decrement_quota.assert_called_once_with( - mock_session, data_model, project_id) - - mock_session.commit.assert_called_once_with() - - # execute with exception - mock_decrement_quota.reset_mock() - with mock.patch('octavia.db.api.' - 'get_session') as mock_get_session_local: - mock_session = mock.MagicMock() - mock_get_session_local.return_value = mock_session - - mock_decrement_quota.side_effect = ( - exceptions.OctaviaException('fail')) - if data_model == data_models.Pool: - self.assertRaises(exceptions.OctaviaException, - task.execute, - test_object, - self.zero_pool_child_count) - else: - self.assertRaises(exceptions.OctaviaException, - task.execute, - test_object) - - mock_decrement_quota.assert_called_once_with( - mock_session, data_model, project_id) - - mock_session.rollback.assert_called_once_with() - - # revert with instance of failure - mock_get_session.reset_mock() - mock_check_quota_met.reset_mock() - if data_model == data_models.Pool: - task.revert(test_object, - self.zero_pool_child_count, - self._tf_failure_mock) - else: - if data_model == data_models.L7Policy: - test_object.l7rules = [] - task.revert(test_object, self._tf_failure_mock) - self.assertFalse(mock_get_session.called) - self.assertFalse(mock_check_quota_met.called) - - # revert - mock_check_quota_met.reset_mock() - with mock.patch('octavia.db.api.' - 'get_session') as mock_get_session_local: - mock_session = mock.MagicMock() - mock_lock_session = mock.MagicMock() - mock_get_session_local.side_effect = [mock_session, - mock_lock_session] - - if data_model == data_models.Pool: - task.revert(test_object, self.zero_pool_child_count, None) - else: - task.revert(test_object, None) - - mock_check_quota_met.assert_called_once_with( - mock_session, mock_lock_session, data_model, - project_id) - - mock_lock_session.commit.assert_called_once_with() - - # revert with rollback - with mock.patch('octavia.db.api.' 
- 'get_session') as mock_get_session_local: - mock_session = mock.MagicMock() - mock_lock_session = mock.MagicMock() - mock_get_session_local.side_effect = [mock_session, - mock_lock_session] - mock_check_quota_met.side_effect = ( - exceptions.OctaviaException('fail')) - - if data_model == data_models.Pool: - task.revert(test_object, self.zero_pool_child_count, None) - else: - task.revert(test_object, None) - - mock_lock_session.rollback.assert_called_once_with() - - # revert with db exception - mock_check_quota_met.reset_mock() - with mock.patch('octavia.db.api.' - 'get_session') as mock_get_session_local: - mock_get_session_local.side_effect = Exception('fail') - - if data_model == data_models.Pool: - task.revert(test_object, self.zero_pool_child_count, None) - else: - task.revert(test_object, None) - - self.assertFalse(mock_check_quota_met.called) - - def test_decrement_health_monitor_quota(self): - task = database_tasks.DecrementHealthMonitorQuota() - data_model = data_models.HealthMonitor - self._test_decrement_quota(task, data_model) - - def test_decrement_listener_quota(self): - task = database_tasks.DecrementListenerQuota() - data_model = data_models.Listener - self._test_decrement_quota(task, data_model) - - def test_decrement_loadbalancer_quota(self): - task = database_tasks.DecrementLoadBalancerQuota() - data_model = data_models.LoadBalancer - self._test_decrement_quota(task, data_model) - - def test_decrement_pool_quota(self): - task = database_tasks.DecrementPoolQuota() - data_model = data_models.Pool - self._test_decrement_quota(task, data_model) - - def test_decrement_member_quota(self): - task = database_tasks.DecrementMemberQuota() - data_model = data_models.Member - self._test_decrement_quota(task, data_model) - - @mock.patch('octavia.db.repositories.Repositories.decrement_quota') - @mock.patch('octavia.db.repositories.Repositories.check_quota_met') - def test_decrement_pool_quota_pool_children(self, - mock_check_quota_met, - mock_decrement_quota): - pool_child_count = {'HM': 1, 'member': 2} - project_id = uuidutils.generate_uuid() - test_object = mock.MagicMock() - test_object.project_id = project_id - task = database_tasks.DecrementPoolQuota() - mock_session = mock.MagicMock() - - with mock.patch('octavia.db.api.' - 'get_session') as mock_get_session_local: - mock_get_session_local.return_value = mock_session - - task.execute(test_object, pool_child_count) - - calls = [mock.call(mock_session, data_models.Pool, project_id), - mock.call(mock_session, data_models.HealthMonitor, - project_id), - mock.call(mock_session, data_models.Member, project_id, - quantity=2)] - - mock_decrement_quota.assert_has_calls(calls) - - mock_session.commit.assert_called_once_with() - - # revert - mock_session.reset_mock() - with mock.patch('octavia.db.api.' 
- 'get_session') as mock_get_session_local: - mock_lock_session = mock.MagicMock() - mock_get_session_local.side_effect = [mock_session, - mock_lock_session, - mock_lock_session, - mock_lock_session, - mock_lock_session] - - task.revert(test_object, pool_child_count, None) - - calls = [mock.call(mock_session, mock_lock_session, - data_models.Pool, project_id), - mock.call(mock_session, mock_lock_session, - data_models.HealthMonitor, project_id), - mock.call(mock_session, mock_lock_session, - data_models.Member, project_id), - mock.call(mock_session, mock_lock_session, - data_models.Member, project_id)] - - mock_check_quota_met.assert_has_calls(calls) - - self.assertEqual(4, mock_lock_session.commit.call_count) - - # revert with health monitor quota exception - mock_session.reset_mock() - mock_check_quota_met.side_effect = [None, Exception('fail'), None, - None] - with mock.patch('octavia.db.api.' - 'get_session') as mock_get_session_local: - mock_lock_session = mock.MagicMock() - mock_get_session_local.side_effect = [mock_session, - mock_lock_session, - mock_lock_session, - mock_lock_session, - mock_lock_session] - - task.revert(test_object, pool_child_count, None) - - calls = [mock.call(mock_session, mock_lock_session, - data_models.Pool, project_id), - mock.call(mock_session, mock_lock_session, - data_models.HealthMonitor, project_id), - mock.call(mock_session, mock_lock_session, - data_models.Member, project_id), - mock.call(mock_session, mock_lock_session, - data_models.Member, project_id)] - - mock_check_quota_met.assert_has_calls(calls) - - self.assertEqual(3, mock_lock_session.commit.call_count) - self.assertEqual(1, mock_lock_session.rollback.call_count) - - # revert with member quota exception - mock_session.reset_mock() - mock_check_quota_met.side_effect = [None, None, None, - Exception('fail')] - with mock.patch('octavia.db.api.' 
- 'get_session') as mock_get_session_local: - mock_lock_session = mock.MagicMock() - mock_get_session_local.side_effect = [mock_session, - mock_lock_session, - mock_lock_session, - mock_lock_session, - mock_lock_session] - - task.revert(test_object, pool_child_count, None) - - calls = [mock.call(mock_session, mock_lock_session, - data_models.Pool, project_id), - mock.call(mock_session, mock_lock_session, - data_models.HealthMonitor, project_id), - mock.call(mock_session, mock_lock_session, - data_models.Member, project_id), - mock.call(mock_session, mock_lock_session, - data_models.Member, project_id)] - - mock_check_quota_met.assert_has_calls(calls) - - self.assertEqual(3, mock_lock_session.commit.call_count) - self.assertEqual(1, mock_lock_session.rollback.call_count) - - def test_count_pool_children_for_quota(self): - project_id = uuidutils.generate_uuid() - member1 = data_models.Member(id=1, project_id=project_id) - member2 = data_models.Member(id=2, project_id=project_id) - healtmon = data_models.HealthMonitor(id=1, project_id=project_id) - pool_no_children = data_models.Pool(id=1, project_id=project_id) - pool_1_mem = data_models.Pool(id=1, project_id=project_id, - members=[member1]) - pool_hm = data_models.Pool(id=1, project_id=project_id, - health_monitor=healtmon) - pool_hm_2_mem = data_models.Pool(id=1, project_id=project_id, - health_monitor=healtmon, - members=[member1, member2]) - task = database_tasks.CountPoolChildrenForQuota() - - # Test pool with no children - result = task.execute(pool_no_children) - - self.assertEqual({'HM': 0, 'member': 0}, result) - - # Test pool with one member - result = task.execute(pool_1_mem) - - self.assertEqual({'HM': 0, 'member': 1}, result) - - # Test pool with health monitor and no members - result = task.execute(pool_hm) - - self.assertEqual({'HM': 1, 'member': 0}, result) - - # Test pool with health monitor and two members - result = task.execute(pool_hm_2_mem) - - self.assertEqual({'HM': 1, 'member': 2}, result) - - def test_decrement_l7policy_quota(self): - task = database_tasks.DecrementL7policyQuota() - data_model = data_models.L7Policy - self._test_decrement_quota(task, data_model) - - @mock.patch('octavia.db.repositories.Repositories.decrement_quota') - @mock.patch('octavia.db.repositories.Repositories.check_quota_met') - def test_decrement_l7policy_quota_with_children(self, - mock_check_quota_met, - mock_decrement_quota): - project_id = uuidutils.generate_uuid() - test_l7rule1 = mock.MagicMock() - test_l7rule1.project_id = project_id - test_l7rule2 = mock.MagicMock() - test_l7rule2.project_id = project_id - test_object = mock.MagicMock() - test_object.project_id = project_id - test_object.l7rules = [test_l7rule1, test_l7rule2] - task = database_tasks.DecrementL7policyQuota() - mock_session = mock.MagicMock() - - with mock.patch('octavia.db.api.' - 'get_session') as mock_get_session_local: - mock_get_session_local.return_value = mock_session - - task.execute(test_object) - - calls = [mock.call(mock_session, data_models.L7Policy, project_id), - mock.call(mock_session, data_models.L7Rule, project_id, - quantity=2)] - - mock_decrement_quota.assert_has_calls(calls) - - mock_session.commit.assert_called_once_with() - - # revert - mock_session.reset_mock() - with mock.patch('octavia.db.api.' 
- 'get_session') as mock_get_session_local: - mock_lock_session = mock.MagicMock() - mock_get_session_local.side_effect = [mock_session, - mock_lock_session, - mock_lock_session, - mock_lock_session] - - task.revert(test_object, None) - - calls = [mock.call(mock_session, mock_lock_session, - data_models.L7Policy, project_id), - mock.call(mock_session, mock_lock_session, - data_models.L7Rule, project_id), - mock.call(mock_session, mock_lock_session, - data_models.L7Rule, project_id)] - - mock_check_quota_met.assert_has_calls(calls) - - self.assertEqual(3, mock_lock_session.commit.call_count) - - # revert with l7rule quota exception - mock_session.reset_mock() - mock_check_quota_met.side_effect = [None, None, - Exception('fail')] - with mock.patch('octavia.db.api.' - 'get_session') as mock_get_session_local: - mock_lock_session = mock.MagicMock() - mock_get_session_local.side_effect = [mock_session, - mock_lock_session, - mock_lock_session, - mock_lock_session] - - task.revert(test_object, None) - - calls = [mock.call(mock_session, mock_lock_session, - data_models.L7Policy, project_id), - mock.call(mock_session, mock_lock_session, - data_models.L7Rule, project_id), - mock.call(mock_session, mock_lock_session, - data_models.L7Rule, project_id)] - - mock_check_quota_met.assert_has_calls(calls) - - self.assertEqual(2, mock_lock_session.commit.call_count) - self.assertEqual(1, mock_lock_session.rollback.call_count) - - def test_decrement_l7rule_quota(self): - task = database_tasks.DecrementL7ruleQuota() - data_model = data_models.L7Rule - self._test_decrement_quota(task, data_model) diff --git a/octavia/tests/unit/controller/worker/v1/tasks/test_lifecycle_tasks.py b/octavia/tests/unit/controller/worker/v1/tasks/test_lifecycle_tasks.py deleted file mode 100644 index a766e619ac..0000000000 --- a/octavia/tests/unit/controller/worker/v1/tasks/test_lifecycle_tasks.py +++ /dev/null @@ -1,401 +0,0 @@ -# Copyright 2016 Rackspace -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
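The quota tests that end just above all drive one shape of task: quota is decremented inside a dedicated transaction on execute, and revert re-checks (re-consumes) the quota, tolerating every failure so that unwinding a flow can never itself fail. Roughly, with hypothetical stand-ins for octavia.db.api and Repositories (a sketch, not the removed code):

    from unittest import mock

    from taskflow import task
    from taskflow.types import failure

    db_api = mock.MagicMock()   # stand-in for octavia.db.api
    repos = mock.MagicMock()    # stand-in for Repositories()


    class DecrementQuotaSketch(task.Task):

        data_model = 'Listener'  # the real tasks used a data_models class

        def execute(self, obj):
            session = db_api.get_session()
            try:
                # Release one unit of quota for this project.
                repos.decrement_quota(session, self.data_model,
                                      obj.project_id)
                session.commit()
            except Exception:
                session.rollback()
                raise

        def revert(self, obj, result=None, *args, **kwargs):
            if isinstance(result, failure.Failure):
                return  # execute never decremented anything
            try:
                session = db_api.get_session()
                lock_session = db_api.get_session()
                try:
                    # Hand the quota back by re-running the quota check.
                    repos.check_quota_met(session, lock_session,
                                          self.data_model, obj.project_id)
                    lock_session.commit()
                except Exception:
                    lock_session.rollback()
            except Exception:
                pass  # a revert must never fail the flow

The pool and l7policy variants repeat the revert once per child (members, l7rules), which is why the tests above count commit and rollback calls rather than asserting a single one.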
-from unittest import mock - -from oslo_utils import uuidutils - -from octavia.controller.worker.v1.tasks import lifecycle_tasks -import octavia.tests.unit.base as base - - -class TestLifecycleTasks(base.TestCase): - - def setUp(self): - - self.AMPHORA = mock.MagicMock() - self.AMPHORA_ID = uuidutils.generate_uuid() - self.AMPHORA.id = self.AMPHORA_ID - self.HEALTH_MON = mock.MagicMock() - self.HEALTH_MON_ID = uuidutils.generate_uuid() - self.HEALTH_MON.id = self.HEALTH_MON_ID - self.L7POLICY = mock.MagicMock() - self.L7POLICY_ID = uuidutils.generate_uuid() - self.L7POLICY.id = self.L7POLICY_ID - self.L7RULE = mock.MagicMock() - self.L7RULE_ID = uuidutils.generate_uuid() - self.L7RULE.id = self.L7RULE_ID - self.LISTENER = mock.MagicMock() - self.LISTENER_ID = uuidutils.generate_uuid() - self.LISTENER.id = self.LISTENER_ID - self.LISTENERS = [self.LISTENER] - self.LOADBALANCER = mock.MagicMock() - self.LOADBALANCER_ID = uuidutils.generate_uuid() - self.LOADBALANCER.id = self.LOADBALANCER_ID - self.LISTENER.load_balancer = self.LOADBALANCER - self.MEMBER = mock.MagicMock() - self.MEMBER_ID = uuidutils.generate_uuid() - self.MEMBER.id = self.MEMBER_ID - self.MEMBERS = [self.MEMBER] - self.POOL = mock.MagicMock() - self.POOL_ID = uuidutils.generate_uuid() - self.POOL.id = self.POOL_ID - - super().setUp() - - @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' - 'unmark_amphora_health_busy') - @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' - 'mark_amphora_status_error') - def test_AmphoraIDToErrorOnRevertTask(self, mock_amp_status_error, - mock_amp_health_busy): - - amp_id_to_error_on_revert = (lifecycle_tasks. - AmphoraIDToErrorOnRevertTask()) - - # Execute - amp_id_to_error_on_revert.execute(self.AMPHORA_ID) - - self.assertFalse(mock_amp_status_error.called) - - # Revert - amp_id_to_error_on_revert.revert(self.AMPHORA_ID) - - mock_amp_status_error.assert_called_once_with(self.AMPHORA_ID) - self.assertFalse(mock_amp_health_busy.called) - - @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' - 'unmark_amphora_health_busy') - @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' - 'mark_amphora_status_error') - def test_AmphoraToErrorOnRevertTask(self, mock_amp_status_error, - mock_amp_health_busy): - - amp_to_error_on_revert = lifecycle_tasks.AmphoraToErrorOnRevertTask() - - # Execute - amp_to_error_on_revert.execute(self.AMPHORA) - - self.assertFalse(mock_amp_status_error.called) - - # Revert - amp_to_error_on_revert.revert(self.AMPHORA) - - mock_amp_status_error.assert_called_once_with(self.AMPHORA_ID) - self.assertFalse(mock_amp_health_busy.called) - - @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' - 'mark_health_mon_prov_status_error') - @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' - 'mark_loadbalancer_prov_status_active') - @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' - 'mark_listener_prov_status_active') - def test_HealthMonitorToErrorOnRevertTask( - self, - mock_listener_prov_status_active, - mock_loadbalancer_prov_status_active, - mock_health_mon_prov_status_error): - - health_mon_to_error_on_revert = (lifecycle_tasks. 
- HealthMonitorToErrorOnRevertTask()) - - # Execute - health_mon_to_error_on_revert.execute(self.HEALTH_MON, - self.LISTENERS, - self.LOADBALANCER) - - self.assertFalse(mock_health_mon_prov_status_error.called) - - # Revert - health_mon_to_error_on_revert.revert(self.HEALTH_MON, - self.LISTENERS, - self.LOADBALANCER) - - mock_health_mon_prov_status_error.assert_called_once_with( - self.HEALTH_MON_ID) - mock_loadbalancer_prov_status_active.assert_called_once_with( - self.LOADBALANCER_ID) - mock_listener_prov_status_active.assert_called_once_with( - self.LISTENER_ID) - - @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' - 'mark_l7policy_prov_status_error') - @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' - 'mark_loadbalancer_prov_status_active') - @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' - 'mark_listener_prov_status_active') - def test_L7PolicyToErrorOnRevertTask( - self, - mock_listener_prov_status_active, - mock_loadbalancer_prov_status_active, - mock_l7policy_prov_status_error): - - l7policy_to_error_on_revert = (lifecycle_tasks. - L7PolicyToErrorOnRevertTask()) - - # Execute - l7policy_to_error_on_revert.execute(self.L7POLICY, - self.LISTENERS, - self.LOADBALANCER) - - self.assertFalse(mock_l7policy_prov_status_error.called) - - # Revert - l7policy_to_error_on_revert.revert(self.L7POLICY, - self.LISTENERS, - self.LOADBALANCER) - - mock_l7policy_prov_status_error.assert_called_once_with( - self.L7POLICY_ID) - mock_loadbalancer_prov_status_active.assert_called_once_with( - self.LOADBALANCER_ID) - mock_listener_prov_status_active.assert_called_once_with( - self.LISTENER_ID) - - @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' - 'mark_l7rule_prov_status_error') - @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' - 'mark_loadbalancer_prov_status_active') - @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' - 'mark_listener_prov_status_active') - def test_L7RuleToErrorOnRevertTask( - self, - mock_listener_prov_status_active, - mock_loadbalancer_prov_status_active, - mock_l7rule_prov_status_error): - - l7rule_to_error_on_revert = (lifecycle_tasks. - L7RuleToErrorOnRevertTask()) - - # Execute - l7rule_to_error_on_revert.execute(self.L7RULE, - self.LISTENERS, - self.LOADBALANCER) - - self.assertFalse(mock_l7rule_prov_status_error.called) - - # Revert - l7rule_to_error_on_revert.revert(self.L7RULE, - self.LISTENERS, - self.LOADBALANCER) - - mock_l7rule_prov_status_error.assert_called_once_with( - self.L7RULE_ID) - mock_loadbalancer_prov_status_active.assert_called_once_with( - self.LOADBALANCER_ID) - mock_listener_prov_status_active.assert_called_once_with( - self.LISTENER_ID) - - @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' - 'mark_loadbalancer_prov_status_active') - @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' - 'mark_listener_prov_status_error') - def test_ListenerToErrorOnRevertTask( - self, - mock_listener_prov_status_error, - mock_loadbalancer_prov_status_active): - - listener_to_error_on_revert = (lifecycle_tasks. - ListenerToErrorOnRevertTask()) - - # Execute - listener_to_error_on_revert.execute(self.LISTENER) - - self.assertFalse(mock_listener_prov_status_error.called) - - # Revert - listener_to_error_on_revert.revert(self.LISTENER) - - mock_listener_prov_status_error.assert_called_once_with( - self.LISTENER_ID) - mock_loadbalancer_prov_status_active.assert_called_once_with( - self.LOADBALANCER_ID) - - @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' 
- 'mark_loadbalancer_prov_status_active') - @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' - 'mark_listener_prov_status_error') - def test_ListenersToErrorOnRevertTask( - self, - mock_listener_prov_status_error, - mock_loadbalancer_prov_status_active): - - listeners_to_error_on_revert = (lifecycle_tasks. - ListenersToErrorOnRevertTask()) - - # Execute - listeners_to_error_on_revert.execute(self.LISTENERS, - self.LOADBALANCER) - - self.assertFalse(mock_listener_prov_status_error.called) - - # Revert - listeners_to_error_on_revert.revert(self.LISTENERS, - self.LOADBALANCER) - - mock_listener_prov_status_error.assert_called_once_with( - self.LISTENER_ID) - mock_loadbalancer_prov_status_active.assert_called_once_with( - self.LOADBALANCER_ID) - - @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' - 'mark_loadbalancer_prov_status_error') - def test_LoadBalancerIDToErrorOnRevertTask( - self, - mock_loadbalancer_prov_status_error): - - loadbalancer_id_to_error_on_revert = ( - lifecycle_tasks.LoadBalancerIDToErrorOnRevertTask()) - - # Execute - loadbalancer_id_to_error_on_revert.execute(self.LOADBALANCER_ID) - - self.assertFalse(mock_loadbalancer_prov_status_error.called) - - # Revert - loadbalancer_id_to_error_on_revert.revert(self.LOADBALANCER_ID) - - mock_loadbalancer_prov_status_error.assert_called_once_with( - self.LOADBALANCER_ID) - - @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' - 'mark_loadbalancer_prov_status_error') - def test_LoadBalancerToErrorOnRevertTask( - self, - mock_loadbalancer_prov_status_error): - - loadbalancer_to_error_on_revert = ( - lifecycle_tasks.LoadBalancerToErrorOnRevertTask()) - - # Execute - loadbalancer_to_error_on_revert.execute(self.LOADBALANCER) - - self.assertFalse(mock_loadbalancer_prov_status_error.called) - - # Revert - loadbalancer_to_error_on_revert.revert(self.LOADBALANCER) - - mock_loadbalancer_prov_status_error.assert_called_once_with( - self.LOADBALANCER_ID) - - @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' - 'mark_member_prov_status_error') - @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' - 'mark_loadbalancer_prov_status_active') - @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' - 'mark_listener_prov_status_active') - @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' - 'mark_pool_prov_status_active') - def test_MemberToErrorOnRevertTask( - self, - mock_pool_prov_status_active, - mock_listener_prov_status_active, - mock_loadbalancer_prov_status_active, - mock_member_prov_status_error): - member_to_error_on_revert = lifecycle_tasks.MemberToErrorOnRevertTask() - - # Execute - member_to_error_on_revert.execute(self.MEMBER, - self.LISTENERS, - self.LOADBALANCER, - self.POOL) - - self.assertFalse(mock_member_prov_status_error.called) - - # Revert - member_to_error_on_revert.revert(self.MEMBER, - self.LISTENERS, - self.LOADBALANCER, - self.POOL) - - mock_member_prov_status_error.assert_called_once_with( - self.MEMBER_ID) - mock_loadbalancer_prov_status_active.assert_called_once_with( - self.LOADBALANCER_ID) - mock_listener_prov_status_active.assert_called_once_with( - self.LISTENER_ID) - mock_pool_prov_status_active.assert_called_once_with( - self.POOL_ID) - - @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' - 'mark_member_prov_status_error') - @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' - 'mark_loadbalancer_prov_status_active') - @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' 
- 'mark_listener_prov_status_active') - @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' - 'mark_pool_prov_status_active') - def test_MembersToErrorOnRevertTask( - self, - mock_pool_prov_status_active, - mock_listener_prov_status_active, - mock_loadbalancer_prov_status_active, - mock_member_prov_status_error): - members_to_error_on_revert = ( - lifecycle_tasks.MembersToErrorOnRevertTask()) - - # Execute - members_to_error_on_revert.execute(self.MEMBERS, - self.LISTENERS, - self.LOADBALANCER, - self.POOL) - - self.assertFalse(mock_member_prov_status_error.called) - - # Revert - members_to_error_on_revert.revert(self.MEMBERS, - self.LISTENERS, - self.LOADBALANCER, - self.POOL) - - mock_member_prov_status_error.assert_called_once_with( - self.MEMBER_ID) - mock_loadbalancer_prov_status_active.assert_called_once_with( - self.LOADBALANCER_ID) - mock_listener_prov_status_active.assert_called_once_with( - self.LISTENER_ID) - mock_pool_prov_status_active.assert_called_once_with( - self.POOL_ID) - - @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' - 'mark_pool_prov_status_error') - @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' - 'mark_loadbalancer_prov_status_active') - @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' - 'mark_listener_prov_status_active') - def test_PoolToErrorOnRevertTask( - self, - mock_listener_prov_status_active, - mock_loadbalancer_prov_status_active, - mock_pool_prov_status_error): - - pool_to_error_on_revert = lifecycle_tasks.PoolToErrorOnRevertTask() - - # Execute - pool_to_error_on_revert.execute(self.POOL, - self.LISTENERS, - self.LOADBALANCER) - - self.assertFalse(mock_pool_prov_status_error.called) - - # Revert - pool_to_error_on_revert.revert(self.POOL, - self.LISTENERS, - self.LOADBALANCER) - - mock_pool_prov_status_error.assert_called_once_with( - self.POOL_ID) - mock_loadbalancer_prov_status_active.assert_called_once_with( - self.LOADBALANCER_ID) - mock_listener_prov_status_active.assert_called_once_with( - self.LISTENER_ID) diff --git a/octavia/tests/unit/controller/worker/v1/tasks/test_model_tasks.py b/octavia/tests/unit/controller/worker/v1/tasks/test_model_tasks.py deleted file mode 100644 index 2969758df1..0000000000 --- a/octavia/tests/unit/controller/worker/v1/tasks/test_model_tasks.py +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright 2015 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
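All of the lifecycle tests above reduce to a single contract: a *ToErrorOnRevertTask is a no-op on execute, and on revert marks its own object ERROR while returning the parent listeners and load balancer (and pool, for members) to ACTIVE. A compact sketch with a stand-in for octavia.controller.worker.task_utils.TaskUtils (illustrative only):

    from unittest import mock

    from taskflow import task

    task_utils = mock.MagicMock()  # stand-in for TaskUtils()


    class PoolToErrorOnRevertSketch(task.Task):

        def execute(self, pool, listeners, loadbalancer):
            # Nothing to do; the task exists purely for its revert.
            pass

        def revert(self, pool, listeners, loadbalancer, *args, **kwargs):
            task_utils.mark_pool_prov_status_error(pool.id)
            task_utils.mark_loadbalancer_prov_status_active(loadbalancer.id)
            for listener in listeners:
                task_utils.mark_listener_prov_status_active(listener.id)

The point of running such a task first in a flow is that any later failure leaves the API-visible objects in a consistent ERROR/ACTIVE state instead of stuck in PENDING_*.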
-# - -from unittest import mock - -from octavia.controller.worker.v1.tasks import model_tasks -import octavia.tests.unit.base as base - - -class TestObjectUpdateTasks(base.TestCase): - - def setUp(self): - - self.listener_mock = mock.MagicMock() - self.listener_mock.name = 'TEST' - - super().setUp() - - def test_delete_model_object(self): - - delete_object = model_tasks.DeleteModelObject() - delete_object.execute(self.listener_mock) - - self.listener_mock.delete.assert_called_once_with() - - def test_update_listener(self): - - update_attr = model_tasks.UpdateAttributes() - update_attr.execute(self.listener_mock, - {'name': 'TEST2'}) - - self.listener_mock.update.assert_called_once_with({'name': 'TEST2'}) diff --git a/octavia/tests/unit/controller/worker/v1/tasks/test_network_tasks.py b/octavia/tests/unit/controller/worker/v1/tasks/test_network_tasks.py deleted file mode 100644 index 7fd00f7d35..0000000000 --- a/octavia/tests/unit/controller/worker/v1/tasks/test_network_tasks.py +++ /dev/null @@ -1,1788 +0,0 @@ -# Copyright 2015 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -from unittest import mock - -from oslo_config import cfg -from oslo_config import fixture as oslo_fixture -from oslo_utils import uuidutils -from taskflow.types import failure -import tenacity - -from octavia.common import constants -from octavia.common import data_models as o_data_models -from octavia.common import exceptions -from octavia.controller.worker.v1.tasks import network_tasks -from octavia.network import base as net_base -from octavia.network import data_models -from octavia.tests.common import constants as t_constants -import octavia.tests.unit.base as base - - -AMPHORA_ID = 7 -COMPUTE_ID = uuidutils.generate_uuid() -PORT_ID = uuidutils.generate_uuid() -SUBNET_ID = uuidutils.generate_uuid() -NETWORK_ID = uuidutils.generate_uuid() -MGMT_NETWORK_ID = uuidutils.generate_uuid() -MGMT_SUBNET_ID = uuidutils.generate_uuid() -SG_ID = uuidutils.generate_uuid() -IP_ADDRESS = "172.24.41.1" -VIP = o_data_models.Vip(port_id=t_constants.MOCK_PORT_ID, - subnet_id=t_constants.MOCK_SUBNET_ID, - qos_policy_id=t_constants.MOCK_QOS_POLICY_ID1) -VIP2 = o_data_models.Vip(port_id=t_constants.MOCK_PORT_ID2, - subnet_id=t_constants.MOCK_SUBNET_ID2, - qos_policy_id=t_constants.MOCK_QOS_POLICY_ID2) -LB = o_data_models.LoadBalancer(vip=VIP) -LB2 = o_data_models.LoadBalancer(vip=VIP2) -FIRST_IP = {"ip_address": IP_ADDRESS, "subnet_id": SUBNET_ID} -FIXED_IPS = [FIRST_IP] -INTERFACE = data_models.Interface(id=uuidutils.generate_uuid(), - compute_id=COMPUTE_ID, fixed_ips=FIXED_IPS, - port_id=PORT_ID) -AMPS_DATA = [o_data_models.Amphora(id=t_constants.MOCK_AMP_ID1, - status=constants.AMPHORA_ALLOCATED, - vrrp_port_id=t_constants.MOCK_VRRP_PORT_ID1, - vrrp_ip=t_constants.MOCK_VRRP_IP1), - o_data_models.Amphora(id=t_constants.MOCK_AMP_ID2, - status=constants.AMPHORA_ALLOCATED, - vrrp_port_id=t_constants.MOCK_VRRP_PORT_ID2, - vrrp_ip=t_constants.MOCK_VRRP_IP2), - 
o_data_models.Amphora(id=t_constants.MOCK_AMP_ID3, - status=constants.DELETED, - vrrp_port_id=t_constants.MOCK_VRRP_PORT_ID3, - vrrp_ip=t_constants.MOCK_VRRP_IP3) - ] -UPDATE_DICT = {constants.TOPOLOGY: None} - - -class TestException(Exception): - - def __init__(self, value): - self.value = value - - def __str__(self): - return repr(self.value) - - -@mock.patch('octavia.common.utils.get_network_driver') -class TestNetworkTasks(base.TestCase): - def setUp(self): - network_tasks.LOG = mock.MagicMock() - self.amphora_mock = mock.MagicMock() - self.load_balancer_mock = mock.MagicMock() - self.vip_mock = mock.MagicMock() - self.vip_mock.subnet_id = SUBNET_ID - self.vip_mock.network_id = NETWORK_ID - self.load_balancer_mock.vip = self.vip_mock - self.load_balancer_mock.amphorae = [] - self.amphora_mock.id = AMPHORA_ID - self.amphora_mock.compute_id = COMPUTE_ID - self.amphora_mock.status = constants.AMPHORA_ALLOCATED - self.mgmt_net_id = MGMT_NETWORK_ID - self.mgmt_subnet_id = MGMT_SUBNET_ID - conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) - conf.config(group="controller_worker", - amp_boot_network_list=[MGMT_NETWORK_ID]) - conf.config(group="networking", max_retries=1) - super().setUp() - - def test_calculate_amphora_delta(self, mock_get_net_driver): - VRRP_PORT_ID = uuidutils.generate_uuid() - VIP_NETWORK_ID = uuidutils.generate_uuid() - VIP_SUBNET_ID = uuidutils.generate_uuid() - DELETE_NETWORK_ID = uuidutils.generate_uuid() - MEMBER_NETWORK_ID = uuidutils.generate_uuid() - MEMBER_SUBNET_ID = uuidutils.generate_uuid() - mock_driver = mock.MagicMock() - mock_get_net_driver.return_value = mock_driver - member_mock = mock.MagicMock() - member_mock.subnet_id = MEMBER_SUBNET_ID - pool_mock = mock.MagicMock() - pool_mock.members = [member_mock] - lb_mock = mock.MagicMock() - lb_mock.pools = [pool_mock] - lb_mock.vip = mock.MagicMock() - lb_mock.vip.subnet_id = VIP_SUBNET_ID - lb_mock.vip.network_id = VIP_NETWORK_ID - amphora_mock = mock.MagicMock() - amphora_mock.id = AMPHORA_ID - amphora_mock.compute_id = COMPUTE_ID - amphora_mock.vrrp_port_id = VRRP_PORT_ID - - mgmt_subnet = data_models.Subnet( - id=MGMT_SUBNET_ID, - network_id=MGMT_NETWORK_ID) - mgmt_net = data_models.Network( - id=MGMT_NETWORK_ID, - subnets=[mgmt_subnet.id]) - mgmt_interface = data_models.Interface( - network_id=mgmt_net.id, - fixed_ips=[ - data_models.FixedIP( - subnet_id=mgmt_subnet.id)]) - - vrrp_subnet = data_models.Subnet( - id=VIP_SUBNET_ID, - network_id=VIP_NETWORK_ID) - vrrp_port = data_models.Port( - id=VRRP_PORT_ID, - network_id=VIP_NETWORK_ID, - fixed_ips=[ - data_models.FixedIP( - subnet=vrrp_subnet, - subnet_id=vrrp_subnet.id)]) - vrrp_interface = data_models.Interface( - network_id=VIP_NETWORK_ID, - fixed_ips=vrrp_port.fixed_ips) - - member_subnet = data_models.Subnet( - id=MEMBER_SUBNET_ID, - network_id=MEMBER_NETWORK_ID) - - to_be_deleted_interface = data_models.Interface( - id=mock.Mock(), - network_id=DELETE_NETWORK_ID) - - mock_driver.get_port.return_value = vrrp_port - mock_driver.get_subnet.return_value = member_subnet - mock_driver.get_network.return_value = mgmt_net - mock_driver.get_plugged_networks.return_value = [ - mgmt_interface, - vrrp_interface, - to_be_deleted_interface] - - calc_amp_delta = network_tasks.CalculateAmphoraDelta() - - # Test vrrp_port_id is None - result = calc_amp_delta.execute(lb_mock, amphora_mock, {}) - - self.assertEqual(AMPHORA_ID, result.amphora_id) - self.assertEqual(COMPUTE_ID, result.compute_id) - self.assertEqual(1, len(result.add_nics)) - 
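test_calculate_amphora_delta above reduces to set arithmetic: compare the networks the amphora needs (member subnets, plus the VIP and management networks it must keep) against what is currently plugged, and emit add/delete NIC lists. A toy version of that comparison over plain network-ID sets (the removed task worked on Interface/Delta data models instead):

def calculate_nic_delta(desired_net_ids, plugged_net_ids, keep_net_ids):
    """Networks to plug are desired-but-missing; networks to unplug
    are plugged-but-unneeded, minus the protected mgmt/VIP networks."""
    add_nics = desired_net_ids - plugged_net_ids
    delete_nics = plugged_net_ids - desired_net_ids - keep_net_ids
    return sorted(add_nics), sorted(delete_nics)


# Mirrors the scenario above: one member network to add, one stale
# network to drop, management and VIP networks left untouched.
add, delete = calculate_nic_delta(
    desired_net_ids={'member-net'},
    plugged_net_ids={'mgmt-net', 'vip-net', 'stale-net'},
    keep_net_ids={'mgmt-net', 'vip-net'})
assert add == ['member-net'] and delete == ['stale-net']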
self.assertEqual(MEMBER_NETWORK_ID, result.add_nics[0].network_id) - self.assertEqual(1, len(result.delete_nics)) - self.assertEqual(DELETE_NETWORK_ID, result.delete_nics[0].network_id) - mock_driver.get_subnet.assert_called_once_with(MEMBER_SUBNET_ID) - mock_driver.get_plugged_networks.assert_called_once_with(COMPUTE_ID) - - def test_calculate_delta(self, mock_get_net_driver): - mock_driver = mock.MagicMock() - mock_get_net_driver.return_value = mock_driver - empty_deltas = {self.amphora_mock.id: data_models.Delta( - amphora_id=self.amphora_mock.id, - compute_id=self.amphora_mock.compute_id, - add_nics=[], - delete_nics=[], - add_subnets=[], - delete_subnets=[], - )} - - mgmt_subnet = data_models.Subnet( - id=self.mgmt_subnet_id, network_id=self.mgmt_net_id) - mgmt_net = data_models.Network( - id=self.mgmt_net_id, - subnets=[mgmt_subnet.id]) - mgmt_ip_address = mock.MagicMock() - mgmt_interface = data_models.Interface( - network_id=self.mgmt_net_id, - fixed_ips=[ - data_models.FixedIP( - subnet=mgmt_subnet, - subnet_id=self.mgmt_subnet_id, - ip_address=mgmt_ip_address - ) - ]) - vrrp_subnet = data_models.Subnet( - id=self.vip_mock.subnet_id, network_id=self.vip_mock.network_id, - name='vrrp_subnet') - member_vip_subnet = data_models.Subnet( - id=uuidutils.generate_uuid(), network_id=self.vip_mock.network_id, - name='member_vip_subnet') - vip_net = data_models.Network( - id=self.vip_mock.network_id, - subnets=[member_vip_subnet, vrrp_subnet], - name='flat_network') - vrrp_port = data_models.Port( - id=uuidutils.generate_uuid(), - network_id=vip_net.id, network=vip_net, - fixed_ips=[ - data_models.FixedIP( - subnet=vrrp_subnet, subnet_id=vrrp_subnet.id, - ip_address=t_constants.MOCK_IP_ADDRESS) - ], - name='vrrp_port') - - member_private_net_id = uuidutils.generate_uuid() - member_private_subnet = data_models.Subnet( - id=uuidutils.generate_uuid(), network_id=member_private_net_id, - name='member_private_subnet') - member_private_subnet2 = data_models.Subnet( - id=uuidutils.generate_uuid(), network_id=member_private_net_id, - name='member_private_subnet2') - member_private_net = data_models.Network( - id=member_private_subnet.network_id, - subnets=[member_private_subnet, member_private_subnet2], - name='member_private_net') - member_private_subnet_port = data_models.Port( - id=uuidutils.generate_uuid(), - network_id=member_private_net.id, network=member_private_net, - fixed_ips=[ - data_models.FixedIP( - subnet=member_private_subnet, - subnet_id=member_private_subnet.id, - ip_address=t_constants.MOCK_IP_ADDRESS2) - ], - name='member_private_net_port') - member_private_subnet2_port = data_models.Port( - id=uuidutils.generate_uuid(), - network_id=member_private_net.id, network=member_private_net, - fixed_ips=[ - data_models.FixedIP( - subnet=member_private_subnet2, - subnet_id=member_private_subnet2.id, - ip_address=t_constants.MOCK_IP_ADDRESS2) - ], - name='member_private_net_port') - - # Pretend the VIP is on the member network, so already plugged - mock_driver.get_plugged_networks.return_value = [ - mgmt_interface, - data_models.Interface( - network_id=vip_net.id, port_id=vrrp_port.id, - fixed_ips=vrrp_port.fixed_ips)] - mock_driver.get_port.return_value = vrrp_port - mock_driver.get_subnet.return_value = vrrp_subnet - mock_driver.get_network.return_value = mgmt_net - - calc_delta = network_tasks.CalculateDelta() - - # Test with no amps or anything at all - self.assertEqual({}, calc_delta.execute( - self.load_balancer_mock, {})) - - # Test with one amp and no pools, only the base network 
plugged - # Delta should be empty - mock_driver.reset_mock() - - self.amphora_mock.load_balancer = self.load_balancer_mock - self.load_balancer_mock.amphorae = [self.amphora_mock] - self.load_balancer_mock.pools = [] - self.assertEqual(empty_deltas, - calc_delta.execute(self.load_balancer_mock, {})) - mock_driver.get_plugged_networks.assert_called_once_with(COMPUTE_ID) - - # Test with one amp and one pool but no members, nothing plugged - # Delta should be empty - mock_driver.reset_mock() - pool_mock = mock.MagicMock() - pool_mock.members = [] - self.load_balancer_mock.pools = [pool_mock] - self.assertEqual(empty_deltas, - calc_delta.execute(self.load_balancer_mock, {})) - - # Test with one amp/pool and one member (on a distinct member subnet) - # Dummy AZ is provided - # Only the base network is already plugged - # Delta should be one additional network/subnet to plug - mock_driver.reset_mock() - member_mock = mock.MagicMock() - member_mock.subnet_id = member_private_subnet.id - member2_mock = mock.MagicMock() - member2_mock.subnet_id = member_private_subnet2.id - pool_mock.members = [member_mock] - az = { - constants.COMPUTE_ZONE: 'foo' - } - mock_driver.get_subnet.return_value = data_models.Subnet( - id=2, network_id=3) - - ndm = data_models.Delta( - amphora_id=self.amphora_mock.id, - compute_id=self.amphora_mock.compute_id, - add_nics=[ - data_models.Interface( - network_id=3, - fixed_ips=[ - data_models.FixedIP( - subnet_id=member_private_subnet.id)])], - delete_nics=[], - add_subnets=[{ - 'subnet_id': member_private_subnet.id, - 'network_id': 3, - 'port_id': None}], - delete_subnets=[]) - self.assertEqual({self.amphora_mock.id: ndm}, - calc_delta.execute(self.load_balancer_mock, az)) - - mock_driver.get_subnet.assert_called_once_with( - member_mock.subnet_id) - - # Test with one amp/pool and one member (not plugged) that is being - # deleted - # Only the base network is already plugged - # Delta should be empty - mock_driver.reset_mock() - member_mock = mock.MagicMock() - member_mock.subnet_id = member_private_subnet.id - member_mock.provisioning_status = constants.PENDING_DELETE - pool_mock.members = [member_mock] - - self.assertEqual(empty_deltas, - calc_delta.execute(self.load_balancer_mock, {})) - - # Test with one amp/pool and one member (without any subnets) - # Only the base network is already plugged - # No delta - mock_driver.reset_mock() - member_mock = mock.MagicMock() - member_mock.subnet_id = None - pool_mock.members = [member_mock] - - self.assertEqual(empty_deltas, - calc_delta.execute(self.load_balancer_mock, {})) - - # Test with one amp and one pool and one member - # Management network is defined in AZ metadata - # Base network AND member network/subnet already plugged - # Delta should be empty - mock_driver.reset_mock() - member_mock = mock.MagicMock() - member_mock.subnet_id = member_private_subnet.id - pool_mock.members = [member_mock] - - mgmt2_subnet_id = uuidutils.generate_uuid() - mgmt2_net_id = uuidutils.generate_uuid() - mgmt2_subnet = data_models.Subnet( - id=mgmt2_subnet_id, - network_id=mgmt2_net_id) - mgmt2_net = data_models.Network( - id=mgmt2_net_id, - subnets=[mgmt2_subnet.id] - ) - mgmt2_interface = data_models.Interface( - network_id=mgmt2_net_id, - fixed_ips=[ - data_models.FixedIP( - subnet=mgmt2_subnet, - subnet_id=mgmt2_subnet_id, - ) - ]) - mock_driver.get_network.return_value = mgmt2_net - az = { - constants.MANAGEMENT_NETWORK: mgmt2_net_id, - } - mock_driver.get_subnet.return_value = member_private_subnet - 
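The branch above verifies that an availability-zone profile can override the boot-time management network via constants.MANAGEMENT_NETWORK in the AZ metadata dict. The precedence is the subtle part; a sketch of the lookup, where conf_boot_networks stands in for the [controller_worker] amp_boot_network_list option and the constant's string value is assumed:

MANAGEMENT_NETWORK = 'management_network'  # assumed literal for the constant


def resolve_mgmt_network(az_metadata, conf_boot_networks):
    """AZ metadata wins over the configured boot network list."""
    az_net = (az_metadata or {}).get(MANAGEMENT_NETWORK)
    if az_net:
        return az_net
    return conf_boot_networks[0]  # fall back to the configured default


assert resolve_mgmt_network({MANAGEMENT_NETWORK: 'az-net'}, ['cfg-net']) == 'az-net'
assert resolve_mgmt_network({}, ['cfg-net']) == 'cfg-net'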
mock_driver.get_plugged_networks.return_value = [ - mgmt2_interface, - data_models.Interface( - network_id=vrrp_subnet.network_id, - fixed_ips=vrrp_port.fixed_ips), - data_models.Interface( - network_id=member_private_subnet.network_id, - fixed_ips=member_private_subnet_port.fixed_ips)] - - self.assertEqual(empty_deltas, - calc_delta.execute(self.load_balancer_mock, az)) - - # Test with one amp and one pool and one member, wrong network plugged - # Delta should be one network/subnet to add and one to remove - mock_driver.reset_mock() - mock_driver.get_network.return_value = mgmt_net - member_mock = mock.MagicMock() - member_mock.subnet_id = member_private_subnet.id - pool_mock.members = [member_mock] - az = { - constants.COMPUTE_ZONE: 'foo' - } - mock_driver.get_subnet.return_value = member_private_subnet - mock_driver.get_plugged_networks.return_value = [ - mgmt_interface, - data_models.Interface( - network_id=vrrp_subnet.network_id, - fixed_ips=vrrp_port.fixed_ips), - data_models.Interface( - network_id='bad_net', - fixed_ips=[data_models.FixedIP(subnet_id='bad_subnet')])] - - ndm = data_models.Delta( - amphora_id=self.amphora_mock.id, - compute_id=self.amphora_mock.compute_id, - add_nics=[data_models.Interface( - network_id=member_private_net.id, - fixed_ips=[data_models.FixedIP( - subnet_id=member_private_subnet.id)])], - delete_nics=[data_models.Interface(network_id='bad_net')], - add_subnets=[{ - 'subnet_id': member_private_subnet.id, - 'network_id': member_private_net.id, - 'port_id': None - }], - delete_subnets=[{ - 'subnet_id': 'bad_subnet', - 'network_id': 'bad_net', - 'port_id': None - }]) - self.assertEqual({self.amphora_mock.id: ndm}, - calc_delta.execute(self.load_balancer_mock, az)) - - # Test with one amp and one pool and no members, one network plugged - # Delta should be one network to remove - mock_driver.reset_mock() - pool_mock.members = [] - mock_driver.get_subnet.side_effect = [ - vrrp_subnet] - mock_driver.get_plugged_networks.return_value = [ - mgmt_interface, - data_models.Interface( - network_id=vrrp_subnet.network_id, - fixed_ips=vrrp_port.fixed_ips), - data_models.Interface( - network_id='bad_net', - fixed_ips=[data_models.FixedIP(subnet_id='bad_subnet')])] - - ndm = data_models.Delta( - amphora_id=self.amphora_mock.id, - compute_id=self.amphora_mock.compute_id, - add_nics=[], - delete_nics=[data_models.Interface(network_id='bad_net')], - add_subnets=[], - delete_subnets=[{ - 'subnet_id': 'bad_subnet', - 'network_id': 'bad_net', - 'port_id': None - }]) - self.assertEqual({self.amphora_mock.id: ndm}, - calc_delta.execute(self.load_balancer_mock, {})) - - # Add a new member on a new subnet, an interface with another subnet of - # the same network is already plugged - # Delta should be one new subnet - mock_driver.reset_mock() - pool_mock.members = [member_mock, member2_mock] - mock_driver.get_subnet.side_effect = [ - vrrp_subnet, - member_private_subnet, - member_private_subnet2] - mock_driver.get_plugged_networks.return_value = [ - mgmt_interface, - data_models.Interface( - network_id=vrrp_subnet.network_id, - fixed_ips=vrrp_port.fixed_ips), - data_models.Interface( - network_id=member_private_net_id, - port_id=member_private_subnet_port.id, - fixed_ips=member_private_subnet_port.fixed_ips)] - - ndm = data_models.Delta( - amphora_id=self.amphora_mock.id, - compute_id=self.amphora_mock.compute_id, - add_nics=[], - delete_nics=[], - add_subnets=[{ - 'subnet_id': member_private_subnet2.id, - 'network_id': member_private_net_id, - 'port_id': 
member_private_subnet_port.id - }], - delete_subnets=[] - ) - self.assertEqual({self.amphora_mock.id: ndm}, - calc_delta.execute(self.load_balancer_mock, {})) - - # a new member on a new subnet on an existing network, a delete member2 - # on another subnet of the same network - # Delta should be one new subnet, one deleted subnet, no interface - # change - mock_driver.reset_mock() - pool_mock.members = [member_mock] - mock_driver.get_subnet.return_value = member_private_subnet - mock_driver.get_plugged_networks.return_value = [ - mgmt_interface, - data_models.Interface( - network_id=vrrp_subnet.network_id, - fixed_ips=vrrp_port.fixed_ips), - data_models.Interface( - network_id=member_private_net_id, - port_id=member_private_subnet2_port.id, - fixed_ips=member_private_subnet2_port.fixed_ips)] - - ndm = data_models.Delta( - amphora_id=self.amphora_mock.id, - compute_id=self.amphora_mock.compute_id, - add_nics=[], - delete_nics=[], - add_subnets=[{ - 'subnet_id': member_private_subnet.id, - 'network_id': member_private_net_id, - 'port_id': member_private_subnet2_port.id}], - delete_subnets=[{ - 'subnet_id': member_private_subnet2.id, - 'network_id': member_private_net_id, - 'port_id': member_private_subnet2_port.id}] - ) - self.assertEqual({self.amphora_mock.id: ndm}, - calc_delta.execute(self.load_balancer_mock, {})) - - # member on subnet on the same network as the vip subnet - mock_driver.reset_mock() - member_mock.subnet_id = member_vip_subnet.id - pool_mock.members = [member_mock] - mock_driver.get_subnet.side_effect = [ - vrrp_subnet, - member_vip_subnet] - mock_driver.get_plugged_networks.return_value = [ - mgmt_interface, - data_models.Interface( - network_id=vrrp_subnet.network_id, - port_id=vrrp_port.id, - fixed_ips=vrrp_port.fixed_ips), - data_models.Interface( - network_id=member_private_net_id, - port_id=member_private_subnet_port.id, - fixed_ips=member_private_subnet_port.fixed_ips)] - - ndm = data_models.Delta( - amphora_id=self.amphora_mock.id, - compute_id=self.amphora_mock.compute_id, - add_nics=[], - delete_nics=[ - data_models.Interface( - network_id=member_private_net_id, - port_id=member_private_subnet_port.id)], - add_subnets=[{ - 'subnet_id': member_vip_subnet.id, - 'network_id': vip_net.id, - 'port_id': vrrp_port.id}], - delete_subnets=[{ - 'subnet_id': member_private_subnet.id, - 'network_id': member_private_net_id, - 'port_id': member_private_subnet_port.id}] - ) - self.assertEqual({self.amphora_mock.id: ndm}, - calc_delta.execute(self.load_balancer_mock, {})) - - def test_calculate_delta_ipv6_ipv4_subnets(self, mock_get_net_driver): - mock_driver = mock.MagicMock() - mock_get_net_driver.return_value = mock_driver - - # Pool mock should be configured explicitly for each test - pool_mock = mock.MagicMock() - self.load_balancer_mock.pools = [pool_mock] - self.amphora_mock.load_balancer = self.load_balancer_mock - self.load_balancer_mock.amphorae = [self.amphora_mock] - - # Test with one amp and one pool and one new member (in the VIP net) - # Delta should be one additional subnet to plug to the existing port - vrrp_subnet = data_models.Subnet( - id=self.vip_mock.subnet_id, network_id=self.vip_mock.network_id) - member_subnet = data_models.Subnet( - id=uuidutils.generate_uuid(), network_id=self.vip_mock.network_id) - flat_network = data_models.Network( - id=self.vip_mock.network_id, subnets=[member_subnet, vrrp_subnet]) - vrrp_port = data_models.Port( - id=uuidutils.generate_uuid(), - network_id=flat_network.id, network=flat_network, - fixed_ips=[ - 
data_models.FixedIP( - subnet=vrrp_subnet, subnet_id=vrrp_subnet.id, - ip_address=t_constants.MOCK_IP_ADDRESS) - ]) - mock_driver.get_subnet.return_value = member_subnet - member_mock = mock.MagicMock() - member_mock.subnet_id = member_subnet.id - pool_mock.members = [member_mock] - # Pretend the VIP is on the member network, so already plugged - mock_driver.get_plugged_networks.return_value = [ - data_models.Interface( - network_id=flat_network.id, port_id=vrrp_port.id, - fixed_ips=vrrp_port.fixed_ips)] - mock_driver.get_port.return_value = vrrp_port - - calc_delta = network_tasks.CalculateDelta() - deltas = calc_delta.execute(self.load_balancer_mock, {}) - - expected_delta = {self.amphora_mock.id: data_models.Delta( - amphora_id=self.amphora_mock.id, - compute_id=self.amphora_mock.compute_id, - add_nics=[], - delete_nics=[], - add_subnets=[member_subnet.id], - delete_subnets=[])} - self.assertEqual(expected_delta, deltas) - - def test_get_plumbed_networks(self, mock_get_net_driver): - mock_driver = mock.MagicMock() - mock_get_net_driver.return_value = mock_driver - mock_driver.get_plugged_networks.side_effect = [['blah']] - net = network_tasks.GetPlumbedNetworks() - - self.assertEqual(['blah'], net.execute(self.amphora_mock)) - mock_driver.get_plugged_networks.assert_called_once_with( - COMPUTE_ID) - - def test_plug_networks(self, mock_get_net_driver): - mock_driver = mock.MagicMock() - mock_get_net_driver.return_value = mock_driver - - def _interface(network_id): - return [data_models.Interface(network_id=network_id)] - - net = network_tasks.PlugNetworks() - - net.execute(self.amphora_mock, None) - self.assertFalse(mock_driver.plug_network.called) - - delta = data_models.Delta(amphora_id=self.amphora_mock.id, - compute_id=self.amphora_mock.compute_id, - add_nics=[], - delete_nics=[]) - net.execute(self.amphora_mock, delta) - self.assertFalse(mock_driver.plug_network.called) - - delta = data_models.Delta(amphora_id=self.amphora_mock.id, - compute_id=self.amphora_mock.compute_id, - add_nics=_interface(1), - delete_nics=[]) - net.execute(self.amphora_mock, delta) - mock_driver.plug_network.assert_called_once_with(COMPUTE_ID, 1) - - # revert - net.revert(self.amphora_mock, None) - self.assertFalse(mock_driver.unplug_network.called) - - delta = data_models.Delta(amphora_id=self.amphora_mock.id, - compute_id=self.amphora_mock.compute_id, - add_nics=[], - delete_nics=[]) - net.revert(self.amphora_mock, delta) - self.assertFalse(mock_driver.unplug_network.called) - - delta = data_models.Delta(amphora_id=self.amphora_mock.id, - compute_id=self.amphora_mock.compute_id, - add_nics=_interface(1), - delete_nics=[]) - net.revert(self.amphora_mock, delta) - mock_driver.unplug_network.assert_called_once_with(COMPUTE_ID, 1) - - mock_driver.reset_mock() - mock_driver.unplug_network.side_effect = net_base.NetworkNotFound - net.revert(self.amphora_mock, delta) # No exception - mock_driver.unplug_network.assert_called_once_with(COMPUTE_ID, 1) - - mock_driver.reset_mock() - mock_driver.unplug_network.side_effect = TestException('test') - self.assertRaises(TestException, - net.revert, - self.amphora_mock, - delta) - mock_driver.unplug_network.assert_called_once_with(COMPUTE_ID, 1) - - def test_unplug_networks(self, mock_get_net_driver): - mock_driver = mock.MagicMock() - mock_get_net_driver.return_value = mock_driver - - def _interface(network_id): - return [data_models.Interface(network_id=network_id)] - - net = network_tasks.UnPlugNetworks() - - net.execute(self.amphora_mock, None) - 
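The plug/unplug tests here encode a deliberate asymmetry: UnPlugNetworks.execute() treats NetworkNotFound as success and swallows (logs) any other failure, while PlugNetworks.revert() only swallows NetworkNotFound and re-raises everything else. A sketch of both behaviors behind one flag; NetworkNotFound below is a local stand-in for octavia.network.base.NetworkNotFound:

import logging

LOG = logging.getLogger(__name__)


class NetworkNotFound(Exception):
    """Stand-in for octavia.network.base.NetworkNotFound."""


def unplug_quietly(driver, compute_id, network_id, swallow_all=False):
    """Idempotent unplug: a missing network is the desired end state."""
    try:
        driver.unplug_network(compute_id, network_id)
    except NetworkNotFound:
        pass  # already unplugged -- nothing left to do
    except Exception:
        if not swallow_all:
            raise  # revert paths surface unexpected errors
        LOG.exception('Unable to unplug network %s', network_id)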
self.assertFalse(mock_driver.unplug_network.called) - - delta = data_models.Delta(amphora_id=self.amphora_mock.id, - compute_id=self.amphora_mock.compute_id, - add_nics=[], - delete_nics=[]) - net.execute(self.amphora_mock, delta) - self.assertFalse(mock_driver.unplug_network.called) - - delta = data_models.Delta(amphora_id=self.amphora_mock.id, - compute_id=self.amphora_mock.compute_id, - add_nics=[], - delete_nics=_interface(1)) - net.execute(self.amphora_mock, delta) - mock_driver.unplug_network.assert_called_once_with(COMPUTE_ID, 1) - - mock_driver.reset_mock() - mock_driver.unplug_network.side_effect = net_base.NetworkNotFound - net.execute(self.amphora_mock, delta) # No exception - mock_driver.unplug_network.assert_called_once_with(COMPUTE_ID, 1) - - # Do a test with a general exception in case behavior changes - mock_driver.reset_mock() - mock_driver.unplug_network.side_effect = Exception() - net.execute(self.amphora_mock, delta) # No exception - mock_driver.unplug_network.assert_called_once_with(COMPUTE_ID, 1) - - def test_get_member_ports(self, mock_get_net_driver): - mock_driver = mock.MagicMock() - mock_get_net_driver.return_value = mock_driver - - def _interface(port_id): - return [data_models.Interface(port_id=port_id)] - - net_task = network_tasks.GetMemberPorts() - net_task.execute(LB, self.amphora_mock) - mock_driver.get_port.assert_called_once_with(t_constants.MOCK_PORT_ID) - mock_driver.get_plugged_networks.assert_called_once_with(COMPUTE_ID) - - mock_driver.reset_mock() - net_task = network_tasks.GetMemberPorts() - mock_driver.get_plugged_networks.return_value = _interface(1) - mock_driver.get_port.side_effect = [ - data_models.Port(network_id=NETWORK_ID), - data_models.Port(network_id=NETWORK_ID)] - net_task.execute(self.load_balancer_mock, self.amphora_mock) - self.assertEqual(2, mock_driver.get_port.call_count) - self.assertFalse(mock_driver.get_network.called) - - mock_driver.reset_mock() - port_mock = mock.MagicMock() - fixed_ip_mock = mock.MagicMock() - fixed_ip_mock.subnet_id = 1 - port_mock.fixed_ips = [fixed_ip_mock] - net_task = network_tasks.GetMemberPorts() - mock_driver.get_plugged_networks.return_value = _interface(1) - mock_driver.get_port.side_effect = [ - data_models.Port(network_id=NETWORK_ID), port_mock] - ports = net_task.execute(self.load_balancer_mock, self.amphora_mock) - mock_driver.get_subnet.assert_called_once_with(1) - self.assertEqual([port_mock], ports) - - def test_handle_network_delta(self, mock_get_net_driver): - mock_net_driver = mock.MagicMock() - mock_get_net_driver.return_value = mock_net_driver - - nic1 = mock.MagicMock() - nic1.fixed_ips = [data_models.FixedIP( - subnet_id=uuidutils.generate_uuid())] - nic1.network_id = uuidutils.generate_uuid() - nic2 = mock.MagicMock() - nic2.fixed_ips = [data_models.FixedIP( - subnet_id=uuidutils.generate_uuid())] - nic2.network_id = uuidutils.generate_uuid() - interface1 = mock.MagicMock() - interface1.port_id = uuidutils.generate_uuid() - port1 = mock.MagicMock() - port1.network_id = uuidutils.generate_uuid() - fixed_ip = mock.MagicMock() - fixed_ip.subnet_id = nic1.fixed_ips[0].subnet_id - fixed_ip2 = mock.MagicMock() - fixed_ip2.subnet_id = uuidutils.generate_uuid() - port1.fixed_ips = [fixed_ip, fixed_ip2] - subnet = mock.MagicMock() - network = mock.MagicMock() - - delta = data_models.Delta(amphora_id=self.amphora_mock.id, - compute_id=self.amphora_mock.compute_id, - add_nics=[nic1], - delete_nics=[nic2, nic2, nic2], - add_subnets=[], - delete_subnets=[]) - - 
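The revert calls below pass a taskflow Failure object as the task result and assert that no cleanup happens in that case: taskflow hands revert() whatever execute() produced, so receiving a Failure means nothing was plugged and there is nothing to undo. A sketch of that guard, with the signature simplified from the removed task:

from taskflow.types import failure


def revert_sketch(result, amphora, delta, driver):
    """Skip cleanup when execute() itself failed or produced no delta."""
    if isinstance(result, failure.Failure) or not delta:
        return  # execute() never plugged anything; undoing would be wrong
    for nic in delta.add_nics:
        driver.unplug_network(amphora.compute_id, nic.network_id)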
mock_net_driver.plug_network.return_value = interface1 - mock_net_driver.get_port.return_value = port1 - fixed_port1 = mock.MagicMock() - fixed_port1.network_id = port1.network_id - fixed_port1.fixed_ips = [fixed_ip] - mock_net_driver.unplug_fixed_ip.return_value = fixed_port1 - mock_net_driver.get_network.return_value = network - mock_net_driver.get_subnet.return_value = subnet - - mock_net_driver.unplug_network.side_effect = [ - None, net_base.NetworkNotFound, Exception] - - handle_net_delta_obj = network_tasks.HandleNetworkDelta() - result = handle_net_delta_obj.execute(self.amphora_mock, delta) - - mock_net_driver.plug_network.assert_called_once_with( - self.amphora_mock.compute_id, nic1.network_id) - mock_net_driver.unplug_fixed_ip.assert_called_once_with( - port_id=interface1.port_id, subnet_id=fixed_ip2.subnet_id) - mock_net_driver.get_port.assert_called_once_with(interface1.port_id) - mock_net_driver.get_network.assert_called_once_with(port1.network_id) - mock_net_driver.get_subnet.assert_called_once_with(fixed_ip.subnet_id) - - self.assertEqual({self.amphora_mock.id: [fixed_port1]}, result) - - mock_net_driver.unplug_network.assert_called_with( - self.amphora_mock.compute_id, nic2.network_id) - - # Revert - delta2 = data_models.Delta(amphora_id=self.amphora_mock.id, - compute_id=self.amphora_mock.compute_id, - add_nics=[nic1, nic1], - delete_nics=[nic2, nic2, nic2]) - - mock_net_driver.unplug_network.reset_mock() - handle_net_delta_obj.revert( - failure.Failure.from_exception(Exception('boom')), None, None) - mock_net_driver.unplug_network.assert_not_called() - - mock_net_driver.unplug_network.reset_mock() - handle_net_delta_obj.revert(None, None, None) - mock_net_driver.unplug_network.assert_not_called() - - mock_net_driver.unplug_network.reset_mock() - handle_net_delta_obj.revert(None, None, delta2) - - def test_handle_network_deltas(self, mock_get_net_driver): - mock_driver = mock.MagicMock() - mock_get_net_driver.return_value = mock_driver - - self.load_balancer_mock.amphorae = [self.amphora_mock] - - subnet1 = uuidutils.generate_uuid() - network1 = uuidutils.generate_uuid() - port1 = uuidutils.generate_uuid() - subnet2 = uuidutils.generate_uuid() - - def _interface(network_id, port_id=None, subnet_id=None): - return data_models.Interface( - network_id=network_id, - port_id=port_id, - fixed_ips=[ - data_models.FixedIP( - subnet_id=subnet_id)]) - - net = network_tasks.HandleNetworkDeltas() - - net.execute({}, self.load_balancer_mock) - self.assertFalse(mock_driver.plug_network.called) - - delta = data_models.Delta(amphora_id=self.amphora_mock.id, - compute_id=self.amphora_mock.compute_id, - add_nics=[], - delete_nics=[], - add_subnets=[], - delete_subnets=[]) - net.execute({self.amphora_mock.id: delta}, self.load_balancer_mock) - self.assertFalse(mock_driver.plug_network.called) - - # Adding a subnet on a new network - port = data_models.Port( - id=port1, - network_id=network1, - fixed_ips=[ - data_models.FixedIP(subnet_id=subnet1)]) - mock_driver.get_port.return_value = port - mock_driver.plug_fixed_ip.return_value = port - mock_driver.get_network.return_value = data_models.Network( - id=network1) - mock_driver.get_subnet.return_value = data_models.Subnet( - id=subnet1, - network_id=network1) - add_nics = [_interface(network1, subnet_id=subnet1)] - add_subnets = [{ - 'subnet_id': subnet1, - 'network_id': network1, - 'port_id': None}] - - delta = data_models.Delta(amphora_id=self.amphora_mock.id, - compute_id=self.amphora_mock.compute_id, - add_nics=add_nics, - delete_nics=[], 
- add_subnets=add_subnets, - delete_subnets=[]) - updated_ports = net.execute({self.amphora_mock.id: delta}, - self.load_balancer_mock) - mock_driver.plug_network.assert_called_once_with( - self.amphora_mock.compute_id, network1) - mock_driver.unplug_network.assert_not_called() - - self.assertEqual(1, len(updated_ports)) - - updated_port = updated_ports[self.amphora_mock.id][0] - self.assertEqual(port1, updated_port.id) - self.assertEqual(network1, updated_port.network_id) - self.assertEqual(1, len(updated_port.fixed_ips)) - self.assertEqual(subnet1, updated_port.fixed_ips[0].subnet_id) - - # revert - net.revert(None, {self.amphora_mock.id: delta}, - self.load_balancer_mock) - mock_driver.unplug_network.assert_called_once_with( - self.amphora_mock.compute_id, network1) - - # Adding a subnet on an existing network/port - mock_driver.reset_mock() - port = data_models.Port( - id=port1, - network_id=network1, - fixed_ips=[ - data_models.FixedIP(subnet_id=subnet2), - data_models.FixedIP(subnet_id=subnet1)]) - mock_driver.plug_fixed_ip.return_value = port - mock_driver.get_network.return_value = data_models.Network( - id=network1) - mock_driver.get_subnet.side_effect = [ - data_models.Subnet( - id=subnet2, - network_id=network1), - data_models.Subnet( - id=subnet1, - network_id=network1)] - add_nics = [_interface(network1)] - add_subnets = [{ - 'subnet_id': subnet1, - 'network_id': network1, - 'port_id': port1}] - - delta = data_models.Delta(amphora_id=self.amphora_mock.id, - compute_id=self.amphora_mock.compute_id, - add_nics=[], - delete_nics=[], - add_subnets=add_subnets, - delete_subnets=[]) - updated_ports = net.execute({self.amphora_mock.id: delta}, - self.load_balancer_mock) - mock_driver.plug_network.assert_not_called() - mock_driver.unplug_network.assert_not_called() - mock_driver.get_port.assert_not_called() - mock_driver.plug_fixed_ip.assert_called_once_with(port_id=port1, - subnet_id=subnet1) - self.assertEqual(1, len(updated_ports)) - - updated_port = updated_ports[self.amphora_mock.id][0] - self.assertEqual(port1, updated_port.id) - self.assertEqual(network1, updated_port.network_id) - self.assertEqual(2, len(updated_port.fixed_ips)) - self.assertEqual(subnet2, updated_port.fixed_ips[0].subnet_id) - self.assertEqual(subnet1, updated_port.fixed_ips[1].subnet_id) - - # Deleting a subnet - mock_driver.reset_mock() - delete_subnets = [{ - 'subnet_id': subnet1, - 'network_id': network1, - 'port_id': port1}] - mock_driver.get_subnet.side_effect = [ - data_models.Subnet( - id=subnet2, - network_id=network1)] - mock_driver.unplug_fixed_ip.return_value = data_models.Port( - id=port1, - network_id=network1, - fixed_ips=[ - data_models.FixedIP(subnet_id=subnet2)]) - - delta = data_models.Delta(amphora_id=self.amphora_mock.id, - compute_id=self.amphora_mock.compute_id, - add_nics=[], - delete_nics=[], - add_subnets=[], - delete_subnets=delete_subnets) - updated_ports = net.execute({self.amphora_mock.id: delta}, - self.load_balancer_mock) - mock_driver.delete_port.assert_not_called() - mock_driver.plug_network.assert_not_called() - mock_driver.plug_fixed_ip.assert_not_called() - self.assertEqual(1, len(updated_ports)) - self.assertEqual(1, len(updated_ports[self.amphora_mock.id])) - - updated_port = updated_ports[self.amphora_mock.id][0] - self.assertEqual(port1, updated_port.id) - self.assertEqual(network1, updated_port.network_id) - self.assertEqual(1, len(updated_port.fixed_ips)) - self.assertEqual(subnet2, updated_port.fixed_ips[0].subnet_id) - - # Deleting a subnet, but neutron doesn't 
unplug it - # Delta are empty because there's nothing to update - mock_driver.reset_mock() - delete_subnets = [{ - 'subnet_id': subnet1, - 'network_id': network1, - 'port_id': port1}] - mock_driver.get_subnet.side_effect = [ - data_models.Subnet( - id=subnet2, - network_id=network1), - data_models.Subnet( - id=subnet2, - network_id=network1)] - mock_driver.unplug_fixed_ip.return_value = data_models.Port( - id=port1, - network_id=network1, - fixed_ips=[ - data_models.FixedIP(subnet_id=subnet1), - data_models.FixedIP(subnet_id=subnet2)]) - - delta = data_models.Delta(amphora_id=self.amphora_mock.id, - compute_id=self.amphora_mock.compute_id, - add_nics=[], - delete_nics=[], - add_subnets=[], - delete_subnets=[]) - net.execute({self.amphora_mock.id: delta}, - self.load_balancer_mock) - mock_driver.delete_port.assert_not_called() - mock_driver.plug_network.assert_not_called() - mock_driver.plug_fixed_ip.assert_not_called() - - # Deleting a subnet and a network - mock_driver.reset_mock() - mock_driver.get_subnet.side_effect = [ - data_models.Subnet( - id=subnet2, - network_id=network1), - data_models.Subnet( - id=subnet1, - network_id=network1)] - delete_nics = [_interface(network1, port_id=port1)] - delete_subnets = [{ - 'subnet_id': subnet1, - 'network_id': network1, - 'port_id': port1}] - - delta = data_models.Delta(amphora_id=self.amphora_mock.id, - compute_id=self.amphora_mock.compute_id, - add_nics=[], - delete_nics=delete_nics, - add_subnets=[], - delete_subnets=delete_subnets) - updated_ports = net.execute({self.amphora_mock.id: delta}, - self.load_balancer_mock) - mock_driver.delete_port.assert_called_once_with(port1) - mock_driver.plug_network.assert_not_called() - mock_driver.plug_fixed_ip.assert_not_called() - self.assertEqual(1, len(updated_ports)) - self.assertEqual(0, len(updated_ports[self.amphora_mock.id])) - - # No delta, no actions - mock_driver.reset_mock() - delta = data_models.Delta(amphora_id=self.amphora_mock.id, - compute_id=self.amphora_mock.compute_id, - add_nics=[], - delete_nics=[], - add_subnets=[], - delete_subnets=[]) - net.execute({self.amphora_mock.id: delta}, self.load_balancer_mock) - mock_driver.plug_network.assert_not_called() - mock_driver.plug_fixed_ip.assert_not_called() - mock_driver.unplug_network.assert_not_called() - - delta = data_models.Delta(amphora_id=self.amphora_mock.id, - compute_id=self.amphora_mock.compute_id, - add_nics=[_interface(1, port_id=12)], - delete_nics=[], - add_subnets=[], - delete_subnets=[]) - mock_driver.reset_mock() - mock_driver.unplug_network.side_effect = TestException('test') - net.revert(None, {self.amphora_mock.id: delta}, - self.load_balancer_mock) - mock_driver.unplug_network.assert_called_once_with( - self.amphora_mock.compute_id, 1) - - mock_driver.reset_mock() - mock_driver.delete_port.side_effect = TestException('test') - net.revert(None, {self.amphora_mock.id: delta}, - self.load_balancer_mock) - mock_driver.unplug_network.assert_called_once_with( - self.amphora_mock.compute_id, 1) - mock_driver.delete_port.assert_called_once_with(12) - - mock_driver.reset_mock() - net.execute({}, self.load_balancer_mock) - self.assertFalse(mock_driver.unplug_network.called) - - delta = data_models.Delta(amphora_id=self.amphora_mock.id, - compute_id=self.amphora_mock.compute_id, - add_nics=[], - delete_nics=[], - add_subnets=[], - delete_subnets=[]) - net.execute({self.amphora_mock.id: delta}, self.load_balancer_mock) - self.assertFalse(mock_driver.unplug_network.called) - - delta = 
data_models.Delta(amphora_id=self.amphora_mock.id, - compute_id=self.amphora_mock.compute_id, - add_nics=[], - delete_nics=[_interface(1)], - add_subnets=[], - delete_subnets=[]) - net.execute({self.amphora_mock.id: delta}, self.load_balancer_mock) - mock_driver.unplug_network.assert_called_once_with(COMPUTE_ID, 1) - - mock_driver.reset_mock() - mock_driver.unplug_network.side_effect = net_base.NetworkNotFound - net.execute({self.amphora_mock.id: delta}, self.load_balancer_mock) - mock_driver.unplug_network.assert_called_once_with(COMPUTE_ID, 1) - - # Do a test with a general exception in case behavior changes - mock_driver.reset_mock() - mock_driver.unplug_network.side_effect = Exception() - net.execute({self.amphora_mock.id: delta}, self.load_balancer_mock) - mock_driver.unplug_network.assert_called_once_with(COMPUTE_ID, 1) - - # Do a test with a general exception in case behavior changes - delta = data_models.Delta(amphora_id=self.amphora_mock.id, - compute_id=self.amphora_mock.compute_id, - add_nics=[], - delete_nics=[_interface(1, port_id=12)], - add_subnets=[], - delete_subnets=[]) - mock_driver.reset_mock() - mock_driver.delete_port.side_effect = Exception() - net.execute({self.amphora_mock.id: delta}, self.load_balancer_mock) - mock_driver.unplug_network.assert_called_once_with(COMPUTE_ID, 1) - mock_driver.delete_port.assert_called_once_with(12) - - mock_driver.unplug_network.reset_mock() - net.revert( - failure.Failure.from_exception(Exception('boom')), None, None) - mock_driver.unplug_network.assert_not_called() - - mock_driver.unplug_network.reset_mock() - net.revert(None, None, None) - mock_driver.unplug_network.assert_not_called() - - def test_plug_vip(self, mock_get_net_driver): - mock_driver = mock.MagicMock() - mock_get_net_driver.return_value = mock_driver - net = network_tasks.PlugVIP() - - mock_driver.plug_vip.return_value = ["vip"] - - data = net.execute(LB) - mock_driver.plug_vip.assert_called_once_with(LB, LB.vip) - self.assertEqual(["vip"], data) - - # revert - net.revert(["vip"], LB) - mock_driver.unplug_vip.assert_called_once_with(LB, LB.vip) - - # revert with exception - mock_driver.reset_mock() - mock_driver.unplug_vip.side_effect = Exception('UnplugVipException') - net.revert(["vip"], LB) - mock_driver.unplug_vip.assert_called_once_with(LB, LB.vip) - - @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' 
- 'get_current_loadbalancer_from_db') - def test_apply_qos_on_creation(self, mock_get_lb_db, mock_get_net_driver): - mock_driver = mock.MagicMock() - mock_get_net_driver.return_value = mock_driver - net = network_tasks.ApplyQos() - mock_get_lb_db.return_value = LB - - # execute - UPDATE_DICT[constants.TOPOLOGY] = constants.TOPOLOGY_SINGLE - update_dict = UPDATE_DICT - net.execute(LB, [AMPS_DATA[0]], update_dict) - mock_driver.apply_qos_on_port.assert_called_once_with( - VIP.qos_policy_id, AMPS_DATA[0].vrrp_port_id) - self.assertEqual(1, mock_driver.apply_qos_on_port.call_count) - standby_topology = constants.TOPOLOGY_ACTIVE_STANDBY - mock_driver.reset_mock() - update_dict[constants.TOPOLOGY] = standby_topology - net.execute(LB, AMPS_DATA, update_dict) - mock_driver.apply_qos_on_port.assert_called_with( - t_constants.MOCK_QOS_POLICY_ID1, mock.ANY) - self.assertEqual(2, mock_driver.apply_qos_on_port.call_count) - - # revert - mock_driver.reset_mock() - update_dict = UPDATE_DICT - net.revert(None, LB, [AMPS_DATA[0]], update_dict) - self.assertEqual(0, mock_driver.apply_qos_on_port.call_count) - mock_driver.reset_mock() - update_dict[constants.TOPOLOGY] = standby_topology - net.revert(None, LB, AMPS_DATA, update_dict) - self.assertEqual(0, mock_driver.apply_qos_on_port.call_count) - - @mock.patch('octavia.controller.worker.task_utils.TaskUtils.' - 'get_current_loadbalancer_from_db') - def test_apply_qos_on_update(self, mock_get_lb_db, mock_get_net_driver): - mock_driver = mock.MagicMock() - mock_get_net_driver.return_value = mock_driver - net = network_tasks.ApplyQos() - null_qos_vip = o_data_models.Vip(qos_policy_id=None) - null_qos_lb = o_data_models.LoadBalancer( - vip=null_qos_vip, topology=constants.TOPOLOGY_SINGLE, - amphorae=[AMPS_DATA[0]]) - - tmp_vip_object = o_data_models.Vip( - qos_policy_id=t_constants.MOCK_QOS_POLICY_ID1) - tmp_lb = o_data_models.LoadBalancer( - vip=tmp_vip_object, topology=constants.TOPOLOGY_SINGLE, - amphorae=[AMPS_DATA[0]]) - - # execute - update_dict = {'description': 'fool'} - net.execute(tmp_lb, update_dict=update_dict) - mock_driver.apply_qos_on_port.assert_called_once_with( - t_constants.MOCK_QOS_POLICY_ID1, AMPS_DATA[0].vrrp_port_id) - self.assertEqual(1, mock_driver.apply_qos_on_port.call_count) - - mock_driver.reset_mock() - update_dict = {'vip': {'qos_policy_id': None}} - net.execute(null_qos_lb, update_dict=update_dict) - mock_driver.apply_qos_on_port.assert_called_once_with( - None, AMPS_DATA[0].vrrp_port_id) - self.assertEqual(1, mock_driver.apply_qos_on_port.call_count) - - mock_driver.reset_mock() - update_dict = {'name': '123'} - net.execute(null_qos_lb, update_dict=update_dict) - self.assertEqual(0, mock_driver.apply_qos_on_port.call_count) - - mock_driver.reset_mock() - update_dict = {'description': 'fool'} - tmp_lb.amphorae = AMPS_DATA - tmp_lb.topology = constants.TOPOLOGY_ACTIVE_STANDBY - net.execute(tmp_lb, update_dict=update_dict) - mock_driver.apply_qos_on_port.assert_called_with( - t_constants.MOCK_QOS_POLICY_ID1, mock.ANY) - self.assertEqual(2, mock_driver.apply_qos_on_port.call_count) - - mock_driver.reset_mock() - update_dict = {'description': 'fool', - 'vip': { - 'qos_policy_id': t_constants.MOCK_QOS_POLICY_ID1}} - tmp_lb.amphorae = AMPS_DATA - tmp_lb.topology = constants.TOPOLOGY_ACTIVE_STANDBY - net.execute(tmp_lb, update_dict=update_dict) - mock_driver.apply_qos_on_port.assert_called_with( - t_constants.MOCK_QOS_POLICY_ID1, mock.ANY) - self.assertEqual(2, mock_driver.apply_qos_on_port.call_count) - - mock_driver.reset_mock() - 
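The QoS assertions above and below reduce to one fan-out rule: the VIP's qos_policy_id is applied to the VRRP port of each live amphora, so a SINGLE topology touches one port and ACTIVE_STANDBY touches two (the DELETED amphora in AMPS_DATA is skipped, which is why the expected call count is 2, not 3). A sketch of that rule; the ALLOCATED literal is assumed to match constants.AMPHORA_ALLOCATED:

ALLOCATED = 'ALLOCATED'  # assumed value of constants.AMPHORA_ALLOCATED


def apply_qos_to_amphorae(driver, qos_policy_id, amphorae, topology):
    """Apply one QoS policy to the VRRP port of each live amphora."""
    live = [amp for amp in amphorae if amp.status == ALLOCATED]
    required = 2 if topology == 'ACTIVE_STANDBY' else 1
    for amp in live[:required]:
        driver.apply_qos_on_port(qos_policy_id, amp.vrrp_port_id)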
update_dict = {} - net.execute(null_qos_lb, update_dict=update_dict) - self.assertEqual(0, mock_driver.apply_qos_on_port.call_count) - - # revert - mock_driver.reset_mock() - tmp_lb.amphorae = [AMPS_DATA[0]] - tmp_lb.topology = constants.TOPOLOGY_SINGLE - update_dict = {'description': 'fool'} - mock_get_lb_db.return_value = tmp_lb - net.revert(None, tmp_lb, update_dict=update_dict) - self.assertEqual(0, mock_driver.apply_qos_on_port.call_count) - - mock_driver.reset_mock() - update_dict = {'vip': {'qos_policy_id': None}} - ori_lb_db = LB2 - ori_lb_db.amphorae = [AMPS_DATA[0]] - mock_get_lb_db.return_value = ori_lb_db - net.revert(None, null_qos_lb, update_dict=update_dict) - mock_driver.apply_qos_on_port.assert_called_once_with( - t_constants.MOCK_QOS_POLICY_ID2, AMPS_DATA[0].vrrp_port_id) - self.assertEqual(1, mock_driver.apply_qos_on_port.call_count) - - mock_driver.reset_mock() - update_dict = {'vip': { - 'qos_policy_id': t_constants.MOCK_QOS_POLICY_ID2}} - tmp_lb.amphorae = AMPS_DATA - tmp_lb.topology = constants.TOPOLOGY_ACTIVE_STANDBY - ori_lb_db = LB2 - ori_lb_db.amphorae = [AMPS_DATA[0]] - mock_get_lb_db.return_value = ori_lb_db - net.revert(None, tmp_lb, update_dict=update_dict) - mock_driver.apply_qos_on_port.assert_called_with( - t_constants.MOCK_QOS_POLICY_ID2, mock.ANY) - self.assertEqual(2, mock_driver.apply_qos_on_port.call_count) - - def test_unplug_vip(self, mock_get_net_driver): - mock_driver = mock.MagicMock() - mock_get_net_driver.return_value = mock_driver - net = network_tasks.UnplugVIP() - - net.execute(LB) - mock_driver.unplug_vip.assert_called_once_with(LB, LB.vip) - - def test_allocate_vip(self, mock_get_net_driver): - mock_driver = mock.MagicMock() - mock_get_net_driver.return_value = mock_driver - net = network_tasks.AllocateVIP() - - mock_driver.allocate_vip.return_value = (LB.vip, []) - - mock_driver.reset_mock() - self.assertEqual(LB.vip, net.execute(LB)) - mock_driver.allocate_vip.assert_called_once_with(LB) - - # revert - vip_mock = mock.MagicMock() - net.revert(vip_mock, LB) - mock_driver.deallocate_vip.assert_called_once_with(vip_mock) - - # revert exception - mock_driver.reset_mock() - mock_driver.deallocate_vip.side_effect = Exception('DeallVipException') - vip_mock = mock.MagicMock() - net.revert(vip_mock, LB) - mock_driver.deallocate_vip.assert_called_once_with(vip_mock) - - def test_allocate_vip_for_failover(self, mock_get_net_driver): - mock_driver = mock.MagicMock() - mock_get_net_driver.return_value = mock_driver - net = network_tasks.AllocateVIPforFailover() - - mock_driver.allocate_vip.return_value = (LB.vip, []) - - mock_driver.reset_mock() - self.assertEqual(LB.vip, net.execute(LB)) - mock_driver.allocate_vip.assert_called_once_with(LB) - - # revert - vip_mock = mock.MagicMock() - net.revert(vip_mock, LB) - mock_driver.deallocate_vip.assert_not_called() - - def test_deallocate_vip(self, mock_get_net_driver): - mock_driver = mock.MagicMock() - mock_get_net_driver.return_value = mock_driver - net = network_tasks.DeallocateVIP() - vip = o_data_models.Vip() - lb = o_data_models.LoadBalancer(vip=vip) - net.execute(lb) - mock_driver.deallocate_vip.assert_called_once_with(lb.vip) - - def test_update_vip(self, mock_get_net_driver): - mock_driver = mock.MagicMock() - mock_get_net_driver.return_value = mock_driver - vip = o_data_models.Vip() - lb = o_data_models.LoadBalancer(vip=vip) - net_task = network_tasks.UpdateVIP() - net_task.execute(lb) - mock_driver.update_vip.assert_called_once_with(lb) - - def test_update_vip_for_delete(self, 
mock_get_net_driver): - mock_driver = mock.MagicMock() - mock_get_net_driver.return_value = mock_driver - vip = o_data_models.Vip() - lb = o_data_models.LoadBalancer(vip=vip) - net_task = network_tasks.UpdateVIPForDelete() - net_task.execute(lb) - mock_driver.update_vip.assert_called_once_with(lb, for_delete=True) - - @mock.patch('octavia.db.api.get_session', return_value='TEST') - @mock.patch('octavia.db.repositories.AmphoraRepository.get') - @mock.patch('octavia.db.repositories.LoadBalancerRepository.get') - def test_get_amphora_network_configs_by_id( - self, mock_lb_get, mock_amp_get, - mock_get_session, mock_get_net_driver): - LB_ID = uuidutils.generate_uuid() - AMP_ID = uuidutils.generate_uuid() - mock_driver = mock.MagicMock() - mock_get_net_driver.return_value = mock_driver - mock_amp_get.return_value = 'mock amphora' - mock_lb_get.return_value = 'mock load balancer' - - net_task = network_tasks.GetAmphoraNetworkConfigsByID() - - net_task.execute(LB_ID, AMP_ID) - - mock_driver.get_network_configs.assert_called_once_with( - 'mock load balancer', amphora='mock amphora') - mock_amp_get.assert_called_once_with('TEST', id=AMP_ID) - mock_lb_get.assert_called_once_with('TEST', id=LB_ID) - - @mock.patch('octavia.db.api.get_session', return_value='TEST') - @mock.patch('octavia.db.repositories.LoadBalancerRepository.get') - def test_get_amphorae_network_configs(self, mock_lb_get, mock_get_session, - mock_get_net_driver): - mock_driver = mock.MagicMock() - mock_get_net_driver.return_value = mock_driver - lb = o_data_models.LoadBalancer() - mock_lb_get.return_value = lb - net_task = network_tasks.GetAmphoraeNetworkConfigs() - net_task.execute(lb.id) - mock_lb_get.assert_called_once_with('TEST', id=lb.id) - mock_driver.get_network_configs.assert_called_once_with(lb) - - def test_failover_preparation_for_amphora(self, mock_get_net_driver): - mock_driver = mock.MagicMock() - mock_get_net_driver.return_value = mock_driver - failover = network_tasks.FailoverPreparationForAmphora() - amphora = o_data_models.Amphora(id=AMPHORA_ID, - lb_network_ip=IP_ADDRESS) - failover.execute(amphora) - mock_driver.failover_preparation.assert_called_once_with(amphora) - - def test_retrieve_portids_on_amphora_except_lb_network( - self, mock_get_net_driver): - mock_driver = mock.MagicMock() - mock_get_net_driver.return_value = mock_driver - - def _interface(port_id): - return [data_models.Interface(port_id=port_id)] - - net_task = network_tasks.RetrievePortIDsOnAmphoraExceptLBNetwork() - amphora = o_data_models.Amphora(id=AMPHORA_ID, compute_id=COMPUTE_ID, - lb_network_ip=IP_ADDRESS) - - mock_driver.get_plugged_networks.return_value = [] - net_task.execute(amphora) - mock_driver.get_plugged_networks.assert_called_once_with( - compute_id=COMPUTE_ID) - self.assertFalse(mock_driver.get_port.called) - - mock_driver.reset_mock() - net_task = network_tasks.RetrievePortIDsOnAmphoraExceptLBNetwork() - mock_driver.get_plugged_networks.return_value = _interface(1) - net_task.execute(amphora) - mock_driver.get_port.assert_called_once_with(port_id=1) - - mock_driver.reset_mock() - net_task = network_tasks.RetrievePortIDsOnAmphoraExceptLBNetwork() - port_mock = mock.MagicMock() - fixed_ip_mock = mock.MagicMock() - fixed_ip_mock.ip_address = IP_ADDRESS - port_mock.fixed_ips = [fixed_ip_mock] - mock_driver.get_plugged_networks.return_value = _interface(1) - mock_driver.get_port.return_value = port_mock - ports = net_task.execute(amphora) - self.assertEqual([], ports) - - mock_driver.reset_mock() - net_task = 
network_tasks.RetrievePortIDsOnAmphoraExceptLBNetwork() - port_mock = mock.MagicMock() - fixed_ip_mock = mock.MagicMock() - fixed_ip_mock.ip_address = "172.17.17.17" - port_mock.fixed_ips = [fixed_ip_mock] - mock_driver.get_plugged_networks.return_value = _interface(1) - mock_driver.get_port.return_value = port_mock - ports = net_task.execute(amphora) - self.assertEqual(1, len(ports)) - - def test_plug_ports(self, mock_get_net_driver): - mock_driver = mock.MagicMock() - mock_get_net_driver.return_value = mock_driver - - amphora = mock.MagicMock() - port1 = mock.MagicMock() - port2 = mock.MagicMock() - - plugports = network_tasks.PlugPorts() - plugports.execute(amphora, [port1, port2]) - - mock_driver.plug_port.assert_any_call(amphora, port1) - mock_driver.plug_port.assert_any_call(amphora, port2) - - @mock.patch('octavia.db.api.get_session', return_value='TEST') - @mock.patch('octavia.db.repositories.LoadBalancerRepository.get') - def test_update_vip_sg(self, mock_lb_get, mock_get_session, - mock_get_net_driver): - mock_driver = mock.MagicMock() - mock_driver.update_vip_sg.return_value = SG_ID - mock_get_net_driver.return_value = mock_driver - mock_lb_get.return_value = self.load_balancer_mock - net = network_tasks.UpdateVIPSecurityGroup() - - sg_id = net.execute(self.load_balancer_mock.id) - mock_lb_get.assert_called_once_with('TEST', - id=self.load_balancer_mock.id) - mock_driver.update_vip_sg.assert_called_once_with( - self.load_balancer_mock, self.load_balancer_mock.vip) - self.assertEqual(sg_id, SG_ID) - - def test_get_subnet_from_vip(self, mock_get_net_driver): - mock_driver = mock.MagicMock() - mock_get_net_driver.return_value = mock_driver - net = network_tasks.GetSubnetFromVIP() - - net.execute(LB) - mock_driver.get_subnet.assert_called_once_with(LB.vip.subnet_id) - - def test_plug_vip_amphora(self, mock_get_net_driver): - mock_driver = mock.MagicMock() - mock_get_net_driver.return_value = mock_driver - net = network_tasks.PlugVIPAmpphora() - mockSubnet = mock.MagicMock() - net.execute(LB, self.amphora_mock, mockSubnet) - mock_driver.plug_aap_port.assert_called_once_with( - LB, LB.vip, self.amphora_mock, mockSubnet) - - def test_revert_plug_vip_amphora(self, mock_get_net_driver): - mock_driver = mock.MagicMock() - mock_get_net_driver.return_value = mock_driver - net = network_tasks.PlugVIPAmpphora() - mockSubnet = mock.MagicMock() - net.revert(AMPS_DATA[0], LB, self.amphora_mock, mockSubnet) - mock_driver.unplug_aap_port.assert_called_once_with( - LB.vip, self.amphora_mock, mockSubnet) - - @mock.patch('octavia.controller.worker.v1.tasks.network_tasks.DeletePort.' 
- 'update_progress') - def test_delete_port(self, mock_update_progress, mock_get_net_driver): - PORT_ID = uuidutils.generate_uuid() - mock_driver = mock.MagicMock() - mock_get_net_driver.return_value = mock_driver - mock_driver.delete_port.side_effect = [ - mock.DEFAULT, exceptions.OctaviaException('boom'), mock.DEFAULT, - exceptions.OctaviaException('boom'), - exceptions.OctaviaException('boom'), - exceptions.OctaviaException('boom'), - exceptions.OctaviaException('boom'), - exceptions.OctaviaException('boom'), - exceptions.OctaviaException('boom')] - mock_driver.admin_down_port.side_effect = [ - mock.DEFAULT, exceptions.OctaviaException('boom')] - - net_task = network_tasks.DeletePort() - - # Limit the retry attempts for the test run to save time - net_task.execute.retry.stop = tenacity.stop_after_attempt(2) - - # Test port ID is None (no-op) - net_task.execute(None) - - mock_update_progress.assert_not_called() - mock_driver.delete_port.assert_not_called() - - # Test successful delete - mock_update_progress.reset_mock() - mock_driver.reset_mock() - - net_task.execute(PORT_ID) - - mock_update_progress.assert_called_once_with(0.5) - mock_driver.delete_port.assert_called_once_with(PORT_ID) - - # Test exception and successful retry - mock_update_progress.reset_mock() - mock_driver.reset_mock() - - net_task.execute(PORT_ID) - - mock_update_progress.assert_has_calls([mock.call(0.5), mock.call(1.0)]) - mock_driver.delete_port.assert_has_calls([mock.call(PORT_ID), - mock.call(PORT_ID)]) - - # Test passive failure - mock_update_progress.reset_mock() - mock_driver.reset_mock() - - net_task.execute(PORT_ID, passive_failure=True) - - mock_update_progress.assert_has_calls([mock.call(0.5), mock.call(1.0)]) - mock_driver.delete_port.assert_has_calls([mock.call(PORT_ID), - mock.call(PORT_ID)]) - mock_driver.admin_down_port.assert_called_once_with(PORT_ID) - - # Test passive failure admin down failure - mock_update_progress.reset_mock() - mock_driver.reset_mock() - mock_driver.admin_down_port.reset_mock() - - net_task.execute(PORT_ID, passive_failure=True) - - mock_update_progress.assert_has_calls([mock.call(0.5), mock.call(1.0)]) - mock_driver.delete_port.assert_has_calls([mock.call(PORT_ID), - mock.call(PORT_ID)]) - mock_driver.admin_down_port.assert_called_once_with(PORT_ID) - - # Test non-passive failure - mock_update_progress.reset_mock() - mock_driver.reset_mock() - mock_driver.admin_down_port.reset_mock() - - mock_driver.admin_down_port.side_effect = [ - exceptions.OctaviaException('boom')] - - self.assertRaises(exceptions.OctaviaException, net_task.execute, - PORT_ID) - - mock_update_progress.assert_has_calls([mock.call(0.5), mock.call(1.0)]) - mock_driver.delete_port.assert_has_calls([mock.call(PORT_ID), - mock.call(PORT_ID)]) - mock_driver.admin_down_port.assert_not_called() - - def test_create_vip_base_port(self, mock_get_net_driver): - AMP_ID = uuidutils.generate_uuid() - PORT_ID = uuidutils.generate_uuid() - VIP_NETWORK_ID = uuidutils.generate_uuid() - VIP_QOS_ID = uuidutils.generate_uuid() - VIP_SG_ID = uuidutils.generate_uuid() - VIP_SUBNET_ID = uuidutils.generate_uuid() - VIP_IP_ADDRESS = '203.0.113.81' - mock_driver = mock.MagicMock() - mock_get_net_driver.return_value = mock_driver - vip_mock = mock.MagicMock() - vip_mock.ip_address = VIP_IP_ADDRESS - vip_mock.network_id = VIP_NETWORK_ID - vip_mock.qos_policy_id = VIP_QOS_ID - vip_mock.subnet_id = VIP_SUBNET_ID - port_mock = mock.MagicMock() - port_mock.id = PORT_ID - - mock_driver.create_port.side_effect = [ - port_mock, 
exceptions.OctaviaException('boom'), - exceptions.OctaviaException('boom'), - exceptions.OctaviaException('boom')] - mock_driver.delete_port.side_effect = [mock.DEFAULT, Exception('boom')] - - net_task = network_tasks.CreateVIPBasePort() - - # Limit the retry attempts for the test run to save time - net_task.execute.retry.stop = tenacity.stop_after_attempt(2) - - # Test execute - result = net_task.execute(vip_mock, VIP_SG_ID, AMP_ID) - - self.assertEqual(port_mock, result) - mock_driver.create_port.assert_called_once_with( - VIP_NETWORK_ID, name=constants.AMP_BASE_PORT_PREFIX + AMP_ID, - fixed_ips=[{constants.SUBNET_ID: VIP_SUBNET_ID}], - secondary_ips=[VIP_IP_ADDRESS], security_group_ids=[VIP_SG_ID], - qos_policy_id=VIP_QOS_ID) - - # Test execute exception - mock_driver.reset_mock() - - self.assertRaises(exceptions.OctaviaException, net_task.execute, - vip_mock, None, AMP_ID) - - # Test revert when this task failed - mock_driver.reset_mock() - - net_task.revert(failure.Failure.from_exception(Exception('boom')), - vip_mock, VIP_SG_ID, AMP_ID) - - mock_driver.delete_port.assert_not_called() - - # Test revert - mock_driver.reset_mock() - - net_task.revert([port_mock], vip_mock, VIP_SG_ID, AMP_ID) - - mock_driver.delete_port.assert_called_once_with(PORT_ID) - - # Test revert exception - mock_driver.reset_mock() - - net_task.revert([port_mock], vip_mock, VIP_SG_ID, AMP_ID) - - mock_driver.delete_port.assert_called_once_with(PORT_ID) - - @mock.patch('time.sleep') - def test_admin_down_port(self, mock_sleep, mock_get_net_driver): - PORT_ID = uuidutils.generate_uuid() - mock_driver = mock.MagicMock() - mock_get_net_driver.return_value = mock_driver - port_down_mock = mock.MagicMock() - port_down_mock.status = constants.DOWN - port_up_mock = mock.MagicMock() - port_up_mock.status = constants.UP - mock_driver.set_port_admin_state_up.side_effect = [ - mock.DEFAULT, net_base.PortNotFound, mock.DEFAULT, mock.DEFAULT, - Exception('boom')] - mock_driver.get_port.side_effect = [port_down_mock, port_up_mock] - - net_task = network_tasks.AdminDownPort() - - # Test execute - net_task.execute(PORT_ID) - - mock_driver.set_port_admin_state_up.assert_called_once_with(PORT_ID, - False) - mock_driver.get_port.assert_called_once_with(PORT_ID) - - # Test passive fail on port not found - mock_driver.reset_mock() - - net_task.execute(PORT_ID) - - mock_driver.set_port_admin_state_up.assert_called_once_with(PORT_ID, - False) - mock_driver.get_port.assert_not_called() - - # Test passive fail on port stays up - mock_driver.reset_mock() - - net_task.execute(PORT_ID) - - mock_driver.set_port_admin_state_up.assert_called_once_with(PORT_ID, - False) - mock_driver.get_port.assert_called_once_with(PORT_ID) - - # Test revert when this task failed - mock_driver.reset_mock() - - net_task.revert(failure.Failure.from_exception(Exception('boom')), - PORT_ID) - - mock_driver.set_port_admin_state_up.assert_not_called() - - # Test revert - mock_driver.reset_mock() - - net_task.revert(None, PORT_ID) - - mock_driver.set_port_admin_state_up.assert_called_once_with(PORT_ID, - True) - - # Test revert exception passive failure - mock_driver.reset_mock() - - net_task.revert(None, PORT_ID) - - mock_driver.set_port_admin_state_up.assert_called_once_with(PORT_ID, - True) - - @mock.patch('octavia.common.utils.get_vip_security_group_name') - def test_get_vip_security_group_id(self, mock_get_sg_name, - mock_get_net_driver): - LB_ID = uuidutils.generate_uuid() - SG_ID = uuidutils.generate_uuid() - SG_NAME = 'fake_SG_name' - mock_driver = 
mock.MagicMock() - mock_get_net_driver.return_value = mock_driver - mock_get_sg_name.return_value = SG_NAME - sg_mock = mock.MagicMock() - sg_mock.id = SG_ID - mock_driver.get_security_group.side_effect = [ - sg_mock, None, net_base.SecurityGroupNotFound, - net_base.SecurityGroupNotFound] - - net_task = network_tasks.GetVIPSecurityGroupID() - - # Test execute - result = net_task.execute(LB_ID) - - mock_driver.get_security_group.assert_called_once_with(SG_NAME) - mock_get_sg_name.assert_called_once_with(LB_ID) - - # Test execute with empty get subnet response - mock_driver.reset_mock() - mock_get_sg_name.reset_mock() - - result = net_task.execute(LB_ID) - - self.assertIsNone(result) - mock_get_sg_name.assert_called_once_with(LB_ID) - - # Test execute no security group found, security groups enabled - mock_driver.reset_mock() - mock_get_sg_name.reset_mock() - mock_driver.sec_grp_enabled = True - - self.assertRaises(net_base.SecurityGroupNotFound, net_task.execute, - LB_ID) - mock_driver.get_security_group.assert_called_once_with(SG_NAME) - mock_get_sg_name.assert_called_once_with(LB_ID) - - # Test execute no security group found, security groups disabled - mock_driver.reset_mock() - mock_get_sg_name.reset_mock() - mock_driver.sec_grp_enabled = False - - result = net_task.execute(LB_ID) - - self.assertIsNone(result) - mock_driver.get_security_group.assert_called_once_with(SG_NAME) - mock_get_sg_name.assert_called_once_with(LB_ID) diff --git a/octavia/tests/unit/controller/worker/v1/tasks/test_retry_tasks.py b/octavia/tests/unit/controller/worker/v1/tasks/test_retry_tasks.py deleted file mode 100644 index 2ac26f4a1e..0000000000 --- a/octavia/tests/unit/controller/worker/v1/tasks/test_retry_tasks.py +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright 2020 Red Hat, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -from unittest import mock - -from taskflow import retry - -from octavia.controller.worker.v1.tasks import retry_tasks -import octavia.tests.unit.base as base - - -class TestRetryTasks(base.TestCase): - - def setUp(self): - super().setUp() - - @mock.patch('time.sleep') - def test_sleeping_retry_times_controller(self, mock_sleep): - retry_ctrlr = retry_tasks.SleepingRetryTimesController( - attempts=2, name='test_retry') - - # Test on_failure that should RETRY - history = ['boom'] - - result = retry_ctrlr.on_failure(history) - - self.assertEqual(retry.RETRY, result) - - # Test on_failure retries exhausted, should REVERT - history = ['boom', 'bang', 'pow'] - - result = retry_ctrlr.on_failure(history) - - self.assertEqual(retry.REVERT, result) - - # Test revert - should not raise - retry_ctrlr.revert(history) diff --git a/octavia/tests/unit/controller/worker/v1/test_controller_worker.py b/octavia/tests/unit/controller/worker/v1/test_controller_worker.py deleted file mode 100644 index ed9224c879..0000000000 --- a/octavia/tests/unit/controller/worker/v1/test_controller_worker.py +++ /dev/null @@ -1,2096 +0,0 @@ -# Copyright 2015 Hewlett-Packard Development Company, L.P. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -from unittest import mock - -from oslo_config import cfg -from oslo_config import fixture as oslo_fixture -from oslo_utils import uuidutils - -from octavia.common import base_taskflow -from octavia.common import constants -from octavia.common import data_models -from octavia.common import exceptions -from octavia.controller.worker.v1 import controller_worker -import octavia.tests.unit.base as base - - -AMP_ID = uuidutils.generate_uuid() -LB_ID = uuidutils.generate_uuid() -POOL_ID = uuidutils.generate_uuid() -HM_ID = uuidutils.generate_uuid() -MEMBER_ID = uuidutils.generate_uuid() -COMPUTE_ID = uuidutils.generate_uuid() -L7POLICY_ID = uuidutils.generate_uuid() -L7RULE_ID = uuidutils.generate_uuid() -HEALTH_UPDATE_DICT = {'delay': 1, 'timeout': 2} -LISTENER_UPDATE_DICT = {'name': 'test', 'description': 'test2'} -MEMBER_UPDATE_DICT = {'weight': 1, 'ip_address': '10.0.0.0'} -POOL_UPDATE_DICT = {'name': 'test', 'description': 'test2'} -L7POLICY_UPDATE_DICT = {'action': constants.L7POLICY_ACTION_REJECT} -L7RULE_UPDATE_DICT = { - 'type': constants.L7RULE_TYPE_PATH, - 'compare_type': constants.L7RULE_COMPARE_TYPE_STARTS_WITH, - 'value': '/api'} - -_amphora_mock = mock.MagicMock() -_flow_mock = mock.MagicMock() -_health_mon_mock = mock.MagicMock() -_vip_mock = mock.MagicMock() -_listener_mock = mock.MagicMock() -_load_balancer_mock = mock.MagicMock() -_load_balancer_mock.listeners = [_listener_mock] -_load_balancer_mock.topology = constants.TOPOLOGY_SINGLE -_load_balancer_mock.flavor_id = None -_load_balancer_mock.availability_zone = None -_member_mock = mock.MagicMock() -_pool_mock = mock.MagicMock() -_l7policy_mock = mock.MagicMock() -_l7rule_mock = mock.MagicMock() -_create_map_flow_mock = mock.MagicMock() -_amphora_mock.load_balancer_id = LB_ID -_amphora_mock.id = AMP_ID -_db_session = mock.MagicMock() - -CONF = cfg.CONF - - -class TestException(Exception): - - def __init__(self, value): - self.value = value - - def __str__(self): - return repr(self.value) - - -@mock.patch('octavia.db.repositories.AmphoraRepository.get', - return_value=_amphora_mock) -@mock.patch('octavia.db.repositories.HealthMonitorRepository.get', - return_value=_health_mon_mock) -@mock.patch('octavia.db.repositories.LoadBalancerRepository.get', - return_value=_load_balancer_mock) -@mock.patch('octavia.db.repositories.ListenerRepository.get', - return_value=_listener_mock) -@mock.patch('octavia.db.repositories.L7PolicyRepository.get', - return_value=_l7policy_mock) -@mock.patch('octavia.db.repositories.L7RuleRepository.get', - return_value=_l7rule_mock) -@mock.patch('octavia.db.repositories.MemberRepository.get', - return_value=_member_mock) -@mock.patch('octavia.db.repositories.PoolRepository.get', - return_value=_pool_mock) -@mock.patch('octavia.common.base_taskflow.BaseTaskFlowEngine.taskflow_load', - return_value=_flow_mock) -@mock.patch('taskflow.listeners.logging.DynamicLoggingListener') -@mock.patch('octavia.db.api.get_session', return_value=_db_session) -class 
TestControllerWorker(base.TestCase): - - def setUp(self): - - self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF)) - - _pool_mock.listeners = [_listener_mock] - _pool_mock.load_balancer = _load_balancer_mock - _health_mon_mock.pool = _pool_mock - _load_balancer_mock.amphorae = _amphora_mock - _load_balancer_mock.vip = _vip_mock - _listener_mock.load_balancer = _load_balancer_mock - _member_mock.pool = _pool_mock - _l7policy_mock.listener = _listener_mock - _l7rule_mock.l7policy = _l7policy_mock - - fetch_mock = mock.MagicMock(return_value=AMP_ID) - _flow_mock.storage.fetch = fetch_mock - - _pool_mock.id = POOL_ID - _health_mon_mock.pool_id = POOL_ID - _health_mon_mock.id = HM_ID - - super().setUp() - - @mock.patch('octavia.controller.worker.v1.flows.' - 'amphora_flows.AmphoraFlows.get_delete_amphora_flow', - return_value='TEST') - def test_delete_amphora(self, - mock_get_delete_amp_flow, - mock_api_get_session, - mock_dyn_log_listener, - mock_taskflow_load, - mock_pool_repo_get, - mock_member_repo_get, - mock_l7rule_repo_get, - mock_l7policy_repo_get, - mock_listener_repo_get, - mock_lb_repo_get, - mock_health_mon_repo_get, - mock_amp_repo_get): - - _flow_mock.reset_mock() - - cw = controller_worker.ControllerWorker() - cw.delete_amphora(_amphora_mock.id) - - (base_taskflow.BaseTaskFlowEngine.taskflow_load. - assert_called_once_with('TEST')) - - mock_get_delete_amp_flow.assert_called_once_with(_amphora_mock) - _flow_mock.run.assert_called_once_with() - - @mock.patch('octavia.controller.worker.v1.flows.' - 'health_monitor_flows.HealthMonitorFlows.' - 'get_create_health_monitor_flow', - return_value=_flow_mock) - def test_create_health_monitor(self, - mock_get_create_hm_flow, - mock_api_get_session, - mock_dyn_log_listener, - mock_taskflow_load, - mock_pool_repo_get, - mock_member_repo_get, - mock_l7rule_repo_get, - mock_l7policy_repo_get, - mock_listener_repo_get, - mock_lb_repo_get, - mock_health_mon_repo_get, - mock_amp_repo_get): - - _flow_mock.reset_mock() - mock_health_mon_repo_get.side_effect = [None, _health_mon_mock] - - cw = controller_worker.ControllerWorker() - cw.create_health_monitor(_health_mon_mock) - - (base_taskflow.BaseTaskFlowEngine.taskflow_load. - assert_called_once_with(_flow_mock, - store={constants.HEALTH_MON: - _health_mon_mock, - constants.LISTENERS: - [_listener_mock], - constants.LOADBALANCER: - _load_balancer_mock, - constants.POOL: - _pool_mock})) - - _flow_mock.run.assert_called_once_with() - self.assertEqual(2, mock_health_mon_repo_get.call_count) - - @mock.patch('octavia.controller.worker.v1.flows.' - 'health_monitor_flows.HealthMonitorFlows.' - 'get_delete_health_monitor_flow', - return_value=_flow_mock) - def test_delete_health_monitor(self, - mock_get_delete_hm_flow, - mock_api_get_session, - mock_dyn_log_listener, - mock_taskflow_load, - mock_pool_repo_get, - mock_member_repo_get, - mock_l7rule_repo_get, - mock_l7policy_repo_get, - mock_listener_repo_get, - mock_lb_repo_get, - mock_health_mon_repo_get, - mock_amp_repo_get): - - _flow_mock.reset_mock() - - cw = controller_worker.ControllerWorker() - cw.delete_health_monitor(HM_ID) - - (base_taskflow.BaseTaskFlowEngine.taskflow_load. - assert_called_once_with(_flow_mock, - store={constants.HEALTH_MON: - _health_mon_mock, - constants.LISTENERS: - [_listener_mock], - constants.LOADBALANCER: - _load_balancer_mock, - constants.POOL: - _pool_mock})) - - _flow_mock.run.assert_called_once_with() - - @mock.patch('octavia.controller.worker.v1.flows.' - 'health_monitor_flows.HealthMonitorFlows.' 
- 'get_update_health_monitor_flow', - return_value=_flow_mock) - def test_update_health_monitor(self, - mock_get_update_hm_flow, - mock_api_get_session, - mock_dyn_log_listener, - mock_taskflow_load, - mock_pool_repo_get, - mock_member_repo_get, - mock_l7rule_repo_get, - mock_l7policy_repo_get, - mock_listener_repo_get, - mock_lb_repo_get, - mock_health_mon_repo_get, - mock_amp_repo_get): - - _flow_mock.reset_mock() - _health_mon_mock.provisioning_status = constants.PENDING_UPDATE - - cw = controller_worker.ControllerWorker() - cw.update_health_monitor(_health_mon_mock.id, - HEALTH_UPDATE_DICT) - - (base_taskflow.BaseTaskFlowEngine.taskflow_load. - assert_called_once_with(_flow_mock, - store={constants.HEALTH_MON: - _health_mon_mock, - constants.POOL: - _pool_mock, - constants.LISTENERS: - [_listener_mock], - constants.LOADBALANCER: - _load_balancer_mock, - constants.UPDATE_DICT: - HEALTH_UPDATE_DICT})) - - _flow_mock.run.assert_called_once_with() - - @mock.patch('octavia.controller.worker.v1.flows.' - 'listener_flows.ListenerFlows.get_create_listener_flow', - return_value=_flow_mock) - def test_create_listener(self, - mock_get_create_listener_flow, - mock_api_get_session, - mock_dyn_log_listener, - mock_taskflow_load, - mock_pool_repo_get, - mock_member_repo_get, - mock_l7rule_repo_get, - mock_l7policy_repo_get, - mock_listener_repo_get, - mock_lb_repo_get, - mock_health_mon_repo_get, - mock_amp_repo_get): - - _flow_mock.reset_mock() - mock_listener_repo_get.side_effect = [None, _listener_mock] - - cw = controller_worker.ControllerWorker() - cw.create_listener(LB_ID) - - (base_taskflow.BaseTaskFlowEngine.taskflow_load. - assert_called_once_with(_flow_mock, - store={constants.LOADBALANCER: - _load_balancer_mock, - constants.LISTENERS: - _load_balancer_mock.listeners})) - - _flow_mock.run.assert_called_once_with() - self.assertEqual(2, mock_listener_repo_get.call_count) - - @mock.patch('octavia.controller.worker.v1.flows.' - 'listener_flows.ListenerFlows.get_delete_listener_flow', - return_value=_flow_mock) - def test_delete_listener(self, - mock_get_delete_listener_flow, - mock_api_get_session, - mock_dyn_log_listener, - mock_taskflow_load, - mock_pool_repo_get, - mock_member_repo_get, - mock_l7rule_repo_get, - mock_l7policy_repo_get, - mock_listener_repo_get, - mock_lb_repo_get, - mock_health_mon_repo_get, - mock_amp_repo_get): - - _flow_mock.reset_mock() - - cw = controller_worker.ControllerWorker() - cw.delete_listener(LB_ID) - - (base_taskflow.BaseTaskFlowEngine.taskflow_load. - assert_called_once_with( - _flow_mock, store={constants.LISTENER: _listener_mock, - constants.LOADBALANCER: _load_balancer_mock})) - - _flow_mock.run.assert_called_once_with() - - @mock.patch('octavia.controller.worker.v1.flows.' - 'listener_flows.ListenerFlows.get_update_listener_flow', - return_value=_flow_mock) - def test_update_listener(self, - mock_get_update_listener_flow, - mock_api_get_session, - mock_dyn_log_listener, - mock_taskflow_load, - mock_pool_repo_get, - mock_member_repo_get, - mock_l7rule_repo_get, - mock_l7policy_repo_get, - mock_listener_repo_get, - mock_lb_repo_get, - mock_health_mon_repo_get, - mock_amp_repo_get): - - _flow_mock.reset_mock() - _listener_mock.provisioning_status = constants.PENDING_UPDATE - - cw = controller_worker.ControllerWorker() - cw.update_listener(LB_ID, LISTENER_UPDATE_DICT) - - (base_taskflow.BaseTaskFlowEngine.taskflow_load. 
- assert_called_once_with(_flow_mock, - store={constants.LISTENER: _listener_mock, - constants.LOADBALANCER: - _load_balancer_mock, - constants.UPDATE_DICT: - LISTENER_UPDATE_DICT, - constants.LISTENERS: - [_listener_mock]})) - - _flow_mock.run.assert_called_once_with() - - def test_create_load_balancer_single_no_anti_affinity( - self, mock_api_get_session, - mock_dyn_log_listener, mock_taskflow_load, mock_pool_repo_get, - mock_member_repo_get, mock_l7rule_repo_get, mock_l7policy_repo_get, - mock_listener_repo_get, mock_lb_repo_get, - mock_health_mon_repo_get, mock_amp_repo_get): - # Test the code path with Nova anti-affinity disabled - self.conf.config(group="nova", enable_anti_affinity=False) - self._test_create_load_balancer_single( - mock_api_get_session, - mock_dyn_log_listener, mock_taskflow_load, mock_pool_repo_get, - mock_member_repo_get, mock_l7rule_repo_get, - mock_l7policy_repo_get, mock_listener_repo_get, - mock_lb_repo_get, mock_health_mon_repo_get, mock_amp_repo_get) - - def test_create_load_balancer_single_anti_affinity( - self, mock_api_get_session, - mock_dyn_log_listener, mock_taskflow_load, mock_pool_repo_get, - mock_member_repo_get, mock_l7rule_repo_get, mock_l7policy_repo_get, - mock_listener_repo_get, mock_lb_repo_get, - mock_health_mon_repo_get, mock_amp_repo_get): - # Test the code path with Nova anti-affinity enabled - self.conf.config(group="nova", enable_anti_affinity=True) - self._test_create_load_balancer_single( - mock_api_get_session, - mock_dyn_log_listener, mock_taskflow_load, mock_pool_repo_get, - mock_member_repo_get, mock_l7rule_repo_get, - mock_l7policy_repo_get, mock_listener_repo_get, - mock_lb_repo_get, mock_health_mon_repo_get, mock_amp_repo_get) - - @mock.patch('octavia.controller.worker.v1.flows.load_balancer_flows.' - 'LoadBalancerFlows.get_create_load_balancer_flow', - return_value=_flow_mock) - def _test_create_load_balancer_single( - self, - mock_api_get_session, - mock_dyn_log_listener, - mock_taskflow_load, - mock_pool_repo_get, - mock_member_repo_get, - mock_l7rule_repo_get, - mock_l7policy_repo_get, - mock_listener_repo_get, - mock_lb_repo_get, - mock_health_mon_repo_get, - mock_amp_repo_get, - mock_get_create_load_balancer_flow): - - # Test the code path with an SINGLE topology - self.conf.config(group="controller_worker", - loadbalancer_topology=constants.TOPOLOGY_SINGLE) - _flow_mock.reset_mock() - mock_taskflow_load.reset_mock() - mock_eng = mock.Mock() - mock_taskflow_load.return_value = mock_eng - store = { - constants.LOADBALANCER_ID: LB_ID, - 'update_dict': {'topology': constants.TOPOLOGY_SINGLE}, - constants.BUILD_TYPE_PRIORITY: constants.LB_CREATE_NORMAL_PRIORITY, - constants.FLAVOR: None, constants.AVAILABILITY_ZONE: None, - constants.SERVER_GROUP_ID: None - } - lb_mock = mock.MagicMock() - lb_mock.listeners = [] - lb_mock.topology = constants.TOPOLOGY_SINGLE - mock_lb_repo_get.side_effect = [None, None, None, lb_mock] - - cw = controller_worker.ControllerWorker() - cw.create_load_balancer(LB_ID) - - mock_get_create_load_balancer_flow.assert_called_with( - topology=constants.TOPOLOGY_SINGLE, listeners=[]) - mock_taskflow_load.assert_called_with( - mock_get_create_load_balancer_flow.return_value, store=store) - mock_eng.run.assert_any_call() - self.assertEqual(4, mock_lb_repo_get.call_count) - - @mock.patch('octavia.controller.worker.v1.flows.load_balancer_flows.' 
- 'LoadBalancerFlows.get_create_load_balancer_flow', - return_value=_flow_mock) - def test_create_load_balancer_active_standby( - self, - mock_get_create_load_balancer_flow, - mock_api_get_session, - mock_dyn_log_listener, - mock_taskflow_load, - mock_pool_repo_get, - mock_member_repo_get, - mock_l7rule_repo_get, - mock_l7policy_repo_get, - mock_listener_repo_get, - mock_lb_repo_get, - mock_health_mon_repo_get, - mock_amp_repo_get): - - self.conf.config( - group="controller_worker", - loadbalancer_topology=constants.TOPOLOGY_ACTIVE_STANDBY) - - _flow_mock.reset_mock() - mock_taskflow_load.reset_mock() - mock_eng = mock.Mock() - mock_taskflow_load.return_value = mock_eng - store = { - constants.LOADBALANCER_ID: LB_ID, - 'update_dict': {'topology': constants.TOPOLOGY_ACTIVE_STANDBY}, - constants.BUILD_TYPE_PRIORITY: constants.LB_CREATE_NORMAL_PRIORITY, - constants.FLAVOR: None, constants.SERVER_GROUP_ID: None, - constants.AVAILABILITY_ZONE: None, - } - setattr(mock_lb_repo_get.return_value, 'topology', - constants.TOPOLOGY_ACTIVE_STANDBY) - setattr(mock_lb_repo_get.return_value, 'listeners', []) - - cw = controller_worker.ControllerWorker() - cw.create_load_balancer(LB_ID) - - mock_get_create_load_balancer_flow.assert_called_with( - topology=constants.TOPOLOGY_ACTIVE_STANDBY, listeners=[]) - mock_taskflow_load.assert_called_with( - mock_get_create_load_balancer_flow.return_value, store=store) - mock_eng.run.assert_any_call() - - @mock.patch('octavia.controller.worker.v1.flows.load_balancer_flows.' - 'LoadBalancerFlows.get_create_load_balancer_flow') - def test_create_load_balancer_full_graph_single( - self, - mock_get_create_load_balancer_flow, - mock_api_get_session, - mock_dyn_log_listener, - mock_taskflow_load, - mock_pool_repo_get, - mock_member_repo_get, - mock_l7rule_repo_get, - mock_l7policy_repo_get, - mock_listener_repo_get, - mock_lb_repo_get, - mock_health_mon_repo_get, - mock_amp_repo_get): - - self.conf.config( - group="controller_worker", - loadbalancer_topology=constants.TOPOLOGY_SINGLE) - - listeners = [data_models.Listener(id='listener1'), - data_models.Listener(id='listener2')] - lb = data_models.LoadBalancer(id=LB_ID, listeners=listeners, - topology=constants.TOPOLOGY_SINGLE) - mock_lb_repo_get.return_value = lb - mock_eng = mock.Mock() - mock_taskflow_load.return_value = mock_eng - store = { - constants.LOADBALANCER_ID: LB_ID, - 'update_dict': {'topology': constants.TOPOLOGY_SINGLE}, - constants.BUILD_TYPE_PRIORITY: constants.LB_CREATE_NORMAL_PRIORITY, - constants.FLAVOR: None, constants.SERVER_GROUP_ID: None, - constants.AVAILABILITY_ZONE: None, - } - - cw = controller_worker.ControllerWorker() - cw.create_load_balancer(LB_ID) - - # mock_create_single_topology.assert_called_once() - # mock_create_active_standby_topology.assert_not_called() - mock_get_create_load_balancer_flow.assert_called_with( - topology=constants.TOPOLOGY_SINGLE, listeners=lb.listeners) - mock_taskflow_load.assert_called_with( - mock_get_create_load_balancer_flow.return_value, store=store) - mock_eng.run.assert_any_call() - - @mock.patch('octavia.controller.worker.v1.flows.load_balancer_flows.' - 'LoadBalancerFlows.get_create_load_balancer_flow') - @mock.patch('octavia.controller.worker.v1.flows.load_balancer_flows.' - 'LoadBalancerFlows._create_single_topology') - @mock.patch('octavia.controller.worker.v1.flows.load_balancer_flows.' 
- 'LoadBalancerFlows._create_active_standby_topology') - def test_create_load_balancer_full_graph_active_standby( - self, - mock_create_active_standby_topology, - mock_create_single_topology, - mock_get_create_load_balancer_flow, - mock_api_get_session, - mock_dyn_log_listener, - mock_taskflow_load, - mock_pool_repo_get, - mock_member_repo_get, - mock_l7rule_repo_get, - mock_l7policy_repo_get, - mock_listener_repo_get, - mock_lb_repo_get, - mock_health_mon_repo_get, - mock_amp_repo_get): - - self.conf.config( - group="controller_worker", - loadbalancer_topology=constants.TOPOLOGY_ACTIVE_STANDBY) - - listeners = [data_models.Listener(id='listener1'), - data_models.Listener(id='listener2')] - lb = data_models.LoadBalancer( - id=LB_ID, listeners=listeners, - topology=constants.TOPOLOGY_ACTIVE_STANDBY) - mock_lb_repo_get.return_value = lb - mock_eng = mock.Mock() - mock_taskflow_load.return_value = mock_eng - store = { - constants.LOADBALANCER_ID: LB_ID, - 'update_dict': {'topology': constants.TOPOLOGY_ACTIVE_STANDBY}, - constants.BUILD_TYPE_PRIORITY: constants.LB_CREATE_NORMAL_PRIORITY, - constants.FLAVOR: None, constants.SERVER_GROUP_ID: None, - constants.AVAILABILITY_ZONE: None, - } - - cw = controller_worker.ControllerWorker() - cw.create_load_balancer(LB_ID) - - mock_get_create_load_balancer_flow.assert_called_with( - topology=constants.TOPOLOGY_ACTIVE_STANDBY, listeners=lb.listeners) - mock_taskflow_load.assert_called_with( - mock_get_create_load_balancer_flow.return_value, store=store) - mock_eng.run.assert_any_call() - - @mock.patch('octavia.controller.worker.v1.flows.load_balancer_flows.' - 'LoadBalancerFlows.get_delete_load_balancer_flow', - return_value=(_flow_mock, {'test': 'test'})) - def test_delete_load_balancer_without_cascade(self, - mock_get_delete_lb_flow, - mock_api_get_session, - mock_dyn_log_listener, - mock_taskflow_load, - mock_pool_repo_get, - mock_member_repo_get, - mock_l7rule_repo_get, - mock_l7policy_repo_get, - mock_listener_repo_get, - mock_lb_repo_get, - mock_health_mon_repo_get, - mock_amp_repo_get): - - _flow_mock.reset_mock() - - cw = controller_worker.ControllerWorker() - cw.delete_load_balancer(LB_ID, cascade=False) - - mock_lb_repo_get.assert_called_once_with( - _db_session, - id=LB_ID) - - (base_taskflow.BaseTaskFlowEngine.taskflow_load. - assert_called_once_with(_flow_mock, - store={constants.LOADBALANCER: - _load_balancer_mock, - constants.SERVER_GROUP_ID: - _load_balancer_mock.server_group_id, - 'test': 'test' - } - ) - ) - _flow_mock.run.assert_called_once_with() - - @mock.patch('octavia.controller.worker.v1.flows.load_balancer_flows.' - 'LoadBalancerFlows.get_cascade_delete_load_balancer_flow', - return_value=(_flow_mock, {'test': 'test'})) - def test_delete_load_balancer_with_cascade(self, - mock_get_delete_lb_flow, - mock_api_get_session, - mock_dyn_log_listener, - mock_taskflow_load, - mock_pool_repo_get, - mock_member_repo_get, - mock_l7rule_repo_get, - mock_l7policy_repo_get, - mock_listener_repo_get, - mock_lb_repo_get, - mock_health_mon_repo_get, - mock_amp_repo_get): - - _flow_mock.reset_mock() - - cw = controller_worker.ControllerWorker() - cw.delete_load_balancer(LB_ID, cascade=True) - - mock_lb_repo_get.assert_called_once_with( - _db_session, - id=LB_ID) - - (base_taskflow.BaseTaskFlowEngine.taskflow_load. 
- assert_called_once_with(_flow_mock, - store={constants.LOADBALANCER: - _load_balancer_mock, - constants.SERVER_GROUP_ID: - _load_balancer_mock.server_group_id, - 'test': 'test' - } - ) - ) - _flow_mock.run.assert_called_once_with() - - @mock.patch('octavia.controller.worker.v1.flows.load_balancer_flows.' - 'LoadBalancerFlows.get_update_load_balancer_flow', - return_value=_flow_mock) - @mock.patch('octavia.db.repositories.ListenerRepository.get_all', - return_value=([_listener_mock], None)) - def test_update_load_balancer(self, - mock_listener_repo_get_all, - mock_get_update_lb_flow, - mock_api_get_session, - mock_dyn_log_listener, - mock_taskflow_load, - mock_pool_repo_get, - mock_member_repo_get, - mock_l7rule_repo_get, - mock_l7policy_repo_get, - mock_listener_repo_get, - mock_lb_repo_get, - mock_health_mon_repo_get, - mock_amp_repo_get): - - _flow_mock.reset_mock() - _load_balancer_mock.provisioning_status = constants.PENDING_UPDATE - - cw = controller_worker.ControllerWorker() - change = 'TEST2' - cw.update_load_balancer(LB_ID, change) - - mock_lb_repo_get.assert_called_once_with( - _db_session, - id=LB_ID) - - (base_taskflow.BaseTaskFlowEngine.taskflow_load. - assert_called_once_with(_flow_mock, - store={constants.UPDATE_DICT: change, - constants.LOADBALANCER: - _load_balancer_mock, - constants.LISTENERS: - [_listener_mock]})) - - _flow_mock.run.assert_called_once_with() - - @mock.patch('octavia.controller.worker.v1.flows.' - 'member_flows.MemberFlows.get_create_member_flow', - return_value=_flow_mock) - @mock.patch('octavia.db.repositories.AvailabilityZoneRepository.' - 'get_availability_zone_metadata_dict') - def test_create_member(self, - mock_get_az_metadata_dict, - mock_get_create_member_flow, - mock_api_get_session, - mock_dyn_log_listener, - mock_taskflow_load, - mock_pool_repo_get, - mock_member_repo_get, - mock_l7rule_repo_get, - mock_l7policy_repo_get, - mock_listener_repo_get, - mock_lb_repo_get, - mock_health_mon_repo_get, - mock_amp_repo_get): - - _flow_mock.reset_mock() - mock_get_az_metadata_dict.return_value = {} - mock_member_repo_get.side_effect = [None, _member_mock] - - cw = controller_worker.ControllerWorker() - cw.create_member(MEMBER_ID) - - (base_taskflow.BaseTaskFlowEngine.taskflow_load. - assert_called_once_with( - _flow_mock, - store={constants.MEMBER: _member_mock, - constants.LISTENERS: [_listener_mock], - constants.LOADBALANCER: _load_balancer_mock, - constants.LOADBALANCER_ID: _load_balancer_mock.id, - constants.POOL: _pool_mock, - constants.AVAILABILITY_ZONE: {}})) - - _flow_mock.run.assert_called_once_with() - self.assertEqual(2, mock_member_repo_get.call_count) - - @mock.patch('octavia.controller.worker.v1.flows.' - 'member_flows.MemberFlows.get_delete_member_flow', - return_value=_flow_mock) - @mock.patch('octavia.db.repositories.AvailabilityZoneRepository.' - 'get_availability_zone_metadata_dict') - def test_delete_member(self, - mock_get_az_metadata_dict, - mock_get_delete_member_flow, - mock_api_get_session, - mock_dyn_log_listener, - mock_taskflow_load, - mock_pool_repo_get, - mock_member_repo_get, - mock_l7rule_repo_get, - mock_l7policy_repo_get, - mock_listener_repo_get, - mock_lb_repo_get, - mock_health_mon_repo_get, - mock_amp_repo_get): - - _flow_mock.reset_mock() - mock_get_az_metadata_dict.return_value = {} - cw = controller_worker.ControllerWorker() - cw.delete_member(MEMBER_ID) - - (base_taskflow.BaseTaskFlowEngine.taskflow_load. 
- assert_called_once_with( - _flow_mock, store={constants.MEMBER: _member_mock, - constants.LISTENERS: - [_listener_mock], - constants.LOADBALANCER: - _load_balancer_mock, - constants.LOADBALANCER_ID: - _load_balancer_mock.id, - constants.POOL: - _pool_mock, - constants.AVAILABILITY_ZONE: {}})) - - _flow_mock.run.assert_called_once_with() - - @mock.patch('octavia.controller.worker.v1.flows.' - 'member_flows.MemberFlows.get_update_member_flow', - return_value=_flow_mock) - @mock.patch('octavia.db.repositories.AvailabilityZoneRepository.' - 'get_availability_zone_metadata_dict') - def test_update_member(self, - mock_get_az_metadata_dict, - mock_get_update_member_flow, - mock_api_get_session, - mock_dyn_log_listener, - mock_taskflow_load, - mock_pool_repo_get, - mock_member_repo_get, - mock_l7rule_repo_get, - mock_l7policy_repo_get, - mock_listener_repo_get, - mock_lb_repo_get, - mock_health_mon_repo_get, - mock_amp_repo_get): - - _flow_mock.reset_mock() - _member_mock.provisioning_status = constants.PENDING_UPDATE - mock_get_az_metadata_dict.return_value = {} - cw = controller_worker.ControllerWorker() - cw.update_member(MEMBER_ID, MEMBER_UPDATE_DICT) - - (base_taskflow.BaseTaskFlowEngine.taskflow_load. - assert_called_once_with(_flow_mock, - store={constants.MEMBER: _member_mock, - constants.LISTENERS: - [_listener_mock], - constants.LOADBALANCER: - _load_balancer_mock, - constants.POOL: - _pool_mock, - constants.UPDATE_DICT: - MEMBER_UPDATE_DICT, - constants.AVAILABILITY_ZONE: {}})) - - _flow_mock.run.assert_called_once_with() - - @mock.patch('octavia.controller.worker.v1.flows.' - 'member_flows.MemberFlows.get_batch_update_members_flow', - return_value=_flow_mock) - @mock.patch('octavia.db.repositories.AvailabilityZoneRepository.' - 'get_availability_zone_metadata_dict') - def test_batch_update_members(self, - mock_get_az_metadata_dict, - mock_get_batch_update_members_flow, - mock_api_get_session, - mock_dyn_log_listener, - mock_taskflow_load, - mock_pool_repo_get, - mock_member_repo_get, - mock_l7rule_repo_get, - mock_l7policy_repo_get, - mock_listener_repo_get, - mock_lb_repo_get, - mock_health_mon_repo_get, - mock_amp_repo_get): - - _flow_mock.reset_mock() - mock_member_repo_get.side_effect = [None, _member_mock, - _member_mock, _member_mock] - mock_get_az_metadata_dict.return_value = {} - cw = controller_worker.ControllerWorker() - cw.batch_update_members([9], [11], [MEMBER_UPDATE_DICT]) - - (base_taskflow.BaseTaskFlowEngine.taskflow_load. - assert_called_once_with(_flow_mock, - store={ - constants.LISTENERS: [_listener_mock], - constants.LOADBALANCER: - _load_balancer_mock, - constants.LOADBALANCER_ID: - _load_balancer_mock.id, - constants.POOL: _pool_mock, - constants.AVAILABILITY_ZONE: {}})) - - _flow_mock.run.assert_called_once_with() - self.assertEqual(4, mock_member_repo_get.call_count) - - @mock.patch('octavia.controller.worker.v1.flows.' - 'pool_flows.PoolFlows.get_create_pool_flow', - return_value=_flow_mock) - def test_create_pool(self, - mock_get_create_listener_flow, - mock_api_get_session, - mock_dyn_log_listener, - mock_taskflow_load, - mock_pool_repo_get, - mock_member_repo_get, - mock_l7rule_repo_get, - mock_l7policy_repo_get, - mock_listener_repo_get, - mock_lb_repo_get, - mock_health_mon_repo_get, - mock_amp_repo_get): - - _flow_mock.reset_mock() - mock_pool_repo_get.side_effect = [None, _pool_mock] - - cw = controller_worker.ControllerWorker() - cw.create_pool(POOL_ID) - - (base_taskflow.BaseTaskFlowEngine.taskflow_load. 
- assert_called_once_with(_flow_mock, - store={constants.POOL: _pool_mock, - constants.LISTENERS: - [_listener_mock], - constants.LOADBALANCER: - _load_balancer_mock})) - - _flow_mock.run.assert_called_once_with() - self.assertEqual(2, mock_pool_repo_get.call_count) - - @mock.patch('octavia.controller.worker.v1.flows.' - 'pool_flows.PoolFlows.get_delete_pool_flow', - return_value=_flow_mock) - def test_delete_pool(self, - mock_get_delete_listener_flow, - mock_api_get_session, - mock_dyn_log_listener, - mock_taskflow_load, - mock_pool_repo_get, - mock_member_repo_get, - mock_l7rule_repo_get, - mock_l7policy_repo_get, - mock_listener_repo_get, - mock_lb_repo_get, - mock_health_mon_repo_get, - mock_amp_repo_get): - - _flow_mock.reset_mock() - - cw = controller_worker.ControllerWorker() - cw.delete_pool(POOL_ID) - - (base_taskflow.BaseTaskFlowEngine.taskflow_load. - assert_called_once_with(_flow_mock, - store={constants.POOL: _pool_mock, - constants.LISTENERS: - [_listener_mock], - constants.LOADBALANCER: - _load_balancer_mock})) - - _flow_mock.run.assert_called_once_with() - - @mock.patch('octavia.controller.worker.v1.flows.' - 'pool_flows.PoolFlows.get_update_pool_flow', - return_value=_flow_mock) - def test_update_pool(self, - mock_get_update_listener_flow, - mock_api_get_session, - mock_dyn_log_listener, - mock_taskflow_load, - mock_pool_repo_get, - mock_member_repo_get, - mock_l7rule_repo_get, - mock_l7policy_repo_get, - mock_listener_repo_get, - mock_lb_repo_get, - mock_health_mon_repo_get, - mock_amp_repo_get): - - _flow_mock.reset_mock() - _pool_mock.provisioning_status = constants.PENDING_UPDATE - - cw = controller_worker.ControllerWorker() - cw.update_pool(POOL_ID, POOL_UPDATE_DICT) - - (base_taskflow.BaseTaskFlowEngine.taskflow_load. - assert_called_once_with(_flow_mock, - store={constants.POOL: _pool_mock, - constants.LISTENERS: - [_listener_mock], - constants.LOADBALANCER: - _load_balancer_mock, - constants.UPDATE_DICT: - POOL_UPDATE_DICT})) - - _flow_mock.run.assert_called_once_with() - - @mock.patch('octavia.controller.worker.v1.flows.' - 'l7policy_flows.L7PolicyFlows.get_create_l7policy_flow', - return_value=_flow_mock) - def test_create_l7policy(self, - mock_get_create_listener_flow, - mock_api_get_session, - mock_dyn_log_listener, - mock_taskflow_load, - mock_pool_repo_get, - mock_member_repo_get, - mock_l7rule_repo_get, - mock_l7policy_repo_get, - mock_listener_repo_get, - mock_lb_repo_get, - mock_health_mon_repo_get, - mock_amp_repo_get): - - _flow_mock.reset_mock() - mock_l7policy_repo_get.side_effect = [None, _l7policy_mock] - - cw = controller_worker.ControllerWorker() - cw.create_l7policy(L7POLICY_ID) - - (base_taskflow.BaseTaskFlowEngine.taskflow_load. - assert_called_once_with(_flow_mock, - store={constants.L7POLICY: _l7policy_mock, - constants.LISTENERS: - [_listener_mock], - constants.LOADBALANCER: - _load_balancer_mock})) - - _flow_mock.run.assert_called_once_with() - self.assertEqual(2, mock_l7policy_repo_get.call_count) - - @mock.patch('octavia.controller.worker.v1.flows.' 
- 'l7policy_flows.L7PolicyFlows.get_delete_l7policy_flow', - return_value=_flow_mock) - def test_delete_l7policy(self, - mock_get_delete_listener_flow, - mock_api_get_session, - mock_dyn_log_listener, - mock_taskflow_load, - mock_pool_repo_get, - mock_member_repo_get, - mock_l7rule_repo_get, - mock_l7policy_repo_get, - mock_listener_repo_get, - mock_lb_repo_get, - mock_health_mon_repo_get, - mock_amp_repo_get): - - _flow_mock.reset_mock() - - cw = controller_worker.ControllerWorker() - cw.delete_l7policy(L7POLICY_ID) - - (base_taskflow.BaseTaskFlowEngine.taskflow_load. - assert_called_once_with(_flow_mock, - store={constants.L7POLICY: _l7policy_mock, - constants.LISTENERS: - [_listener_mock], - constants.LOADBALANCER: - _load_balancer_mock})) - - _flow_mock.run.assert_called_once_with() - - @mock.patch('octavia.controller.worker.v1.flows.' - 'l7policy_flows.L7PolicyFlows.get_update_l7policy_flow', - return_value=_flow_mock) - def test_update_l7policy(self, - mock_get_update_listener_flow, - mock_api_get_session, - mock_dyn_log_listener, - mock_taskflow_load, - mock_pool_repo_get, - mock_member_repo_get, - mock_l7rule_repo_get, - mock_l7policy_repo_get, - mock_listener_repo_get, - mock_lb_repo_get, - mock_health_mon_repo_get, - mock_amp_repo_get): - - _flow_mock.reset_mock() - _l7policy_mock.provisioning_status = constants.PENDING_UPDATE - - cw = controller_worker.ControllerWorker() - cw.update_l7policy(L7POLICY_ID, L7POLICY_UPDATE_DICT) - - (base_taskflow.BaseTaskFlowEngine.taskflow_load. - assert_called_once_with(_flow_mock, - store={constants.L7POLICY: _l7policy_mock, - constants.LISTENERS: - [_listener_mock], - constants.LOADBALANCER: - _load_balancer_mock, - constants.UPDATE_DICT: - L7POLICY_UPDATE_DICT})) - - _flow_mock.run.assert_called_once_with() - - @mock.patch('octavia.controller.worker.v1.flows.' - 'l7rule_flows.L7RuleFlows.get_create_l7rule_flow', - return_value=_flow_mock) - def test_create_l7rule(self, - mock_get_create_listener_flow, - mock_api_get_session, - mock_dyn_log_listener, - mock_taskflow_load, - mock_pool_repo_get, - mock_member_repo_get, - mock_l7rule_repo_get, - mock_l7policy_repo_get, - mock_listener_repo_get, - mock_lb_repo_get, - mock_health_mon_repo_get, - mock_amp_repo_get): - - _flow_mock.reset_mock() - mock_l7rule_repo_get.side_effect = [None, _l7rule_mock] - - cw = controller_worker.ControllerWorker() - cw.create_l7rule(L7RULE_ID) - - (base_taskflow.BaseTaskFlowEngine.taskflow_load. - assert_called_once_with(_flow_mock, - store={constants.L7RULE: _l7rule_mock, - constants.L7POLICY: _l7policy_mock, - constants.LISTENERS: - [_listener_mock], - constants.LOADBALANCER: - _load_balancer_mock})) - - _flow_mock.run.assert_called_once_with() - self.assertEqual(2, mock_l7rule_repo_get.call_count) - - @mock.patch('octavia.controller.worker.v1.flows.' - 'l7rule_flows.L7RuleFlows.get_delete_l7rule_flow', - return_value=_flow_mock) - def test_delete_l7rule(self, - mock_get_delete_listener_flow, - mock_api_get_session, - mock_dyn_log_listener, - mock_taskflow_load, - mock_pool_repo_get, - mock_member_repo_get, - mock_l7rule_repo_get, - mock_l7policy_repo_get, - mock_listener_repo_get, - mock_lb_repo_get, - mock_health_mon_repo_get, - mock_amp_repo_get): - - _flow_mock.reset_mock() - - cw = controller_worker.ControllerWorker() - cw.delete_l7rule(L7RULE_ID) - - (base_taskflow.BaseTaskFlowEngine.taskflow_load. 
- assert_called_once_with(_flow_mock, - store={constants.L7RULE: _l7rule_mock, - constants.L7POLICY: _l7policy_mock, - constants.LISTENERS: - [_listener_mock], - constants.LOADBALANCER: - _load_balancer_mock})) - - _flow_mock.run.assert_called_once_with() - - @mock.patch('octavia.controller.worker.v1.flows.' - 'l7rule_flows.L7RuleFlows.get_update_l7rule_flow', - return_value=_flow_mock) - def test_update_l7rule(self, - mock_get_update_listener_flow, - mock_api_get_session, - mock_dyn_log_listener, - mock_taskflow_load, - mock_pool_repo_get, - mock_member_repo_get, - mock_l7rule_repo_get, - mock_l7policy_repo_get, - mock_listener_repo_get, - mock_lb_repo_get, - mock_health_mon_repo_get, - mock_amp_repo_get): - - _flow_mock.reset_mock() - _l7rule_mock.provisioning_status = constants.PENDING_UPDATE - - cw = controller_worker.ControllerWorker() - cw.update_l7rule(L7RULE_ID, L7RULE_UPDATE_DICT) - - (base_taskflow.BaseTaskFlowEngine.taskflow_load. - assert_called_once_with(_flow_mock, - store={constants.L7RULE: _l7rule_mock, - constants.L7POLICY: _l7policy_mock, - constants.LISTENERS: - [_listener_mock], - constants.LOADBALANCER: - _load_balancer_mock, - constants.UPDATE_DICT: - L7RULE_UPDATE_DICT})) - - _flow_mock.run.assert_called_once_with() - - @mock.patch('octavia.db.repositories.AvailabilityZoneRepository.' - 'get_availability_zone_metadata_dict', return_value={}) - @mock.patch('octavia.db.repositories.FlavorRepository.' - 'get_flavor_metadata_dict', return_value={}) - @mock.patch('octavia.controller.worker.v1.flows.' - 'amphora_flows.AmphoraFlows.get_failover_amphora_flow', - return_value=_flow_mock) - @mock.patch('octavia.db.repositories.LoadBalancerRepository.update') - def test_failover_amphora_lb_single(self, - mock_update, - mock_get_failover_flow, - mock_get_flavor_meta, - mock_get_az_meta, - mock_api_get_session, - mock_dyn_log_listener, - mock_taskflow_load, - mock_pool_repo_get, - mock_member_repo_get, - mock_l7rule_repo_get, - mock_l7policy_repo_get, - mock_listener_repo_get, - mock_lb_repo_get, - mock_health_mon_repo_get, - mock_amp_repo_get): - _flow_mock.reset_mock() - mock_lb_repo_get.return_value = _load_balancer_mock - - cw = controller_worker.ControllerWorker() - cw.failover_amphora(AMP_ID) - - (base_taskflow.BaseTaskFlowEngine.taskflow_load. - assert_called_once_with( - _flow_mock, - store={constants.FLAVOR: {'loadbalancer_topology': - _load_balancer_mock.topology}, - constants.LOADBALANCER: _load_balancer_mock, - constants.LOADBALANCER_ID: - _load_balancer_mock.id, - constants.BUILD_TYPE_PRIORITY: - constants.LB_CREATE_FAILOVER_PRIORITY, - constants.SERVER_GROUP_ID: - _load_balancer_mock.server_group_id, - constants.AVAILABILITY_ZONE: {}, - constants.VIP: _load_balancer_mock.vip - })) - - _flow_mock.run.assert_called_once_with() - - @mock.patch('octavia.db.repositories.AvailabilityZoneRepository.' - 'get_availability_zone_metadata_dict', return_value={}) - @mock.patch('octavia.db.repositories.FlavorRepository.' - 'get_flavor_metadata_dict', return_value={}) - @mock.patch('octavia.controller.worker.v1.flows.' 
- 'amphora_flows.AmphoraFlows.get_failover_amphora_flow', - return_value=_flow_mock) - @mock.patch('octavia.db.repositories.LoadBalancerRepository.update') - def test_failover_amphora_lb_act_stdby(self, - mock_update, - mock_get_failover_flow, - mock_get_flavor_meta, - mock_get_az_meta, - mock_api_get_session, - mock_dyn_log_listener, - mock_taskflow_load, - mock_pool_repo_get, - mock_member_repo_get, - mock_l7rule_repo_get, - mock_l7policy_repo_get, - mock_listener_repo_get, - mock_lb_repo_get, - mock_health_mon_repo_get, - mock_amp_repo_get): - _flow_mock.reset_mock() - load_balancer_mock = mock.MagicMock() - load_balancer_mock.listeners = [_listener_mock] - load_balancer_mock.topology = constants.TOPOLOGY_ACTIVE_STANDBY - load_balancer_mock.flavor_id = None - load_balancer_mock.availability_zone = None - load_balancer_mock.vip = _vip_mock - - mock_lb_repo_get.return_value = load_balancer_mock - - cw = controller_worker.ControllerWorker() - cw.failover_amphora(AMP_ID) - - (base_taskflow.BaseTaskFlowEngine.taskflow_load. - assert_called_once_with( - _flow_mock, - store={constants.FLAVOR: {'loadbalancer_topology': - load_balancer_mock.topology}, - constants.LOADBALANCER: load_balancer_mock, - constants.LOADBALANCER_ID: load_balancer_mock.id, - constants.BUILD_TYPE_PRIORITY: - constants.LB_CREATE_FAILOVER_PRIORITY, - constants.AVAILABILITY_ZONE: {}, - constants.SERVER_GROUP_ID: - load_balancer_mock.server_group_id, - constants.VIP: load_balancer_mock.vip - })) - - _flow_mock.run.assert_called_once_with() - - @mock.patch('octavia.db.repositories.AvailabilityZoneRepository.' - 'get_availability_zone_metadata_dict', return_value={}) - @mock.patch('octavia.db.repositories.FlavorRepository.' - 'get_flavor_metadata_dict', return_value={}) - @mock.patch('octavia.controller.worker.v1.flows.' - 'amphora_flows.AmphoraFlows.get_failover_amphora_flow', - return_value=_flow_mock) - @mock.patch('octavia.db.repositories.LoadBalancerRepository.update') - def test_failover_amphora_unknown_topology(self, - mock_update, - mock_get_failover_flow, - mock_get_flavor_meta, - mock_get_az_meta, - mock_api_get_session, - mock_dyn_log_listener, - mock_taskflow_load, - mock_pool_repo_get, - mock_member_repo_get, - mock_l7rule_repo_get, - mock_l7policy_repo_get, - mock_listener_repo_get, - mock_lb_repo_get, - mock_health_mon_repo_get, - mock_amp_repo_get): - - _flow_mock.reset_mock() - load_balancer_mock = mock.MagicMock() - load_balancer_mock.listeners = [_listener_mock] - load_balancer_mock.topology = 'bogus' - load_balancer_mock.flavor_id = None - load_balancer_mock.availability_zone = None - load_balancer_mock.vip = _vip_mock - - mock_lb_repo_get.return_value = load_balancer_mock - - cw = controller_worker.ControllerWorker() - cw.failover_amphora(AMP_ID) - - (base_taskflow.BaseTaskFlowEngine.taskflow_load. - assert_called_once_with( - _flow_mock, - store={constants.FLAVOR: {'loadbalancer_topology': - load_balancer_mock.topology}, - constants.LOADBALANCER: load_balancer_mock, - constants.LOADBALANCER_ID: load_balancer_mock.id, - constants.BUILD_TYPE_PRIORITY: - constants.LB_CREATE_FAILOVER_PRIORITY, - constants.SERVER_GROUP_ID: - load_balancer_mock.server_group_id, - constants.AVAILABILITY_ZONE: {}, - constants.VIP: load_balancer_mock.vip - })) - - _flow_mock.run.assert_called_once_with() - - @mock.patch('octavia.db.repositories.AvailabilityZoneRepository.' - 'get_availability_zone_metadata_dict', return_value={}) - @mock.patch('octavia.db.repositories.FlavorRepository.' 
- 'get_flavor_metadata_dict', return_value={}) - @mock.patch('octavia.controller.worker.v1.flows.' - 'amphora_flows.AmphoraFlows.get_failover_amphora_flow', - return_value=_flow_mock) - @mock.patch('octavia.db.repositories.LoadBalancerRepository.update') - def test_failover_amphora_with_flavor(self, - mock_update, - mock_get_failover_flow, - mock_get_flavor_meta, - mock_get_az_meta, - mock_api_get_session, - mock_dyn_log_listener, - mock_taskflow_load, - mock_pool_repo_get, - mock_member_repo_get, - mock_l7rule_repo_get, - mock_l7policy_repo_get, - mock_listener_repo_get, - mock_lb_repo_get, - mock_health_mon_repo_get, - mock_amp_repo_get): - _flow_mock.reset_mock() - load_balancer_mock = mock.MagicMock() - load_balancer_mock.listeners = [_listener_mock] - load_balancer_mock.topology = constants.TOPOLOGY_SINGLE - load_balancer_mock.flavor_id = uuidutils.generate_uuid() - load_balancer_mock.availability_zone = None - load_balancer_mock.vip = _vip_mock - mock_get_flavor_meta.return_value = {'taste': 'spicy'} - - mock_lb_repo_get.return_value = load_balancer_mock - - cw = controller_worker.ControllerWorker() - cw.failover_amphora(AMP_ID) - - (base_taskflow.BaseTaskFlowEngine.taskflow_load. - assert_called_once_with( - _flow_mock, - store={constants.FLAVOR: {'loadbalancer_topology': - load_balancer_mock.topology, - 'taste': 'spicy'}, - constants.LOADBALANCER: load_balancer_mock, - constants.LOADBALANCER_ID: load_balancer_mock.id, - constants.BUILD_TYPE_PRIORITY: - constants.LB_CREATE_FAILOVER_PRIORITY, - constants.AVAILABILITY_ZONE: {}, - constants.SERVER_GROUP_ID: - load_balancer_mock.server_group_id, - constants.VIP: load_balancer_mock.vip - })) - - _flow_mock.run.assert_called_once_with() - - @mock.patch('octavia.db.repositories.AvailabilityZoneRepository.' - 'get_availability_zone_metadata_dict', return_value={}) - @mock.patch('octavia.db.repositories.FlavorRepository.' - 'get_flavor_metadata_dict', return_value={}) - @mock.patch('octavia.controller.worker.v1.flows.' - 'amphora_flows.AmphoraFlows.get_failover_amphora_flow', - return_value=_flow_mock) - @mock.patch('octavia.db.repositories.LoadBalancerRepository.update') - def test_failover_amphora_with_az(self, - mock_update, - mock_get_failover_flow, - mock_get_flavor_meta, - mock_get_az_meta, - mock_api_get_session, - mock_dyn_log_listener, - mock_taskflow_load, - mock_pool_repo_get, - mock_member_repo_get, - mock_l7rule_repo_get, - mock_l7policy_repo_get, - mock_listener_repo_get, - mock_lb_repo_get, - mock_health_mon_repo_get, - mock_amp_repo_get): - - _flow_mock.reset_mock() - load_balancer_mock = mock.MagicMock() - load_balancer_mock.listeners = [_listener_mock] - load_balancer_mock.topology = 'bogus' - load_balancer_mock.flavor_id = None - load_balancer_mock.availability_zone = uuidutils.generate_uuid() - load_balancer_mock.vip = _vip_mock - mock_get_az_meta.return_value = {'planet': 'jupiter'} - - mock_lb_repo_get.return_value = load_balancer_mock - - cw = controller_worker.ControllerWorker() - cw.failover_amphora(AMP_ID) - - (base_taskflow.BaseTaskFlowEngine.taskflow_load.
- assert_called_once_with( - _flow_mock, - store={constants.FLAVOR: {'loadbalancer_topology': - load_balancer_mock.topology}, - constants.LOADBALANCER: load_balancer_mock, - constants.LOADBALANCER_ID: load_balancer_mock.id, - constants.BUILD_TYPE_PRIORITY: - constants.LB_CREATE_FAILOVER_PRIORITY, - constants.SERVER_GROUP_ID: - load_balancer_mock.server_group_id, - constants.AVAILABILITY_ZONE: {'planet': 'jupiter'}, - constants.VIP: load_balancer_mock.vip - })) - - _flow_mock.run.assert_called_once_with() - - @mock.patch('octavia.controller.worker.v1.flows.amphora_flows.' - 'AmphoraFlows.get_failover_amphora_flow') - def test_failover_amp_missing_amp(self, - mock_get_amp_failover, - mock_api_get_session, - mock_dyn_log_listener, - mock_taskflow_load, - mock_pool_repo_get, - mock_member_repo_get, - mock_l7rule_repo_get, - mock_l7policy_repo_get, - mock_listener_repo_get, - mock_lb_repo_get, - mock_health_mon_repo_get, - mock_amp_repo_get): - - mock_amp_repo_get.return_value = None - - cw = controller_worker.ControllerWorker() - cw.failover_amphora(AMP_ID) - - mock_get_amp_failover.assert_not_called() - - @mock.patch('octavia.db.repositories.LoadBalancerRepository.update') - def test_failover_amp_flow_exception(self, - mock_update, - mock_api_get_session, - mock_dyn_log_listener, - mock_taskflow_load, - mock_pool_repo_get, - mock_member_repo_get, - mock_l7rule_repo_get, - mock_l7policy_repo_get, - mock_listener_repo_get, - mock_lb_repo_get, - mock_health_mon_repo_get, - mock_amp_repo_get): - - mock_amphora = mock.MagicMock() - mock_amphora.id = AMP_ID - mock_amphora.load_balancer_id = LB_ID - mock_amp_repo_get.return_value = mock_amphora - - mock_lb_repo_get.side_effect = TestException('boom') - cw = controller_worker.ControllerWorker() - cw.failover_amphora(AMP_ID) - mock_update.assert_called_with(_db_session, LB_ID, - provisioning_status=constants.ERROR) - - @mock.patch('octavia.db.repositories.LoadBalancerRepository.update') - def test_failover_amp_flow_exception_reraise(self, - mock_update, - mock_api_get_session, - mock_dyn_log_listener, - mock_taskflow_load, - mock_pool_repo_get, - mock_member_repo_get, - mock_l7rule_repo_get, - mock_l7policy_repo_get, - mock_listener_repo_get, - mock_lb_repo_get, - mock_health_mon_repo_get, - mock_amp_repo_get): - - mock_amphora = mock.MagicMock() - mock_amphora.id = AMP_ID - mock_amphora.load_balancer_id = LB_ID - mock_amp_repo_get.return_value = mock_amphora - - mock_lb_repo_get.side_effect = TestException('boom') - cw = controller_worker.ControllerWorker() - self.assertRaises(TestException, - cw.failover_amphora, - AMP_ID, reraise=True) - - @mock.patch('octavia.controller.worker.v1.flows.amphora_flows.' 
- 'AmphoraFlows.get_failover_amphora_flow') - def test_failover_amp_no_lb(self, - mock_get_failover_amp_flow, - mock_api_get_session, - mock_dyn_log_listener, - mock_taskflow_load, - mock_pool_repo_get, - mock_member_repo_get, - mock_l7rule_repo_get, - mock_l7policy_repo_get, - mock_listener_repo_get, - mock_lb_repo_get, - mock_health_mon_repo_get, - mock_amp_repo_get): - _flow_mock.run.reset_mock() - FAKE_FLOW = 'FAKE_FLOW' - mock_amphora = mock.MagicMock() - mock_amphora.load_balancer_id = None - mock_amphora.id = AMP_ID - mock_amphora.status = constants.AMPHORA_READY - mock_amp_repo_get.return_value = mock_amphora - mock_get_failover_amp_flow.return_value = FAKE_FLOW - expected_stored_params = {constants.AVAILABILITY_ZONE: {}, - constants.BUILD_TYPE_PRIORITY: - constants.LB_CREATE_FAILOVER_PRIORITY, - constants.FLAVOR: {}, - constants.LOADBALANCER: None, - constants.LOADBALANCER_ID: None, - constants.SERVER_GROUP_ID: None, - constants.VIP: None} - - cw = controller_worker.ControllerWorker() - cw.failover_amphora(AMP_ID) - - mock_get_failover_amp_flow.assert_called_once_with(mock_amphora, None) - (base_taskflow.BaseTaskFlowEngine.taskflow_load. - assert_called_once_with(FAKE_FLOW, store=expected_stored_params)) - _flow_mock.run.assert_called_once_with() - - @mock.patch('octavia.db.repositories.AmphoraHealthRepository.delete') - def test_failover_deleted_amphora(self, - mock_delete, - mock_api_get_session, - mock_dyn_log_listener, - mock_taskflow_load, - mock_pool_repo_get, - mock_member_repo_get, - mock_l7rule_repo_get, - mock_l7policy_repo_get, - mock_listener_repo_get, - mock_lb_repo_get, - mock_health_mon_repo_get, - mock_amp_repo_get): - - mock_taskflow_load.reset_mock() - mock_amphora = mock.MagicMock() - mock_amphora.id = AMP_ID - mock_amphora.status = constants.DELETED - mock_amp_repo_get.return_value = mock_amphora - - cw = controller_worker.ControllerWorker() - cw.failover_amphora(AMP_ID) - - mock_delete.assert_called_with(_db_session, amphora_id=AMP_ID) - mock_taskflow_load.assert_not_called() - - def test_get_amphorae_for_failover_single(self, - mock_api_get_session, - mock_dyn_log_listener, - mock_taskflow_load, - mock_pool_repo_get, - mock_member_repo_get, - mock_l7rule_repo_get, - mock_l7policy_repo_get, - mock_listener_repo_get, - mock_lb_repo_get, - mock_health_mon_repo_get, - mock_amp_repo_get): - amphora1_mock = mock.MagicMock() - amphora1_mock.status = constants.AMPHORA_ALLOCATED - amphora2_mock = mock.MagicMock() - amphora2_mock.status = constants.DELETED - - load_balancer_mock = mock.MagicMock() - load_balancer_mock.topology = constants.TOPOLOGY_SINGLE - load_balancer_mock.amphorae = [amphora1_mock, amphora2_mock] - - cw = controller_worker.ControllerWorker() - result = cw._get_amphorae_for_failover(load_balancer_mock) - - self.assertEqual([amphora1_mock], result) - - @mock.patch('octavia.common.utils.get_amphora_driver') - def test_get_amphorae_for_failover_act_stdby(self, - mock_get_amp_driver, - mock_api_get_session, - mock_dyn_log_listener, - mock_taskflow_load, - mock_pool_repo_get, - mock_member_repo_get, - mock_l7rule_repo_get, - mock_l7policy_repo_get, - mock_listener_repo_get, - mock_lb_repo_get, - mock_health_mon_repo_get, - mock_amp_repo_get): - # Note: This test uses three amphora even though we only have - # two per load balancer to properly test the ordering from - # this method. 
amp_driver_mock = mock.MagicMock() - amp_driver_mock.get_interface_from_ip.side_effect = [ - 'fake0', None, 'fake1'] - mock_get_amp_driver.return_value = amp_driver_mock - backup_amphora_mock = mock.MagicMock() - backup_amphora_mock.status = constants.AMPHORA_ALLOCATED - deleted_amphora_mock = mock.MagicMock() - deleted_amphora_mock.status = constants.DELETED - master_amphora_mock = mock.MagicMock() - master_amphora_mock.status = constants.AMPHORA_ALLOCATED - bogus_amphora_mock = mock.MagicMock() - bogus_amphora_mock.status = constants.AMPHORA_ALLOCATED - - load_balancer_mock = mock.MagicMock() - load_balancer_mock.topology = constants.TOPOLOGY_ACTIVE_STANDBY - load_balancer_mock.amphorae = [ - master_amphora_mock, deleted_amphora_mock, backup_amphora_mock, - bogus_amphora_mock] - - cw = controller_worker.ControllerWorker() - result = cw._get_amphorae_for_failover(load_balancer_mock) - - self.assertEqual([master_amphora_mock, bogus_amphora_mock, - backup_amphora_mock], result) - - @mock.patch('octavia.common.utils.get_amphora_driver') - def test_get_amphorae_for_failover_act_stdby_net_split( - self, mock_get_amp_driver, mock_api_get_session, - mock_dyn_log_listener, mock_taskflow_load, mock_pool_repo_get, - mock_member_repo_get, mock_l7rule_repo_get, mock_l7policy_repo_get, - mock_listener_repo_get, mock_lb_repo_get, mock_health_mon_repo_get, - mock_amp_repo_get): - # Case where the amps can't see each other and somehow end up with - # two amphora with an interface. This is highly unlikely as the - # higher priority amphora should get the IP in a net split, but - # let's test the code for this odd case. - # Note: This test uses three amphora even though we only have - # two per load balancer to properly test the ordering from - # this method. - amp_driver_mock = mock.MagicMock() - amp_driver_mock.get_interface_from_ip.side_effect = [ - 'fake0', 'fake1'] - mock_get_amp_driver.return_value = amp_driver_mock - backup_amphora_mock = mock.MagicMock() - backup_amphora_mock.status = constants.AMPHORA_ALLOCATED - deleted_amphora_mock = mock.MagicMock() - deleted_amphora_mock.status = constants.DELETED - master_amphora_mock = mock.MagicMock() - master_amphora_mock.status = constants.AMPHORA_ALLOCATED - - load_balancer_mock = mock.MagicMock() - load_balancer_mock.topology = constants.TOPOLOGY_ACTIVE_STANDBY - load_balancer_mock.amphorae = [ - backup_amphora_mock, deleted_amphora_mock, master_amphora_mock] - - cw = controller_worker.ControllerWorker() - result = cw._get_amphorae_for_failover(load_balancer_mock) - - self.assertEqual([backup_amphora_mock, master_amphora_mock], result) - - def test_get_amphorae_for_failover_bogus_topology(self, - mock_api_get_session, - mock_dyn_log_listener, - mock_taskflow_load, - mock_pool_repo_get, - mock_member_repo_get, - mock_l7rule_repo_get, - mock_l7policy_repo_get, - mock_listener_repo_get, - mock_lb_repo_get, - mock_health_mon_repo_get, - mock_amp_repo_get): - load_balancer_mock = mock.MagicMock() - load_balancer_mock.topology = 'bogus' - - cw = controller_worker.ControllerWorker() - self.assertRaises(exceptions.InvalidTopology, - cw._get_amphorae_for_failover, - load_balancer_mock) - - @mock.patch('octavia.controller.worker.v1.flows.load_balancer_flows.' - 'LoadBalancerFlows.get_failover_LB_flow') - @mock.patch('octavia.controller.worker.v1.controller_worker.'
- 'ControllerWorker._get_amphorae_for_failover') - def test_failover_loadbalancer_single(self, - mock_get_amps_for_failover, - mock_get_failover_lb_flow, - mock_api_get_session, - mock_dyn_log_listener, - mock_taskflow_load, - mock_pool_repo_get, - mock_member_repo_get, - mock_l7rule_repo_get, - mock_l7policy_repo_get, - mock_listener_repo_get, - mock_lb_repo_get, - mock_health_mon_repo_get, - mock_amp_repo_get): - FAKE_FLOW = 'FAKE_FLOW' - _flow_mock.reset_mock() - mock_lb_repo_get.return_value = _load_balancer_mock - mock_get_amps_for_failover.return_value = [_amphora_mock] - mock_get_failover_lb_flow.return_value = FAKE_FLOW - - expected_flavor = {constants.LOADBALANCER_TOPOLOGY: - _load_balancer_mock.topology} - expected_flow_store = {constants.LOADBALANCER: _load_balancer_mock, - constants.BUILD_TYPE_PRIORITY: - constants.LB_CREATE_FAILOVER_PRIORITY, - constants.LOADBALANCER_ID: - _load_balancer_mock.id, - constants.SERVER_GROUP_ID: - _load_balancer_mock.server_group_id, - constants.FLAVOR: expected_flavor, - constants.AVAILABILITY_ZONE: {}} - - cw = controller_worker.ControllerWorker() - cw.failover_loadbalancer(LB_ID) - - mock_lb_repo_get.assert_called_once_with(_db_session, id=LB_ID) - mock_get_amps_for_failover.assert_called_once_with(_load_balancer_mock) - mock_get_failover_lb_flow.assert_called_once_with([_amphora_mock], - _load_balancer_mock) - mock_taskflow_load.assert_called_once_with(FAKE_FLOW, - store=expected_flow_store) - _flow_mock.run.assert_called_once_with() - - @mock.patch('octavia.controller.worker.v1.flows.load_balancer_flows.' - 'LoadBalancerFlows.get_failover_LB_flow') - @mock.patch('octavia.controller.worker.v1.controller_worker.' - 'ControllerWorker._get_amphorae_for_failover') - def test_failover_loadbalancer_act_stdby(self, - mock_get_amps_for_failover, - mock_get_failover_lb_flow, - mock_api_get_session, - mock_dyn_log_listener, - mock_taskflow_load, - mock_pool_repo_get, - mock_member_repo_get, - mock_l7rule_repo_get, - mock_l7policy_repo_get, - mock_listener_repo_get, - mock_lb_repo_get, - mock_health_mon_repo_get, - mock_amp_repo_get): - FAKE_FLOW = 'FAKE_FLOW' - _flow_mock.reset_mock() - load_balancer_mock = mock.MagicMock() - load_balancer_mock.listeners = [_listener_mock] - load_balancer_mock.topology = constants.TOPOLOGY_ACTIVE_STANDBY - load_balancer_mock.flavor_id = None - load_balancer_mock.availability_zone = None - load_balancer_mock.vip = _vip_mock - mock_lb_repo_get.return_value = load_balancer_mock - mock_get_amps_for_failover.return_value = [_amphora_mock, - _amphora_mock] - mock_get_failover_lb_flow.return_value = FAKE_FLOW - - expected_flavor = {constants.LOADBALANCER_TOPOLOGY: - load_balancer_mock.topology} - expected_flow_store = {constants.LOADBALANCER: load_balancer_mock, - constants.BUILD_TYPE_PRIORITY: - constants.LB_CREATE_FAILOVER_PRIORITY, - constants.LOADBALANCER_ID: - load_balancer_mock.id, - constants.SERVER_GROUP_ID: - load_balancer_mock.server_group_id, - constants.FLAVOR: expected_flavor, - constants.AVAILABILITY_ZONE: {}} - - cw = controller_worker.ControllerWorker() - cw.failover_loadbalancer(LB_ID) - - mock_lb_repo_get.assert_called_once_with(_db_session, id=LB_ID) - mock_get_amps_for_failover.assert_called_once_with(load_balancer_mock) - mock_get_failover_lb_flow.assert_called_once_with( - [_amphora_mock, _amphora_mock], load_balancer_mock) - mock_taskflow_load.assert_called_once_with(FAKE_FLOW, - store=expected_flow_store) - _flow_mock.run.assert_called_once_with() - - 
@mock.patch('octavia.db.repositories.LoadBalancerRepository.update') - def test_failover_loadbalancer_no_lb(self, - mock_lb_repo_update, - mock_api_get_session, - mock_dyn_log_listener, - mock_taskflow_load, - mock_pool_repo_get, - mock_member_repo_get, - mock_l7rule_repo_get, - mock_l7policy_repo_get, - mock_listener_repo_get, - mock_lb_repo_get, - mock_health_mon_repo_get, - mock_amp_repo_get): - mock_lb_repo_get.return_value = None - - cw = controller_worker.ControllerWorker() - cw.failover_loadbalancer(LB_ID) - - mock_lb_repo_update.assert_called_once_with( - _db_session, LB_ID, provisioning_status=constants.ERROR) - - @mock.patch('octavia.db.repositories.LoadBalancerRepository.update') - @mock.patch('octavia.controller.worker.v1.controller_worker.' - 'ControllerWorker._get_amphorae_for_failover') - def test_failover_loadbalancer_with_bogus_topology( - self, mock_get_amps_for_failover, mock_lb_repo_update, - mock_api_get_session, mock_dyn_log_listener, mock_taskflow_load, - mock_pool_repo_get, mock_member_repo_get, mock_l7rule_repo_get, - mock_l7policy_repo_get, mock_listener_repo_get, mock_lb_repo_get, - mock_health_mon_repo_get, mock_amp_repo_get): - _flow_mock.reset_mock() - load_balancer_mock = mock.MagicMock() - load_balancer_mock.topology = 'bogus' - mock_lb_repo_get.return_value = load_balancer_mock - mock_get_amps_for_failover.return_value = [_amphora_mock] - - cw = controller_worker.ControllerWorker() - result = cw.failover_loadbalancer(LB_ID) - - self.assertIsNone(result) - mock_lb_repo_update.assert_called_once_with( - _db_session, LB_ID, provisioning_status=constants.ERROR) - mock_lb_repo_get.assert_called_once_with(_db_session, id=LB_ID) - mock_get_amps_for_failover.assert_called_once_with(load_balancer_mock) - - @mock.patch('octavia.db.repositories.AvailabilityZoneRepository.' - 'get_availability_zone_metadata_dict', return_value={}) - @mock.patch('octavia.controller.worker.v1.flows.load_balancer_flows.' - 'LoadBalancerFlows.get_failover_LB_flow') - @mock.patch('octavia.controller.worker.v1.controller_worker.' 
- 'ControllerWorker._get_amphorae_for_failover') - def test_failover_loadbalancer_with_az(self, - mock_get_amps_for_failover, - mock_get_failover_lb_flow, - mock_get_az_meta, - mock_api_get_session, - mock_dyn_log_listener, - mock_taskflow_load, - mock_pool_repo_get, - mock_member_repo_get, - mock_l7rule_repo_get, - mock_l7policy_repo_get, - mock_listener_repo_get, - mock_lb_repo_get, - mock_health_mon_repo_get, - mock_amp_repo_get): - FAKE_FLOW = 'FAKE_FLOW' - _flow_mock.reset_mock() - load_balancer_mock = mock.MagicMock() - load_balancer_mock.listeners = [_listener_mock] - load_balancer_mock.topology = constants.TOPOLOGY_ACTIVE_STANDBY - load_balancer_mock.flavor_id = None - load_balancer_mock.availability_zone = uuidutils.generate_uuid() - load_balancer_mock.vip = _vip_mock - mock_lb_repo_get.return_value = load_balancer_mock - mock_get_amps_for_failover.return_value = [_amphora_mock] - mock_get_failover_lb_flow.return_value = FAKE_FLOW - mock_get_az_meta.return_value = {'planet': 'jupiter'} - - expected_flavor = {constants.LOADBALANCER_TOPOLOGY: - load_balancer_mock.topology} - expected_flow_store = {constants.LOADBALANCER: load_balancer_mock, - constants.BUILD_TYPE_PRIORITY: - constants.LB_CREATE_FAILOVER_PRIORITY, - constants.LOADBALANCER_ID: - load_balancer_mock.id, - constants.FLAVOR: expected_flavor, - constants.SERVER_GROUP_ID: - load_balancer_mock.server_group_id, - constants.AVAILABILITY_ZONE: { - 'planet': 'jupiter'}} - - cw = controller_worker.ControllerWorker() - cw.failover_loadbalancer(LB_ID) - - mock_lb_repo_get.assert_called_once_with(_db_session, id=LB_ID) - mock_get_amps_for_failover.assert_called_once_with(load_balancer_mock) - mock_get_failover_lb_flow.assert_called_once_with([_amphora_mock], - load_balancer_mock) - mock_taskflow_load.assert_called_once_with(FAKE_FLOW, - store=expected_flow_store) - _flow_mock.run.assert_called_once_with() - - @mock.patch('octavia.db.repositories.FlavorRepository.' - 'get_flavor_metadata_dict', return_value={'taste': 'spicy'}) - @mock.patch('octavia.controller.worker.v1.flows.load_balancer_flows.' - 'LoadBalancerFlows.get_failover_LB_flow') - @mock.patch('octavia.controller.worker.v1.controller_worker.' 
- 'ControllerWorker._get_amphorae_for_failover') - def test_failover_loadbalancer_with_flavor(self, - mock_get_amps_for_failover, - mock_get_failover_lb_flow, - mock_get_flavor_meta, - mock_api_get_session, - mock_dyn_log_listener, - mock_taskflow_load, - mock_pool_repo_get, - mock_member_repo_get, - mock_l7rule_repo_get, - mock_l7policy_repo_get, - mock_listener_repo_get, - mock_lb_repo_get, - mock_health_mon_repo_get, - mock_amp_repo_get): - FAKE_FLOW = 'FAKE_FLOW' - _flow_mock.reset_mock() - load_balancer_mock = mock.MagicMock() - load_balancer_mock.listeners = [_listener_mock] - load_balancer_mock.topology = constants.TOPOLOGY_SINGLE - load_balancer_mock.flavor_id = uuidutils.generate_uuid() - load_balancer_mock.availability_zone = None - load_balancer_mock.vip = _vip_mock - mock_lb_repo_get.return_value = load_balancer_mock - mock_get_amps_for_failover.return_value = [_amphora_mock, - _amphora_mock] - mock_get_failover_lb_flow.return_value = FAKE_FLOW - - expected_flavor = {'taste': 'spicy', constants.LOADBALANCER_TOPOLOGY: - load_balancer_mock.topology} - expected_flow_store = {constants.LOADBALANCER: load_balancer_mock, - constants.BUILD_TYPE_PRIORITY: - constants.LB_CREATE_FAILOVER_PRIORITY, - constants.LOADBALANCER_ID: - load_balancer_mock.id, - constants.FLAVOR: expected_flavor, - constants.SERVER_GROUP_ID: - load_balancer_mock.server_group_id, - constants.AVAILABILITY_ZONE: {}} - - cw = controller_worker.ControllerWorker() - cw.failover_loadbalancer(LB_ID) - - mock_lb_repo_get.assert_called_once_with(_db_session, id=LB_ID) - mock_get_amps_for_failover.assert_called_once_with(load_balancer_mock) - mock_get_failover_lb_flow.assert_called_once_with( - [_amphora_mock, _amphora_mock], load_balancer_mock) - mock_taskflow_load.assert_called_once_with(FAKE_FLOW, - store=expected_flow_store) - _flow_mock.run.assert_called_once_with() - - @mock.patch('octavia.db.repositories.AvailabilityZoneRepository.' - 'get_availability_zone_metadata_dict', return_value={}) - @mock.patch('octavia.db.repositories.FlavorRepository.' - 'get_flavor_metadata_dict', return_value={}) - @mock.patch('octavia.controller.worker.v1.flows.' - 'amphora_flows.AmphoraFlows.get_failover_amphora_flow', - return_value=_flow_mock) - @mock.patch( - 'octavia.db.repositories.AmphoraRepository.get_lb_for_amphora', - return_value=_load_balancer_mock) - def test_failover_amphora_anti_affinity(self, - mock_get_lb_for_amphora, - mock_get_update_listener_flow, - mock_get_flavor_meta, - mock_get_az_meta, - mock_api_get_session, - mock_dyn_log_listener, - mock_taskflow_load, - mock_pool_repo_get, - mock_member_repo_get, - mock_l7rule_repo_get, - mock_l7policy_repo_get, - mock_listener_repo_get, - mock_lb_repo_get, - mock_health_mon_repo_get, - mock_amp_repo_get): - - self.conf.config(group="nova", enable_anti_affinity=True) - _flow_mock.reset_mock() - _load_balancer_mock.server_group_id = "123" - - cw = controller_worker.ControllerWorker() - cw.failover_amphora(AMP_ID) - - (base_taskflow.BaseTaskFlowEngine.taskflow_load. 
- assert_called_once_with( - _flow_mock, - store={constants.LOADBALANCER_ID: _load_balancer_mock.id, - constants.BUILD_TYPE_PRIORITY: - constants.LB_CREATE_FAILOVER_PRIORITY, - constants.FLAVOR: {'loadbalancer_topology': - _load_balancer_mock.topology}, - constants.AVAILABILITY_ZONE: {}, - constants.LOADBALANCER: _load_balancer_mock, - constants.VIP: _load_balancer_mock.vip, - constants.SERVER_GROUP_ID: - _load_balancer_mock.server_group_id - })) - - _flow_mock.run.assert_called_once_with() - - @mock.patch('octavia.controller.worker.v1.flows.' - 'amphora_flows.AmphoraFlows.cert_rotate_amphora_flow', - return_value=_flow_mock) - def test_amphora_cert_rotation(self, - mock_get_update_listener_flow, - mock_api_get_session, - mock_dyn_log_listener, - mock_taskflow_load, - mock_pool_repo_get, - mock_member_repo_get, - mock_l7rule_repo_get, - mock_l7policy_repo_get, - mock_listener_repo_get, - mock_lb_repo_get, - mock_health_mon_repo_get, - mock_amp_repo_get): - _flow_mock.reset_mock() - cw = controller_worker.ControllerWorker() - cw.amphora_cert_rotation(AMP_ID) - (base_taskflow.BaseTaskFlowEngine.taskflow_load. - assert_called_once_with(_flow_mock, - store={constants.AMPHORA: _amphora_mock, - constants.AMPHORA_ID: - _amphora_mock.id})) - _flow_mock.run.assert_called_once_with() - - @mock.patch('octavia.db.repositories.FlavorRepository.' - 'get_flavor_metadata_dict') - @mock.patch('octavia.db.repositories.AmphoraRepository.get_lb_for_amphora') - @mock.patch('octavia.controller.worker.v1.flows.' - 'amphora_flows.AmphoraFlows.update_amphora_config_flow', - return_value=_flow_mock) - def test_update_amphora_agent_config(self, - mock_update_flow, - mock_get_lb_for_amp, - mock_flavor_meta, - mock_api_get_session, - mock_dyn_log_listener, - mock_taskflow_load, - mock_pool_repo_get, - mock_member_repo_get, - mock_l7rule_repo_get, - mock_l7policy_repo_get, - mock_listener_repo_get, - mock_lb_repo_get, - mock_health_mon_repo_get, - mock_amp_repo_get): - _flow_mock.reset_mock() - mock_lb = mock.MagicMock() - mock_lb.flavor_id = 'vanilla' - mock_get_lb_for_amp.return_value = mock_lb - mock_flavor_meta.return_value = {'test': 'dict'} - cw = controller_worker.ControllerWorker() - cw.update_amphora_agent_config(AMP_ID) - - mock_amp_repo_get.assert_called_once_with(_db_session, id=AMP_ID) - mock_get_lb_for_amp.assert_called_once_with(_db_session, AMP_ID) - mock_flavor_meta.assert_called_once_with(_db_session, 'vanilla') - (base_taskflow.BaseTaskFlowEngine.taskflow_load. - assert_called_once_with(_flow_mock, - store={constants.AMPHORA: _amphora_mock, - constants.FLAVOR: {'test': 'dict'}})) - _flow_mock.run.assert_called_once_with() - - # Test with no flavor - _flow_mock.reset_mock() - mock_amp_repo_get.reset_mock() - mock_get_lb_for_amp.reset_mock() - mock_flavor_meta.reset_mock() - base_taskflow.BaseTaskFlowEngine.taskflow_load.reset_mock() - mock_lb.flavor_id = None - cw.update_amphora_agent_config(AMP_ID) - mock_amp_repo_get.assert_called_once_with(_db_session, id=AMP_ID) - mock_get_lb_for_amp.assert_called_once_with(_db_session, AMP_ID) - mock_flavor_meta.assert_not_called() - (base_taskflow.BaseTaskFlowEngine.taskflow_load. 
- assert_called_once_with(_flow_mock, - store={constants.AMPHORA: _amphora_mock, - constants.FLAVOR: {}})) - _flow_mock.run.assert_called_once_with() diff --git a/octavia/tests/unit/controller/worker/v2/tasks/test_amphora_driver_tasks.py b/octavia/tests/unit/controller/worker/v2/tasks/test_amphora_driver_tasks.py index 3ea894ddb0..4fd0a947a2 100644 --- a/octavia/tests/unit/controller/worker/v2/tasks/test_amphora_driver_tasks.py +++ b/octavia/tests/unit/controller/worker/v2/tasks/test_amphora_driver_tasks.py @@ -71,7 +71,7 @@ _session_mock = mock.MagicMock() @mock.patch('octavia.db.repositories.ListenerRepository.get', return_value=_listener_mock) @mock.patch('octavia.db.api.get_session', return_value=_session_mock) -@mock.patch('octavia.controller.worker.v1.tasks.amphora_driver_tasks.LOG') +@mock.patch('octavia.controller.worker.v2.tasks.amphora_driver_tasks.LOG') @mock.patch('oslo_utils.uuidutils.generate_uuid', return_value=AMP_ID) @mock.patch('stevedore.driver.DriverManager.driver') class TestAmphoraDriverTasks(base.TestCase): diff --git a/octavia/tests/unit/controller/worker/v2/test_controller_worker.py b/octavia/tests/unit/controller/worker/v2/test_controller_worker.py index 0f0d50609a..e9fd0d3235 100644 --- a/octavia/tests/unit/controller/worker/v2/test_controller_worker.py +++ b/octavia/tests/unit/controller/worker/v2/test_controller_worker.py @@ -1820,7 +1820,7 @@ class TestControllerWorker(base.TestCase): flow_utils.get_failover_amphora_flow, mock_amphora.to_dict(), 1, store=expected_stored_params) - @mock.patch('octavia.controller.worker.v1.flows.amphora_flows.' + @mock.patch('octavia.controller.worker.v2.flows.amphora_flows.' 'AmphoraFlows.get_failover_amphora_flow') def test_failover_amp_missing_amp(self, mock_get_amp_failover, @@ -1893,7 +1893,7 @@ class TestControllerWorker(base.TestCase): cw.failover_amphora, AMP_ID, reraise=True) - @mock.patch('octavia.controller.worker.v1.flows.amphora_flows.' + @mock.patch('octavia.controller.worker.v2.flows.amphora_flows.' 'AmphoraFlows.get_failover_amphora_flow') def test_failover_amp_no_lb(self, mock_get_failover_amp_flow, diff --git a/releasenotes/notes/removing-amphorav1-ff43992c07a2071d.yaml b/releasenotes/notes/removing-amphorav1-ff43992c07a2071d.yaml new file mode 100644 index 0000000000..6dfa25de09 --- /dev/null +++ b/releasenotes/notes/removing-amphorav1-ff43992c07a2071d.yaml @@ -0,0 +1,10 @@ +--- +upgrade: + - | + The *amphorav1* provider was removed. Users who were still using it + are advised to switch to the default *amphora* provider, which is an + alias for the *amphorav2* provider. +deprecations: + - | + The deprecated *amphorav1* provider was removed. The default + provider *amphora* is still an alias for the *amphorav2* provider.
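The upgrade note above amounts to dropping any explicit *amphorav1* provider selection on new load balancers. A minimal sketch of what that looks like from the client side, assuming the openstacksdk load-balancer proxy; the cloud name and subnet ID are placeholders, not part of this patch:

import openstack

# Placeholder cloud name from clouds.yaml; an assumption for illustration.
conn = openstack.connect(cloud='devstack')

# 'amphora' is the default provider and an alias for amphorav2;
# requesting 'amphorav1' now fails because its driver entry point
# was removed by this patch.
lb = conn.load_balancer.create_load_balancer(
    name='my-lb',
    vip_subnet_id='PLACEHOLDER-SUBNET-ID',
    provider='amphora',
)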
diff --git a/setup.cfg b/setup.cfg index f50ec6f45e..aa5dd846f7 100644 --- a/setup.cfg +++ b/setup.cfg @@ -60,7 +60,6 @@ octavia.api.drivers = amphorav2 = octavia.api.drivers.amphora_driver.v2.driver:AmphoraProviderDriver # octavia is an alias for backward compatibility octavia = octavia.api.drivers.amphora_driver.v2.driver:AmphoraProviderDriver - amphorav1 = octavia.api.drivers.amphora_driver.v1.driver:AmphoraProviderDriver octavia.amphora.drivers = amphora_noop_driver = octavia.amphorae.drivers.noop_driver.driver:NoopAmphoraLoadBalancerDriver amphora_haproxy_rest_driver = octavia.amphorae.drivers.haproxy.rest_api_driver:HaproxyAmphoraLoadBalancerDriver @@ -96,7 +95,7 @@ octavia.cert_manager = octavia.barbican_auth = barbican_acl_auth = octavia.certificates.common.auth.barbican_acl:BarbicanACLAuth octavia.plugins = - hot_plug_plugin = octavia.controller.worker.v1.controller_worker:ControllerWorker + hot_plug_plugin = octavia.controller.worker.v2.controller_worker:ControllerWorker octavia.worker.jobboard_driver = redis_taskflow_driver = octavia.controller.worker.v2.taskflow_jobboard_driver:RedisTaskFlowDriver zookeeper_taskflow_driver = octavia.controller.worker.v2.taskflow_jobboard_driver:ZookeeperTaskFlowDriver diff --git a/tools/check_unit_test_structure.sh b/tools/check_unit_test_structure.sh index 642f109e22..c7f226f257 100755 --- a/tools/check_unit_test_structure.sh +++ b/tools/check_unit_test_structure.sh @@ -13,7 +13,6 @@ test_files=$(find ${test_path} -iname 'test_*.py') ignore_regexes=( "^amphorae/drivers/haproxy/test_rest_api_driver_0_5.py$" "^amphorae/drivers/haproxy/test_rest_api_driver_1_0.py$" - "^controller/worker/v1/tasks/test_database_tasks_quota.py$" "^controller/worker/v2/tasks/test_database_tasks_quota.py$" ) diff --git a/tools/flow-list.txt b/tools/flow-list.txt deleted file mode 100644 index 9ac3194c44..0000000000 --- a/tools/flow-list.txt +++ /dev/null @@ -1,32 +0,0 @@ -# List of TaskFlow flows that should be documented -# Some flows are used by other flows, so just list the primary flows here -# Format: -# module class flow -octavia.controller.worker.v1.flows.amphora_flows AmphoraFlows get_create_amphora_flow -octavia.controller.worker.v1.flows.amphora_flows AmphoraFlows get_failover_amphora_flow -octavia.controller.worker.v1.flows.amphora_flows AmphoraFlows cert_rotate_amphora_flow -octavia.controller.worker.v1.flows.load_balancer_flows LoadBalancerFlows get_create_load_balancer_flow -octavia.controller.worker.v1.flows.load_balancer_flows LoadBalancerFlows get_delete_load_balancer_flow -octavia.controller.worker.v1.flows.load_balancer_flows LoadBalancerFlows get_cascade_delete_load_balancer_flow -octavia.controller.worker.v1.flows.load_balancer_flows LoadBalancerFlows get_update_load_balancer_flow -octavia.controller.worker.v1.flows.load_balancer_flows LoadBalancerFlows get_failover_LB_flow -octavia.controller.worker.v1.flows.listener_flows ListenerFlows get_create_listener_flow -octavia.controller.worker.v1.flows.listener_flows ListenerFlows get_create_all_listeners_flow -octavia.controller.worker.v1.flows.listener_flows ListenerFlows get_delete_listener_flow -octavia.controller.worker.v1.flows.listener_flows ListenerFlows get_update_listener_flow -octavia.controller.worker.v1.flows.pool_flows PoolFlows get_create_pool_flow -octavia.controller.worker.v1.flows.pool_flows PoolFlows get_delete_pool_flow -octavia.controller.worker.v1.flows.pool_flows PoolFlows get_update_pool_flow -octavia.controller.worker.v1.flows.member_flows MemberFlows get_create_member_flow 
-octavia.controller.worker.v1.flows.member_flows MemberFlows get_delete_member_flow -octavia.controller.worker.v1.flows.member_flows MemberFlows get_update_member_flow -octavia.controller.worker.v1.flows.member_flows MemberFlows get_batch_update_members_flow -octavia.controller.worker.v1.flows.health_monitor_flows HealthMonitorFlows get_create_health_monitor_flow -octavia.controller.worker.v1.flows.health_monitor_flows HealthMonitorFlows get_delete_health_monitor_flow -octavia.controller.worker.v1.flows.health_monitor_flows HealthMonitorFlows get_update_health_monitor_flow -octavia.controller.worker.v1.flows.l7policy_flows L7PolicyFlows get_create_l7policy_flow -octavia.controller.worker.v1.flows.l7policy_flows L7PolicyFlows get_delete_l7policy_flow -octavia.controller.worker.v1.flows.l7policy_flows L7PolicyFlows get_update_l7policy_flow -octavia.controller.worker.v1.flows.l7rule_flows L7RuleFlows get_create_l7rule_flow -octavia.controller.worker.v1.flows.l7rule_flows L7RuleFlows get_delete_l7rule_flow -octavia.controller.worker.v1.flows.l7rule_flows L7RuleFlows get_update_l7rule_flow
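Provider names reach their driver classes through the 'octavia.api.drivers' entry-point namespace declared in setup.cfg, so deleting the amphorav1 entry point above is what actually removes the provider from the API. A minimal sketch of that lookup, assuming the same stevedore DriverManager API that the deleted tests mock:

from stevedore import driver as stevedore_driver

def load_provider_driver(provider_name):
    # stevedore resolves the name against the entry points declared in
    # setup.cfg; after this patch, 'amphorav1' no longer matches and
    # raises stevedore.exception.NoMatches.
    return stevedore_driver.DriverManager(
        namespace='octavia.api.drivers',
        name=provider_name,
        invoke_on_load=True).driver

# 'octavia', 'amphora' and 'amphorav2' all resolve to the v2
# AmphoraProviderDriver after this change.
provider_driver = load_provider_driver('amphora')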