From 424c320b019d82345354073a3678f2d8bde52da8 Mon Sep 17 00:00:00 2001 From: Michael Johnson Date: Fri, 30 Jan 2015 00:48:32 +0000 Subject: [PATCH] Implements Octavia Controller Worker Co-Authored-By: Aishwarya Thangappa Co-Authored-By: German Eichberger Implements: blueprint controller-worker Change-Id: If44a70d6ada43673d827987081e7c760598523bd --- etc/octavia.conf | 31 + octavia/common/base_taskflow.py | 45 ++ octavia/common/config.py | 44 ++ octavia/common/constants.py | 53 +- octavia/common/exceptions.py | 14 +- octavia/controller/worker/__init__.py | 0 .../controller/worker/controller_worker.py | 469 +++++++++++ octavia/controller/worker/flows/__init__.py | 0 .../controller/worker/flows/amphora_flows.py | 115 +++ .../worker/flows/health_monitor_flows.py | 73 ++ .../controller/worker/flows/listener_flows.py | 73 ++ .../worker/flows/load_balancer_flows.py | 108 +++ .../controller/worker/flows/member_flows.py | 76 ++ octavia/controller/worker/flows/pool_flows.py | 72 ++ octavia/controller/worker/tasks/__init__.py | 0 .../worker/tasks/amphora_driver_tasks.py | 179 +++++ .../controller/worker/tasks/compute_tasks.py | 110 +++ .../worker/tasks/controller_tasks.py | 91 +++ .../controller/worker/tasks/database_tasks.py | 700 ++++++++++++++++ .../controller/worker/tasks/model_tasks.py | 33 + .../controller/worker/tasks/network_tasks.py | 235 ++++++ ...48660b6643f0_add_new_states_for_amphora.py | 51 ++ octavia/db/repositories.py | 45 +- .../tests/functional/db/test_repositories.py | 21 + .../tests/unit/common/test_base_taskflow.py | 64 ++ .../tests/unit/controller/worker/__init__.py | 0 .../unit/controller/worker/flows/__init__.py | 0 .../worker/flows/test_amphora_flows.py | 67 ++ .../worker/flows/test_health_monitor_flows.py | 72 ++ .../worker/flows/test_listener_flows.py | 67 ++ .../worker/flows/test_load_balancer_flows.py | 76 ++ .../worker/flows/test_member_flows.py | 69 ++ .../worker/flows/test_pool_flows.py | 70 ++ .../unit/controller/worker/tasks/__init__.py | 0 
.../worker/tasks/test_amphora_driver_tasks.py | 237 ++++++ .../worker/tasks/test_compute_tasks.py | 141 ++++ .../worker/tasks/test_controller_tasks.py | 120 +++ .../worker/tasks/test_database_tasks.py | 746 ++++++++++++++++++ .../worker/tasks/test_model_tasks.py | 44 ++ .../worker/tasks/test_network_tasks.py | 211 +++++ .../worker/test_controller_worker.py | 602 ++++++++++++++ requirements.txt | 2 + setup.cfg | 10 + 43 files changed, 5230 insertions(+), 6 deletions(-) create mode 100644 octavia/common/base_taskflow.py create mode 100644 octavia/controller/worker/__init__.py create mode 100644 octavia/controller/worker/controller_worker.py create mode 100644 octavia/controller/worker/flows/__init__.py create mode 100644 octavia/controller/worker/flows/amphora_flows.py create mode 100644 octavia/controller/worker/flows/health_monitor_flows.py create mode 100644 octavia/controller/worker/flows/listener_flows.py create mode 100644 octavia/controller/worker/flows/load_balancer_flows.py create mode 100644 octavia/controller/worker/flows/member_flows.py create mode 100644 octavia/controller/worker/flows/pool_flows.py create mode 100644 octavia/controller/worker/tasks/__init__.py create mode 100644 octavia/controller/worker/tasks/amphora_driver_tasks.py create mode 100644 octavia/controller/worker/tasks/compute_tasks.py create mode 100644 octavia/controller/worker/tasks/controller_tasks.py create mode 100644 octavia/controller/worker/tasks/database_tasks.py create mode 100644 octavia/controller/worker/tasks/model_tasks.py create mode 100644 octavia/controller/worker/tasks/network_tasks.py create mode 100644 octavia/db/migration/alembic_migrations/versions/48660b6643f0_add_new_states_for_amphora.py create mode 100644 octavia/tests/unit/common/test_base_taskflow.py create mode 100644 octavia/tests/unit/controller/worker/__init__.py create mode 100644 octavia/tests/unit/controller/worker/flows/__init__.py create mode 100644 
octavia/tests/unit/controller/worker/flows/test_amphora_flows.py create mode 100644 octavia/tests/unit/controller/worker/flows/test_health_monitor_flows.py create mode 100644 octavia/tests/unit/controller/worker/flows/test_listener_flows.py create mode 100644 octavia/tests/unit/controller/worker/flows/test_load_balancer_flows.py create mode 100644 octavia/tests/unit/controller/worker/flows/test_member_flows.py create mode 100644 octavia/tests/unit/controller/worker/flows/test_pool_flows.py create mode 100644 octavia/tests/unit/controller/worker/tasks/__init__.py create mode 100644 octavia/tests/unit/controller/worker/tasks/test_amphora_driver_tasks.py create mode 100644 octavia/tests/unit/controller/worker/tasks/test_compute_tasks.py create mode 100644 octavia/tests/unit/controller/worker/tasks/test_controller_tasks.py create mode 100644 octavia/tests/unit/controller/worker/tasks/test_database_tasks.py create mode 100644 octavia/tests/unit/controller/worker/tasks/test_model_tasks.py create mode 100644 octavia/tests/unit/controller/worker/tasks/test_network_tasks.py create mode 100644 octavia/tests/unit/controller/worker/test_controller_worker.py diff --git a/etc/octavia.conf b/etc/octavia.conf index 577cb574ce..732f017bff 100644 --- a/etc/octavia.conf +++ b/etc/octavia.conf @@ -7,6 +7,8 @@ # bind_port = 9876 # api_handler = simulated_handler +# nova_region_name = + [database] # This line MUST be changed to actually run the plugin. 
# Example: @@ -57,3 +59,32 @@ # base_log_dir = /logs # connection_max_retries = 10 # connection_retry_threshold = 5 + +[controller_worker] +# amp_active_wait_sec = 10 +# Nova parameters to use when booting amphora +# amp_flavor_id = +# amp_image_id = +# amp_ssh_key = +# amp_network = +# amp_secgroup_list = + +# Amphora driver options are amphora_noop_driver, +# amphora_haproxy_rest_driver, +# amphora_haproxy_ssh_driver +# +# amphora_driver = amphora_noop_driver +# +# Compute driver options are compute_noop_driver +# compute_nova_driver +# +# compute_driver = compute_noop_driver +# +# Network driver options are network_noop_driver +# allowed_address_pairs_driver +# +# network_driver = network_noop_driver + +[task_flow] +# engine = serial +# max_workers = 5 diff --git a/octavia/common/base_taskflow.py b/octavia/common/base_taskflow.py new file mode 100644 index 0000000000..35ef974805 --- /dev/null +++ b/octavia/common/base_taskflow.py @@ -0,0 +1,45 @@ +# Copyright 2014-2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +import concurrent.futures +from oslo.config import cfg +from taskflow import engines as tf_engines + + +CONF = cfg.CONF +CONF.import_group('task_flow', 'octavia.common.config') + + +class BaseTaskFlowEngine(object): + """This is the task flow engine + + Use this engine to start/load flows in the + code + """ + + def __init__(self): + self.executor = concurrent.futures.ThreadPoolExecutor( + max_workers=CONF.task_flow.max_workers) + + def _taskflow_load(self, flow, **kwargs): + eng = tf_engines.load( + flow, + engine_conf=CONF.task_flow.engine, + executor=self.executor, + **kwargs) + eng.compile() + eng.prepare() + + return eng diff --git a/octavia/common/config.py b/octavia/common/config.py index 09635c236c..b3c0836ff0 100644 --- a/octavia/common/config.py +++ b/octavia/common/config.py @@ -104,6 +104,48 @@ haproxy_amphora_opts = [ 'attempts.')) ] +controller_worker_opts = [ + cfg.IntOpt('amp_active_retries', + default=10, + help=_('Retry attempts to wait for Amphora to become active')), + cfg.IntOpt('amp_active_wait_sec', + default=10, + help=_('Seconds to wait for an Amphora to become active')), + cfg.StrOpt('amp_flavor_id', + default='', + help=_('Nova instance flavor id for the Amphora')), + cfg.StrOpt('amp_image_id', + default='', + help=_('Glance image id for the Amphora image to boot')), + cfg.StrOpt('amp_ssh_key', + default='', + help=_('SSH key to load into the Amphora')), + cfg.StrOpt('amp_network', + default='', + help=_('Network to attach to the Amphora')), + cfg.ListOpt('amp_secgroup_list', + default='', + help=_('List of security groups to attach to the Amphora')), + cfg.StrOpt('amphora_driver', + default='amphora_noop_driver', + help=_('Name of the amphora driver to use')), + cfg.StrOpt('compute_driver', + default='compute_noop_driver', + help=_('Name of the compute driver to use')), + cfg.StrOpt('network_driver', + default='network_noop_driver', + help=_('Name of the network driver to use')) +] + +task_flow_opts = [ + cfg.StrOpt('engine', + 
default='serial', + help=_('TaskFlow engine to use')), + cfg.IntOpt('max_workers', + default=5, + help=_('The maximum number of workers')) +] + core_cli_opts = [] # Register the configuration options @@ -111,6 +153,8 @@ cfg.CONF.register_opts(core_opts) cfg.CONF.register_opts(networking_opts, group='networking') cfg.CONF.register_opts(oslo_messaging_opts, group='oslo_messaging') cfg.CONF.register_opts(haproxy_amphora_opts, group='haproxy_amphora') +cfg.CONF.register_opts(controller_worker_opts, group='controller_worker') +cfg.CONF.register_opts(task_flow_opts, group='task_flow') cfg.CONF.register_cli_opts(core_cli_opts) cfg.CONF.import_group('keystone_authtoken', 'keystonemiddleware.auth_token') cfg.CONF.register_opts(keystone_authtoken_v3_opts, diff --git a/octavia/common/constants.py b/octavia/common/constants.py index 4f4e698322..7b0a313d5e 100644 --- a/octavia/common/constants.py +++ b/octavia/common/constants.py @@ -37,16 +37,37 @@ PROTOCOL_HTTPS = 'HTTPS' PROTOCOL_TERMINATED_HTTPS = 'TERMINATED_HTTPS' SUPPORTED_PROTOCOLS = (PROTOCOL_TCP, PROTOCOL_HTTPS, PROTOCOL_HTTP) +# Note: The database Amphora table has a foreign key constraint against +# the provisioning_status table +# Amphora has been allocated to a load balancer +AMPHORA_ALLOCATED = 'ALLOCATED' +# Amphora healthy with listener(s) deployed +# TODO(johnsom) This doesn't exist +AMPHORA_UP = 'UP' +# Amphora unhealthy with listener(s) deployed +# TODO(johnsom) This doesn't exist +AMPHORA_DOWN = 'DOWN' +# Amphora is being built +AMPHORA_BOOTING = 'BOOTING' +# Amphora is ready to be allocated to a load balancer +AMPHORA_READY = 'READY' + ACTIVE = 'ACTIVE' PENDING_DELETE = 'PENDING_DELETE' PENDING_UPDATE = 'PENDING_UPDATE' PENDING_CREATE = 'PENDING_CREATE' DELETED = 'DELETED' ERROR = 'ERROR' -SUPPORTED_PROVISIONING_STATUSES = (ACTIVE, PENDING_DELETE, PENDING_CREATE, +SUPPORTED_PROVISIONING_STATUSES = (ACTIVE, AMPHORA_ALLOCATED, + AMPHORA_BOOTING, AMPHORA_READY, + PENDING_DELETE, PENDING_CREATE, 
PENDING_UPDATE, DELETED, ERROR) MUTABLE_STATUSES = (ACTIVE,) +SUPPORTED_AMPHORA_STATUSES = (AMPHORA_ALLOCATED, AMPHORA_UP, AMPHORA_DOWN, + AMPHORA_BOOTING, AMPHORA_READY, DELETED, + PENDING_DELETE) + ONLINE = 'ONLINE' OFFLINE = 'OFFLINE' DEGRADED = 'DEGRADED' @@ -56,9 +77,33 @@ SUPPORTED_OPERATING_STATUSES = (ONLINE, OFFLINE, DEGRADED, ERROR) AMPHORA_VM = 'VM' SUPPORTED_AMPHORA_TYPES = (AMPHORA_VM,) -AMPHORA_UP = 'UP' -AMPHORA_DOWN = 'DOWN' -SUPPORTED_AMPHORA_STATUSES = (AMPHORA_UP, AMPHORA_DOWN) +# Task/Flow constants +AMPHORA = 'amphora' +DELTA = 'delta' +LISTENER = 'listener' +LOADBALANCER = 'loadbalancer' +NICS = 'nics' +VIP = 'vip' + +CREATE_AMPHORA_FLOW = 'octavia-create-amphora-flow' +CREATE_AMPHORA_FOR_LB_FLOW = 'octavia-create-amp-for-lb-flow' +CREATE_HEALTH_MONITOR_FLOW = 'octavia-create-health-monitor-flow' +CREATE_LISTENER_FLOW = 'octavia-create-listener_flow' +CREATE_LOADBALANCER_FLOW = 'octavia-create-loadbalancer-flow' +CREATE_MEMBER_FLOW = 'octavia-create-member-flow' +CREATE_POOL_FLOW = 'octavia-create-pool-flow' +DELETE_AMPHORA_FLOW = 'octavia-delete-amphora-flow' +DELETE_HEALTH_MONITOR_FLOW = 'octavia-delete-health-monitor-flow' +DELETE_LISTENER_FLOW = 'octavia-delete-listener_flow' +DELETE_LOADBALANCER_FLOW = 'octavia-delete-loadbalancer-flow' +DELETE_MEMBER_FLOW = 'octavia-delete-member-flow' +DELETE_POOL_FLOW = 'octavia-delete-pool-flow' +LOADBALANCER_NETWORKING_SUBFLOW = 'octavia-new-loadbalancer-net-subflow' +UPDATE_HEALTH_MONITOR_FLOW = 'octavia-update-health-monitor-flow' +UPDATE_LISTENER_FLOW = 'octavia-update-listener-flow' +UPDATE_LOADBALANCER_FLOW = 'octavia-update-loadbalancer-flow' +UPDATE_MEMBER_FLOW = 'octavia-update-member-flow' +UPDATE_POOL_FLOW = 'octavia-update-pool-flow' NOVA_1 = '1.1' NOVA_2 = '2' diff --git a/octavia/common/exceptions.py b/octavia/common/exceptions.py index 854d536f02..20ca8d70b2 100644 --- a/octavia/common/exceptions.py +++ b/octavia/common/exceptions.py @@ -20,7 +20,7 @@ Octavia base exception handling. 
from oslo_utils import excutils from webob import exc -from octavia.i18n import _LE +from octavia.i18n import _LE, _LI class OctaviaException(Exception): @@ -151,3 +151,15 @@ class ComputeStatusException(OctaviaException): class IDAlreadyExists(OctaviaException): message = _LE('Already an entity with that specified id.') code = 409 + + +class NoSuitableAmphoraException(OctaviaException): + message = _LE('Unable to allocate an amphora due to: %(msg)s') + + +# This is an internal use exception for the taskflow work flow +# and will not be exposed to the customer. This means it is a +# normal part of operation while waiting for nova to go active +# on the instance +class ComputeWaitTimeoutException(OctaviaException): + message = _LI('Waiting for compute to go active timeout.') diff --git a/octavia/controller/worker/__init__.py b/octavia/controller/worker/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/octavia/controller/worker/controller_worker.py b/octavia/controller/worker/controller_worker.py new file mode 100644 index 0000000000..28ae2fff43 --- /dev/null +++ b/octavia/controller/worker/controller_worker.py @@ -0,0 +1,469 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +import logging + +from octavia.common import base_taskflow +from octavia.common import exceptions +from octavia.controller.worker.flows import amphora_flows +from octavia.controller.worker.flows import health_monitor_flows +from octavia.controller.worker.flows import listener_flows +from octavia.controller.worker.flows import load_balancer_flows +from octavia.controller.worker.flows import member_flows +from octavia.controller.worker.flows import pool_flows +from octavia.db import api as db_apis +from octavia.db import repositories as repo + +from taskflow.listeners import logging as tf_logging + +LOG = logging.getLogger(__name__) + + +class ControllerWorker(base_taskflow.BaseTaskFlowEngine): + + def __init__(self): + + self._amphora_flows = amphora_flows.AmphoraFlows() + self._health_monitor_flows = health_monitor_flows.HealthMonitorFlows() + self._lb_flows = load_balancer_flows.LoadBalancerFlows() + self._listener_flows = listener_flows.ListenerFlows() + self._member_flows = member_flows.MemberFlows() + self._pool_flows = pool_flows.PoolFlows() + + self._amphora_repo = repo.AmphoraRepository() + self._health_mon_repo = repo.HealthMonitorRepository() + self._lb_repo = repo.LoadBalancerRepository() + self._listener_repo = repo.ListenerRepository() + self._member_repo = repo.MemberRepository() + self._pool_repo = repo.PoolRepository() + + super(ControllerWorker, self).__init__() + + def create_amphora(self): + """Creates an Amphora. + + :returns: amphora_id + """ + create_amp_tf = self._taskflow_load(self._amphora_flows. + get_create_amphora_flow()) + with tf_logging.DynamicLoggingListener(create_amp_tf, + log=LOG): + create_amp_tf.run() + + return create_amp_tf.storage.fetch('amphora') + + def delete_amphora(self, amphora_id): + """Deletes an existing Amphora. 
+ + :param amphora_id: ID of the amphora to delete + :returns: None + :raises AmphoraNotFound: The referenced Amphora was not found + """ + amphora = self._amphora_repo.get(db_apis.get_session(), + id=amphora_id) + delete_amp_tf = self._taskflow_load(self._amphora_flows. + get_delete_amphora_flow(), + store={'amphora': amphora}) + with tf_logging.DynamicLoggingListener(delete_amp_tf, + log=LOG): + delete_amp_tf.run() + + def create_health_monitor(self, health_monitor_id): + """Creates a health monitor. + + :param health_monitor_id: ID of the health monitor to create + :returns: None + :raises NoSuitablePool: Unable to find the node pool + """ + health_mon = self._health_mon_repo.get(db_apis.get_session(), + pool_id=health_monitor_id) + + listener = health_mon.pool.listener + health_mon.pool.health_monitor = health_mon + listener.default_pool = health_mon.pool + vip = health_mon.pool.listener.load_balancer.vip + load_balancer = health_mon.pool.listener.load_balancer + + create_hm_tf = self._taskflow_load(self._health_monitor_flows. + get_create_health_monitor_flow(), + store={'health_mon': health_mon, + 'listener': listener, + 'loadbalancer': + load_balancer, + 'vip': vip}) + with tf_logging.DynamicLoggingListener(create_hm_tf, + log=LOG): + create_hm_tf.run() + + def delete_health_monitor(self, health_monitor_id): + """Deletes a health monitor. + + :param health_monitor_id: ID of the health monitor to delete + :returns: None + :raises HMNotFound: The referenced health monitor was not found + """ + health_mon = self._health_mon_repo.get(db_apis.get_session(), + pool_id=health_monitor_id) + + listener = health_mon.pool.listener + health_mon.pool.health_monitor = health_mon + listener.default_pool = health_mon.pool + vip = health_mon.pool.listener.load_balancer.vip + + delete_hm_tf = self._taskflow_load(self._health_monitor_flows. 
+ get_delete_health_monitor_flow(), + store={'health_mon': health_mon, + 'health_mon_id': + health_monitor_id, + 'listener': listener, + 'vip': vip}) + with tf_logging.DynamicLoggingListener(delete_hm_tf, + log=LOG): + delete_hm_tf.run() + + def update_health_monitor(self, health_monitor_id, health_monitor_updates): + """Updates a health monitor. + + :param health_monitor_id: ID of the health monitor to update + :param health_monitor_updates: Dict containing updated health monitor + :returns: None + :raises HMNotFound: The referenced health monitor was not found + """ + health_mon = self._health_mon_repo.get(db_apis.get_session(), + pool_id=health_monitor_id) + + listener = health_mon.pool.listener + health_mon.pool.health_monitor = health_mon + listener.default_pool = health_mon.pool + vip = health_mon.pool.listener.load_balancer.vip + load_balancer = health_mon.pool.listener.load_balancer + + update_hm_tf = self._taskflow_load(self._health_monitor_flows. + get_update_health_monitor_flow(), + store={'health_mon': health_mon, + 'listener': listener, + 'loadbalancer': + load_balancer, + 'vip': vip, + 'update_dict': + health_monitor_updates}) + with tf_logging.DynamicLoggingListener(update_hm_tf, + log=LOG): + update_hm_tf.run() + + def create_listener(self, listener_id): + """Creates a listener. + + :param listener_id: ID of the listener to create + :returns: None + :raises NoSuitableLB: Unable to find the load balancer + """ + listener = self._listener_repo.get(db_apis.get_session(), + id=listener_id) + load_balancer = listener.load_balancer + vip = listener.load_balancer.vip + + create_listener_tf = self._taskflow_load(self._listener_flows. + get_create_listener_flow(), + store={'listener': listener, + 'loadbalancer': + load_balancer, + 'vip': vip}) + with tf_logging.DynamicLoggingListener(create_listener_tf, + log=LOG): + create_listener_tf.run() + + def delete_listener(self, listener_id): + """Deletes a listener. 
+ + :param listener_id: ID of the listener to delete + :returns: None + :raises ListenerNotFound: The referenced listener was not found + """ + listener = self._listener_repo.get(db_apis.get_session(), + id=listener_id) + vip = listener.load_balancer.vip + + delete_listener_tf = self._taskflow_load(self._listener_flows. + get_delete_listener_flow(), + store={'listener': listener, + 'vip': vip}) + with tf_logging.DynamicLoggingListener(delete_listener_tf, + log=LOG): + delete_listener_tf.run() + + def update_listener(self, listener_id, listener_updates): + """Updates a listener. + + :param listener_id: ID of the listener to update + :param listener_updates: Dict containing updated listener attributes + :returns: None + :raises ListenerNotFound: The referenced listener was not found + """ + listener = self._listener_repo.get(db_apis.get_session(), + id=listener_id) + + load_balancer = listener.load_balancer + vip = listener.load_balancer.vip + + update_listener_tf = self._taskflow_load(self._listener_flows. + get_update_listener_flow(), + store={'listener': listener, + 'vip': vip, + 'loadbalancer': + load_balancer, + 'update_dict': + listener_updates}) + with tf_logging.DynamicLoggingListener(update_listener_tf, log=LOG): + update_listener_tf.run() + + def create_load_balancer(self, load_balancer_id): + """Creates a load balancer by allocating Amphorae. + + First tries to allocate an existing Amphora in READY state. + If none are available it will attempt to build one specificly + for this load balancer. + + :param load_balancer_id: ID of the load balancer to create + :returns: None + :raises NoSuitableAmphoraException: Unable to allocate an Amphora. + """ + + # Note this is a bit strange in how it handles building + # Amphora if there are no spares. TaskFlow has a spec for + # a conditional flow that would make this cleaner once implemented. 
+ # https://review.openstack.org/#/c/98946/ + + lb = self._lb_repo.get(db_apis.get_session(), + id=load_balancer_id) + + create_lb_tf = self._taskflow_load(self._lb_flows. + get_create_load_balancer_flow(), + store={'loadbalancer': lb}) + with tf_logging.DynamicLoggingListener(create_lb_tf, + log=LOG): + amp = None + try: + create_lb_tf.run() + amp = create_lb_tf.storage.fetch('amphora') + except Exception: + pass + + if amp is None: + + create_amp_lb_tf = self._taskflow_load( + self._amphora_flows.get_create_amphora_for_lb_flow(), + store={'loadbalancer': lb}) + + with tf_logging.DynamicLoggingListener(create_amp_lb_tf, + log=LOG): + try: + create_amp_lb_tf.run() + except exceptions.ComputeBuildException as e: + raise exceptions.NoSuitableAmphoraException(msg=e.msg) + + def delete_load_balancer(self, load_balancer_id): + """Deletes a load balancer by de-allocating Amphorae. + + :param load_balancer_id: ID of the load balancer to delete + :returns: None + :raises LBNotFound: The referenced load balancer was not found + """ + lb = self._lb_repo.get(db_apis.get_session(), + id=load_balancer_id) + + delete_lb_tf = self._taskflow_load(self._lb_flows. + get_delete_load_balancer_flow(), + store={'loadbalancer': lb}) + + with tf_logging.DynamicLoggingListener(delete_lb_tf, + log=LOG): + delete_lb_tf.run() + + def update_load_balancer(self, load_balancer_id, load_balancer_updates): + """Updates a load balancer. + + :param load_balancer_id: ID of the load balancer to update + :param load_balancer_updates: Dict containing updated load balancer + :returns: None + :raises LBNotFound: The referenced load balancer was not found + """ + lb = self._lb_repo.get(db_apis.get_session(), + id=load_balancer_id) + + update_lb_tf = self._taskflow_load(self._lb_flows. + get_update_load_balancer_flow(), + store={'loadbalancer': lb}) + with tf_logging.DynamicLoggingListener(update_lb_tf, + log=LOG): + update_lb_tf.run() + + def create_member(self, member_id): + """Creates a pool member. 
+ + :param member_id: ID of the member to create + :returns: None + :raises NoSuitablePool: Unable to find the node pool + """ + member = self._member_repo.get(db_apis.get_session(), + id=member_id) + + listener = member.pool.listener + listener.default_pool = member.pool + load_balancer = listener.load_balancer + vip = listener.load_balancer.vip + + create_member_tf = self._taskflow_load(self._member_flows. + get_create_member_flow(), + store={'member': member, + 'listener': listener, + 'loadbalancer': + load_balancer, + 'vip': vip}) + with tf_logging.DynamicLoggingListener(create_member_tf, + log=LOG): + create_member_tf.run() + + def delete_member(self, member_id): + """Deletes a pool member. + + :param memberr_id: ID of the member to delete + :returns: None + :raises MemberNotFound: The referenced member was not found + """ + member = self._member_repo.get(db_apis.get_session(), + id=member_id) + + listener = member.pool.listener + listener.default_pool = member.pool + vip = listener.load_balancer.vip + + delete_member_tf = self._taskflow_load(self._member_flows. + get_delete_member_flow(), + store={'member': member, + 'member_id': member_id, + 'listener': listener, + 'vip': vip}) + with tf_logging.DynamicLoggingListener(delete_member_tf, + log=LOG): + delete_member_tf.run() + + def update_member(self, member_id, member_updates): + """Updates a pool member. + + :param member_id: ID of the member to update + :param member_updates: Dict containing updated member attributes + :returns: None + :raises MemberNotFound: The referenced member was not found + """ + member = self._member_repo.get(db_apis.get_session(), + id=member_id) + + listener = member.pool.listener + listener.default_pool = member.pool + load_balancer = listener.load_balancer + vip = listener.load_balancer.vip + + update_member_tf = self._taskflow_load(self._member_flows. 
+ get_update_member_flow(), + store={'member': member, + 'listener': listener, + 'loadbalancer': + load_balancer, + 'vip': vip, + 'update_dict': + member_updates}) + with tf_logging.DynamicLoggingListener(update_member_tf, + log=LOG): + update_member_tf.run() + + def create_pool(self, pool_id): + """Creates a node pool. + + :param pool_id: ID of the pool to create + :returns: None + :raises NoSuitableLB: Unable to find the load balancer + """ + pool = self._pool_repo.get(db_apis.get_session(), + id=pool_id) + + listener = pool.listener + listener.default_pool = pool + load_balancer = listener.load_balancer + vip = listener.load_balancer.vip + + create_pool_tf = self._taskflow_load(self._pool_flows. + get_create_pool_flow(), + store={'pool': pool, + 'listener': listener, + 'loadbalancer': + load_balancer, + 'vip': vip}) + with tf_logging.DynamicLoggingListener(create_pool_tf, + log=LOG): + create_pool_tf.run() + + def delete_pool(self, pool_id): + """Deletes a node pool. + + :param pool_id: ID of the pool to delete + :returns: None + :raises PoolNotFound: The referenced pool was not found + """ + pool = self._pool_repo.get(db_apis.get_session(), + id=pool_id) + + listener = pool.listener + listener.default_pool = pool + vip = listener.load_balancer.vip + + delete_pool_tf = self._taskflow_load(self._pool_flows. + get_delete_pool_flow(), + store={'pool': pool, + 'pool_id': pool_id, + 'listener': listener, + 'vip': vip}) + with tf_logging.DynamicLoggingListener(delete_pool_tf, + log=LOG): + delete_pool_tf.run() + + def update_pool(self, pool_id, pool_updates): + """Updates a node pool. 
+ + :param pool_id: ID of the pool to update + :param pool_updates: Dict containing updated pool attributes + :returns: None + :raises PoolNotFound: The referenced pool was not found + """ + pool = self._pool_repo.get(db_apis.get_session(), + id=pool_id) + + listener = pool.listener + listener.default_pool = pool + load_balancer = listener.load_balancer + vip = listener.load_balancer.vip + + update_pool_tf = self._taskflow_load(self._pool_flows. + get_update_pool_flow(), + store={'pool': pool, + 'listener': listener, + 'loadbalancer': + load_balancer, + 'vip': vip, + 'update_dict': + pool_updates}) + with tf_logging.DynamicLoggingListener(update_pool_tf, + log=LOG): + update_pool_tf.run() diff --git a/octavia/controller/worker/flows/__init__.py b/octavia/controller/worker/flows/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/octavia/controller/worker/flows/amphora_flows.py b/octavia/controller/worker/flows/amphora_flows.py new file mode 100644 index 0000000000..8e73030ed9 --- /dev/null +++ b/octavia/controller/worker/flows/amphora_flows.py @@ -0,0 +1,115 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +from oslo.config import cfg +from taskflow.patterns import linear_flow +from taskflow import retry + +from octavia.common import constants +from octavia.controller.worker.flows import load_balancer_flows +from octavia.controller.worker.tasks import amphora_driver_tasks +from octavia.controller.worker.tasks import compute_tasks +from octavia.controller.worker.tasks import controller_tasks +from octavia.controller.worker.tasks import database_tasks + +CONF = cfg.CONF +CONF.import_group('controller_worker', 'octavia.common.config') + + +class AmphoraFlows(object): + + def __init__(self): + self._lb_flows = load_balancer_flows.LoadBalancerFlows() + + def get_create_amphora_flow(self): + """Creates a flow to create an amphora. + + Ideally that should be configurable in the + config file - a db session needs to be placed + into the flow + + :returns: The flow for creating the amphora + """ + create_amphora_flow = linear_flow.Flow(constants.CREATE_AMPHORA_FLOW) + create_amphora_flow.add(database_tasks.CreateAmphoraInDB( + provides='amphora')) + create_amphora_flow.add(compute_tasks.ComputeCreate()) + create_amphora_flow.add(database_tasks.MarkAmphoraBootingInDB()) + wait_flow = linear_flow.Flow('wait_for_amphora', + retry=retry.Times(CONF. + controller_worker. + amp_active_retries)) + wait_flow.add(compute_tasks.ComputeWait()) + create_amphora_flow.add(wait_flow) + create_amphora_flow.add(amphora_driver_tasks.AmphoraFinalize()) + create_amphora_flow.add(database_tasks.MarkAmphoraReadyInDB()) + + return create_amphora_flow + + def get_create_amphora_for_lb_flow(self): + """Creates a flow to create an amphora for a load balancer. + + This flow is used when there are no spare amphora available + for a new load balancer. It builds an amphora and allocates + for the specific load balancer. + + :returns: The The flow for creating the amphora + """ + create_amp_for_lb_flow = linear_flow.Flow(constants. 
+ CREATE_AMPHORA_FOR_LB_FLOW) + create_amp_for_lb_flow.add(database_tasks.CreateAmphoraInDB()) + create_amp_for_lb_flow.add(compute_tasks.ComputeCreate()) + create_amp_for_lb_flow.add(database_tasks.MarkAmphoraBootingInDB()) + wait_flow = linear_flow.Flow('wait_for_amphora', + retry=retry.Times(CONF. + controller_worker. + amp_active_retries)) + wait_flow.add(compute_tasks.ComputeWait()) + create_amp_for_lb_flow.add(wait_flow) + create_amp_for_lb_flow.add(amphora_driver_tasks. + AmphoraFinalize()) + create_amp_for_lb_flow.add(database_tasks. + MarkAmphoraAllocatedInDB( + requires='loadbalancer')) + create_amp_for_lb_flow.add(database_tasks.GetAmphoraByID( + requires='amphora_id', + provides='amphora')) + create_amp_for_lb_flow.add(database_tasks.GetLoadbalancerByID( + requires='loadbalancer_id', + provides='loadbalancer')) + new_LB_net_subflow = self._lb_flows.get_new_LB_networking_subflow() + create_amp_for_lb_flow.add(new_LB_net_subflow) + create_amp_for_lb_flow.add(database_tasks.MarkLBActiveInDB( + requires='loadbalancer')) + + return create_amp_for_lb_flow + + def get_delete_amphora_flow(self): + """Creates a flow to delete an amphora. + + This should be configurable in the config file + :returns: The flow for deleting the amphora + :raises AmphoraNotFound: The referenced Amphora was not found + """ + + delete_amphora_flow = linear_flow.Flow(constants.DELETE_AMPHORA_FLOW) + delete_amphora_flow.add(controller_tasks.DeleteLoadBalancersOnAmp( + requires='amphora')) +# TODO(johnsom) make this just delete it + delete_amphora_flow.add(database_tasks. + MarkAmphoraPendingDeleteInDB( + requires='amphora')) + + return delete_amphora_flow diff --git a/octavia/controller/worker/flows/health_monitor_flows.py b/octavia/controller/worker/flows/health_monitor_flows.py new file mode 100644 index 0000000000..510a88c8f9 --- /dev/null +++ b/octavia/controller/worker/flows/health_monitor_flows.py @@ -0,0 +1,73 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +from taskflow.patterns import linear_flow + +from octavia.common import constants +from octavia.controller.worker.tasks import amphora_driver_tasks +from octavia.controller.worker.tasks import database_tasks +from octavia.controller.worker.tasks import model_tasks + + +class HealthMonitorFlows(object): + + def get_create_health_monitor_flow(self): + """Create a flow to create a health monitor + + :returns: The flow for creating a health monitor + """ + create_hm_flow = linear_flow.Flow(constants.CREATE_HEALTH_MONITOR_FLOW) + create_hm_flow.add(amphora_driver_tasks.ListenerUpdate( + requires=['listener', 'vip'])) + create_hm_flow.add(database_tasks.MarkLBAndListenerActiveInDB( + requires=['loadbalancer', 'listener'])) + + return create_hm_flow + + def get_delete_health_monitor_flow(self): + """Create a flow to delete a health monitor + + :returns: The flow for deleting a health monitor + """ + delete_hm_flow = linear_flow.Flow(constants.DELETE_HEALTH_MONITOR_FLOW) + delete_hm_flow.add(model_tasks. 
+ DeleteModelObject(rebind={'object': 'health_mon'})) + delete_hm_flow.add(amphora_driver_tasks.ListenerUpdate( + requires=['listener', 'vip'])) + delete_hm_flow.add(database_tasks.DeleteHealthMonitorInDB( + requires='health_mon_id')) + delete_hm_flow.add(database_tasks.MarkLBAndListenerActiveInDB( + requires=['loadbalancer', 'listener'])) + + return delete_hm_flow + + def get_update_health_monitor_flow(self): + """Create a flow to update a health monitor + + :returns: The flow for updating a health monitor + """ + update_hm_flow = linear_flow.Flow(constants.UPDATE_HEALTH_MONITOR_FLOW) + update_hm_flow.add(model_tasks. + UpdateAttributes( + rebind={'object': 'health_mon'}, + requires=['update_dict'])) + update_hm_flow.add(amphora_driver_tasks.ListenerUpdate( + requires=['listener', 'vip'])) + update_hm_flow.add(database_tasks.UpdateHealthMonInDB( + requires=['health_mon', 'update_dict'])) + update_hm_flow.add(database_tasks.MarkLBAndListenerActiveInDB( + requires=['loadbalancer', 'listener'])) + + return update_hm_flow diff --git a/octavia/controller/worker/flows/listener_flows.py b/octavia/controller/worker/flows/listener_flows.py new file mode 100644 index 0000000000..52115dd822 --- /dev/null +++ b/octavia/controller/worker/flows/listener_flows.py @@ -0,0 +1,73 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +from taskflow.patterns import linear_flow + +from octavia.common import constants +from octavia.controller.worker.tasks import amphora_driver_tasks +from octavia.controller.worker.tasks import database_tasks +from octavia.controller.worker.tasks import model_tasks + + +class ListenerFlows(object): + + def get_create_listener_flow(self): + """Create a flow to create a listener + + :returns: The flow for creating a listener + """ + create_listener_flow = linear_flow.Flow(constants.CREATE_LISTENER_FLOW) + create_listener_flow.add(amphora_driver_tasks.ListenerUpdate( + requires=['listener', 'vip'])) + create_listener_flow.add(database_tasks. + MarkLBAndListenerActiveInDB( + requires=['loadbalancer', 'listener'])) + + return create_listener_flow + + def get_delete_listener_flow(self): + """Create a flow to delete a listener + + :returns: The flow for deleting a listener + """ + delete_listener_flow = linear_flow.Flow(constants.DELETE_LISTENER_FLOW) + delete_listener_flow.add(amphora_driver_tasks.ListenerDelete( + requires=['listener', 'vip'])) + delete_listener_flow.add(database_tasks.MarkListenerDeletedInDB( + requires='listener')) + delete_listener_flow.add(database_tasks. + MarkLBActiveInDB(requires='loadbalancer')) + + return delete_listener_flow + + def get_update_listener_flow(self): + """Create a flow to update a listener + + :returns: The flow for updating a listener + """ + update_listener_flow = linear_flow.Flow(constants.UPDATE_LISTENER_FLOW) + update_listener_flow.add(model_tasks. + UpdateAttributes( + rebind={'object': 'listener'}, + requires=['update_dict'])) + update_listener_flow.add(amphora_driver_tasks.ListenerUpdate( + requires=['listener', 'vip'])) + update_listener_flow.add(database_tasks.UpdateListenerInDB( + requires=['listener', 'update_dict'])) + update_listener_flow.add(database_tasks. 
+ MarkLBAndListenerActiveInDB( + requires=['loadbalancer', 'listener'])) + + return update_listener_flow diff --git a/octavia/controller/worker/flows/load_balancer_flows.py b/octavia/controller/worker/flows/load_balancer_flows.py new file mode 100644 index 0000000000..06c4c529a2 --- /dev/null +++ b/octavia/controller/worker/flows/load_balancer_flows.py @@ -0,0 +1,108 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +from oslo.config import cfg +from taskflow.patterns import linear_flow + +from octavia.common import constants +from octavia.controller.worker.tasks import amphora_driver_tasks +from octavia.controller.worker.tasks import controller_tasks +from octavia.controller.worker.tasks import database_tasks +from octavia.controller.worker.tasks import network_tasks + +CONF = cfg.CONF +CONF.import_group('controller_worker', 'octavia.common.config') + + +class LoadBalancerFlows(object): + + def get_create_load_balancer_flow(self): + """Creates a flow to create a load balancer. + + :returns: The flow for creating a load balancer + """ + + # Note this flow is a bit strange in how it handles building + # Amphora if there are no spares. TaskFlow has a spec for + # a conditional flow that would make this cleaner once implemented. 
+ # https://review.openstack.org/#/c/98946/ + + create_LB_flow = linear_flow.Flow(constants.CREATE_LOADBALANCER_FLOW) + create_LB_flow.add(database_tasks.MapLoadbalancerToAmphora( + requires='loadbalancer', + provides='amphora')) + create_LB_flow.add(database_tasks.GetAmphoraByID( + requires='amphora_id', + provides='amphora')) + create_LB_flow.add(database_tasks.GetLoadbalancerByID( + requires='loadbalancer_id', + provides='loadbalancer')) + new_LB_net_subflow = self.get_new_LB_networking_subflow() + create_LB_flow.add(new_LB_net_subflow) + create_LB_flow.add(database_tasks.MarkLBActiveInDB( + requires='loadbalancer')) + + return create_LB_flow + + def get_delete_load_balancer_flow(self): + """Creates a flow to delete a load balancer. + + :returns: The flow for deleting a load balancer + """ + delete_LB_flow = linear_flow.Flow(constants.DELETE_LOADBALANCER_FLOW) + delete_LB_flow.add(controller_tasks.DeleteListenersOnLB( + requires='loadbalancer')) +# TODO(johnsom) tear down the unplug vips? and networks + delete_LB_flow.add(database_tasks.MarkLBDeletedInDB( + requires='loadbalancer')) + + return delete_LB_flow + + def get_new_LB_networking_subflow(self): + """Create a sub-flow to setup networking. + + :returns: The flow to setup networking for a new amphora + """ + + new_LB_net_subflow = linear_flow.Flow(constants. 
+ LOADBALANCER_NETWORKING_SUBFLOW) + new_LB_net_subflow.add(network_tasks.GetPlumbedNetworks( + requires='amphora', + provides='nics')) + new_LB_net_subflow.add(network_tasks.CalculateDelta( + requires=['amphora', 'nics'], + provides='delta')) + new_LB_net_subflow.add(network_tasks.PlugNetworks( + requires=['amphora', 'delta'])) + new_LB_net_subflow.add(amphora_driver_tasks.AmphoraPostNetworkPlug( + requires='amphora')) + new_LB_net_subflow.add(network_tasks.PlugVIP(requires='amphora')) + new_LB_net_subflow.add(amphora_driver_tasks.AmphoraPostVIPPlug( + requires='loadbalancer')) + + return new_LB_net_subflow + + def get_update_load_balancer_flow(self): + """Creates a flow to update a load balancer. + + :returns: The flow for update a load balancer + """ + update_LB_flow = linear_flow.Flow(constants.UPDATE_LOADBALANCER_FLOW) + update_LB_flow.add(controller_tasks.DisableEnableLB( + requires='loadbalancer')) + update_LB_flow.add(database_tasks.MarkLBActiveInDB( + requires='loadbalancer')) + + return update_LB_flow diff --git a/octavia/controller/worker/flows/member_flows.py b/octavia/controller/worker/flows/member_flows.py new file mode 100644 index 0000000000..200f7e5f04 --- /dev/null +++ b/octavia/controller/worker/flows/member_flows.py @@ -0,0 +1,76 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +from taskflow.patterns import linear_flow + +from octavia.common import constants +from octavia.controller.worker.tasks import amphora_driver_tasks +from octavia.controller.worker.tasks import database_tasks +from octavia.controller.worker.tasks import model_tasks + + +class MemberFlows(object): + + def get_create_member_flow(self): + """Create a flow to create a member + + :returns: The flow for creating a member + """ + create_member_flow = linear_flow.Flow(constants.CREATE_MEMBER_FLOW) + create_member_flow.add(amphora_driver_tasks.ListenerUpdate( + requires=['listener', 'vip'])) + create_member_flow.add(database_tasks. + MarkLBAndListenerActiveInDB( + requires=['loadbalancer', 'listener'])) + + return create_member_flow + + def get_delete_member_flow(self): + """Create a flow to delete a member + + :returns: The flow for deleting a member + """ + delete_member_flow = linear_flow.Flow(constants.DELETE_MEMBER_FLOW) + delete_member_flow.add(model_tasks. + DeleteModelObject(rebind={'object': 'member'})) + delete_member_flow.add(amphora_driver_tasks.ListenerUpdate( + requires=['listener', 'vip'])) + delete_member_flow.add(database_tasks.DeleteMemberInDB( + requires='member_id')) + delete_member_flow.add(database_tasks. + MarkLBAndListenerActiveInDB( + requires=['loadbalancer', 'listener'])) + + return delete_member_flow + + def get_update_member_flow(self): + """Create a flow to update a member + + :returns: The flow for updating a member + """ + update_member_flow = linear_flow.Flow(constants.UPDATE_MEMBER_FLOW) + update_member_flow.add(model_tasks. + UpdateAttributes( + rebind={'object': 'member'}, + requires=['update_dict'])) + update_member_flow.add(amphora_driver_tasks.ListenerUpdate( + requires=['listener', 'vip'])) + update_member_flow.add(database_tasks.UpdateMemberInDB( + requires=['member', 'update_dict'])) + update_member_flow.add(database_tasks. 
+ MarkLBAndListenerActiveInDB( + requires=['loadbalancer', 'listener'])) + + return update_member_flow diff --git a/octavia/controller/worker/flows/pool_flows.py b/octavia/controller/worker/flows/pool_flows.py new file mode 100644 index 0000000000..54387ed32c --- /dev/null +++ b/octavia/controller/worker/flows/pool_flows.py @@ -0,0 +1,72 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +from taskflow.patterns import linear_flow + +from octavia.common import constants +from octavia.controller.worker.tasks import amphora_driver_tasks +from octavia.controller.worker.tasks import database_tasks +from octavia.controller.worker.tasks import model_tasks + + +class PoolFlows(object): + + def get_create_pool_flow(self): + """Create a flow to create a pool + + :returns: The flow for creating a pool + """ + create_pool_flow = linear_flow.Flow(constants.CREATE_POOL_FLOW) + create_pool_flow.add(amphora_driver_tasks.ListenerUpdate( + requires=['listener', 'vip'])) + create_pool_flow.add(database_tasks.MarkLBAndListenerActiveInDB( + requires=['loadbalancer', 'listener'])) + + return create_pool_flow + + def get_delete_pool_flow(self): + """Create a flow to delete a pool + + :returns: The flow for deleting a pool + """ + delete_pool_flow = linear_flow.Flow(constants.DELETE_POOL_FLOW) + delete_pool_flow.add(model_tasks. 
+ DeleteModelObject(rebind={'object': 'pool'})) + delete_pool_flow.add(amphora_driver_tasks.ListenerUpdate( + requires=['listener', 'vip'])) + delete_pool_flow.add(database_tasks.DeletePoolInDB(requires='pool_id')) + delete_pool_flow.add(database_tasks.MarkLBAndListenerActiveInDB( + requires=['loadbalancer', 'listener'])) + + return delete_pool_flow + + def get_update_pool_flow(self): + """Create a flow to update a pool + + :returns: The flow for updating a pool + """ + update_pool_flow = linear_flow.Flow(constants.UPDATE_POOL_FLOW) + update_pool_flow.add(model_tasks. + UpdateAttributes( + rebind={'object': 'pool'}, + requires=['update_dict'])) + update_pool_flow.add(amphora_driver_tasks.ListenerUpdate( + requires=['listener', 'vip'])) + update_pool_flow.add(database_tasks.UpdatePoolInDB( + requires=['pool', 'update_dict'])) + update_pool_flow.add(database_tasks.MarkLBAndListenerActiveInDB( + requires=['loadbalancer', 'listener'])) + + return update_pool_flow diff --git a/octavia/controller/worker/tasks/__init__.py b/octavia/controller/worker/tasks/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/octavia/controller/worker/tasks/amphora_driver_tasks.py b/octavia/controller/worker/tasks/amphora_driver_tasks.py new file mode 100644 index 0000000000..92b5746ef3 --- /dev/null +++ b/octavia/controller/worker/tasks/amphora_driver_tasks.py @@ -0,0 +1,179 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +# + +import logging + +from oslo.config import cfg +from stevedore import driver as stevedore_driver +from taskflow import task + +from octavia.common import constants +from octavia.db import api as db_apis +from octavia.db import repositories as repo +from octavia.i18n import _LW + +CONF = cfg.CONF +CONF.import_group('controller_worker', 'octavia.common.config') +LOG = logging.getLogger(__name__) + + +class BaseAmphoraTask(task.Task): + """Base task to load drivers common to the tasks.""" + + def __init__(self, **kwargs): + super(BaseAmphoraTask, self).__init__(**kwargs) + self.amphora_driver = stevedore_driver.DriverManager( + namespace='octavia.amphora.drivers', + name=CONF.controller_worker.amphora_driver, + invoke_on_load=True + ).driver + + self.amphora_repo = repo.AmphoraRepository() + self.listener_repo = repo.ListenerRepository() + self.loadbalancer_repo = repo.LoadBalancerRepository() + + +class ListenerUpdate(BaseAmphoraTask): + """Task to update an amphora with new configuration for the listener.""" + + def execute(self, listener, vip): + """Execute listener update routines for an amphora.""" + self.amphora_driver.update(listener, vip) + LOG.debug("Updated amphora with new configuration for listener") + + def revert(self, listener, *args, **kwargs): + """Handle a failed listener update.""" + + LOG.warn(_LW("Reverting listener update.")) + self.listener_repo.update(db_apis.get_session(), id=listener.id, + provisioning_status=constants.ERROR) + return None + + +class ListenerStop(BaseAmphoraTask): + """Task to stop the listener on the vip.""" + + def execute(self, listener, vip): + """Execute listener stop routines for an amphora.""" + self.amphora_driver.stop(listener, vip) + LOG.debug("Stopped the listener on the vip") + + def revert(self, listener, *args, **kwargs): + """Handle a failed listener stop.""" + + LOG.warn(_LW("Reverting listener 
stop.")) + self.listener_repo.update(db_apis.get_session(), id=listener.id, + provisioning_status=constants.ERROR) + return None + + +class ListenerStart(BaseAmphoraTask): + """Task to start the listener on the vip.""" + + def execute(self, listener, vip): + """Execute listener start routines for an amphora.""" + self.amphora_driver.start(listener, vip) + LOG.debug("Started the listener on the vip") + + def revert(self, listener, *args, **kwargs): + """Handle a failed listener start.""" + + LOG.warn(_LW("Reverting listener start.")) + self.listener_repo.update(db_apis.get_session(), id=listener.id, + provisioning_status=constants.ERROR) + return None + + +class ListenerDelete(BaseAmphoraTask): + """Task to delete the listener on the vip.""" + + def execute(self, listener, vip): + """Execute listener delete routines for an amphora.""" + self.amphora_driver.delete(listener, vip) + LOG.debug("Deleted the listener on the vip") + + def revert(self, listener, *args, **kwargs): + """Handle a failed listener delete.""" + + LOG.warn(_LW("Reverting listener delete.")) + self.listener_repo.update(db_apis.get_session(), id=listener.id, + provisioning_status=constants.ERROR) + return None + + +class AmphoraGetInfo(BaseAmphoraTask): + """Task to get information on an amphora.""" + + def execute(self, amphora): + """Execute get_info routine for an amphora.""" + self.amphora_driver.get_info(amphora) + + +class AmphoraGetDiagnostics(BaseAmphoraTask): + """Task to get diagnostics on the amphora and the loadbalancers.""" + + def execute(self, amphora): + """Execute get_diagnostic routine for an amphora.""" + self.amphora_driver.get_diagnostics(amphora) + + +class AmphoraFinalize(BaseAmphoraTask): + """Task to finalize the amphora before any listeners are configured.""" + + def execute(self, amphora): + """Execute finalize_amphora routine.""" + self.amphora_driver.finalize_amphora(amphora) + LOG.debug("Finalized the amphora.") + + def revert(self, amphora, *args, **kwargs): + 
"""Handle a failed amphora finalize.""" + + LOG.warn(_LW("Reverting amphora finalize.")) + self.amphora_repo.update(db_apis.get_session(), id=amphora.id, + provisioning_status=constants.ERROR) + return None + + +class AmphoraPostNetworkPlug(BaseAmphoraTask): + """Task to notify the amphora post network plug.""" + + def execute(self, amphora): + """Execute post_network_plug routine.""" + self.amphora_driver.post_network_plug(amphora) + LOG.debug("Posted network plug for the compute instance") + + def revert(self, amphora, *args, **kwargs): + """Handle a failed post network plug.""" + LOG.warn(_LW("Reverting post network plug.")) + self.amphora_repo.update(db_apis.get_session(), id=amphora.id, + provisioning_status=constants.ERROR) + return None + + +class AmphoraPostVIPPlug(BaseAmphoraTask): + """Task to notify the amphora post VIP plug.""" + + def execute(self, loadbalancer): + """Execute post_vip_routine.""" + self.amphora_driver.post_vip_plug(loadbalancer) + LOG.debug("Notfied amphora of vip plug") + + def revert(self, loadbalancer, *args, **kwargs): + """Handle a failed amphora vip plug notification.""" + LOG.warn(_LW("Reverting post vip plug.")) + self.loadbalancer_repo.update(db_apis.get_session(), + id=loadbalancer.id, + provisioning_status=constants.ERROR) + return None diff --git a/octavia/controller/worker/tasks/compute_tasks.py b/octavia/controller/worker/tasks/compute_tasks.py new file mode 100644 index 0000000000..3e1fe80422 --- /dev/null +++ b/octavia/controller/worker/tasks/compute_tasks.py @@ -0,0 +1,110 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +import logging +import time + +from oslo.config import cfg +from stevedore import driver as stevedore_driver +from taskflow import task + +from octavia.common import constants +from octavia.common import exceptions +from octavia.i18n import _LE, _LW + +CONF = cfg.CONF +CONF.import_group('controller_worker', 'octavia.common.config') +LOG = logging.getLogger(__name__) + + +class BaseComputeTask(task.Task): + """Base task to load drivers common to the tasks.""" + + def __init__(self, **kwargs): + super(BaseComputeTask, self).__init__(**kwargs) + self.compute = stevedore_driver.DriverManager( + namespace='octavia.compute.drivers', + name=CONF.controller_worker.compute_driver, + invoke_on_load=True, + invoke_kwds={'region': CONF.nova_region_name} + ).driver + + +class ComputeCreate(BaseComputeTask): + """Create the compute instance for a new amphora.""" + + def execute(self, amphora): + """Create an amphora + + :returns: an amphora + """ + LOG.debug("Nova Create execute for amphora with id %s" % amphora.id) + + try: + # todo(german): add security groups + compute_id = self.compute.build( + name="amphora-" + amphora.id, + amphora_flavor=CONF.controller_worker.amp_flavor_id, + image_id=CONF.controller_worker.amp_image_id, + key_name=CONF.controller_worker.amp_ssh_key, + sec_groups=None, + network_ids=CONF.controller_worker.amp_network) + + LOG.debug("Server created with id: %s for amphora id: %s" % + (compute_id, amphora.id)) + + amphora.compute_id = compute_id + + return amphora + + except Exception as e: + LOG.error(_LE("Nova create for amphora id: %(amp)s 
" + "failed: %(exp)s"), + {'amp': amphora.id, 'exp': e}) + raise e + + def revert(self, amphora, *args, **kwargs): + """This method will revert the creation of the + + amphora. So it will just delete it in this flow + """ + LOG.warn(_LW("Reverting Nova create for amphora with id" + "%(amp)s and compute id: %(comp)s"), + {'amp': amphora.id, 'comp': amphora.compute_id}) + try: + self.compute.delete(amphora.compute_id) + amphora.compute_id = None + except Exception as e: + LOG.error(_LE("Reverting Nova create failed" + " with exception %s"), e) + return + + +class ComputeWait(BaseComputeTask): + """Wait for the compute driver to mark the amphora active.""" + + def execute(self, amphora): + """Wait for the compute driver to mark the amphora active + + :raises: Generic exception if the amphora is not active + :returns: An amphora object + """ + time.sleep(CONF.controller_worker.amp_active_wait_sec) + amp = self.compute.get_amphora(amphora.compute_id) + if amp.status == constants.ACTIVE: + amphora.lb_network_ip = amp.lb_network_ip + return amphora + + raise exceptions.ComputeWaitTimeoutException() diff --git a/octavia/controller/worker/tasks/controller_tasks.py b/octavia/controller/worker/tasks/controller_tasks.py new file mode 100644 index 0000000000..b837e9c746 --- /dev/null +++ b/octavia/controller/worker/tasks/controller_tasks.py @@ -0,0 +1,91 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +import logging + +from taskflow import task + +from octavia.db import api as db_apis +from octavia.db import repositories as repo + +LOG = logging.getLogger(__name__) + + +class BaseControllerTask(task.Task): + """Base task to load drivers common to the tasks.""" + + def __init__(self, **kwargs): + from octavia.controller.worker import controller_worker + self.cntrlr_worker = controller_worker.ControllerWorker() + self.listener_repo = repo.ListenerRepository() + self.amp_repo = repo.AmphoraRepository() + super(BaseControllerTask, self).__init__(**kwargs) + + +class DeleteLoadBalancersOnAmp(BaseControllerTask): + """Delete the load balancers on an amphora.""" + + def execute(self, amphora): + """Deletes the load balancers on an amphora. + + Iterate across the load balancers on an amphora and + call back into the controller worker to delete the + load balancers. + + :param amphora: The amphora to delete the load balancers from + """ + lbs = self.amp_repo.get_all_lbs_on_amphora(db_apis.get_session(), + amphora_id=amphora.id) + for lb in lbs: + self.cntrlr_worker.delete_load_balancer(lb.id) + + +class DeleteListenersOnLB(BaseControllerTask): + """Deletes listeners on a load balancer.""" + + def execute(self, loadbalancer): + """Deletes listeners on a load balancer. + + Iterate across the listeners on a load balancer and + call back into the controller worker to delete the + listeners. + + :param loadbalancer: The load balancer to delete listeners from + """ + listeners = self.listener_repo.get_all(db_apis.get_session(), + load_balancer_id=( + loadbalancer.id)) + for listener in listeners: + self.cntrlr_worker.delete_listener(listener.id) + + +class DisableEnableLB(BaseControllerTask): + """Enables or disables a load balancer.""" + + def execute(self, loadbalancer): + """Enables or disables a load balancer. + + Iterate across the listeners starting or stopping them + based on the load balancer enabled / disable. 
+ + :param loadbalancer: The load balancer to enable/disable + """ + listeners = self.listener_repo.get_all(db_apis.get_session(), + load_balancer_id=( + loadbalancer.id)) + for listener in listeners: + if loadbalancer.enabled != listener.enabled: + self.cntrlr_worker.update_listener( + {'enabled': loadbalancer.enabled}, listener.id) diff --git a/octavia/controller/worker/tasks/database_tasks.py b/octavia/controller/worker/tasks/database_tasks.py new file mode 100644 index 0000000000..31fa902c23 --- /dev/null +++ b/octavia/controller/worker/tasks/database_tasks.py @@ -0,0 +1,700 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +import logging + +from oslo_utils import uuidutils +from taskflow import task + +from octavia.common import constants +from octavia.db import api as db_apis +from octavia.db import repositories as repo +from octavia.i18n import _LW + +LOG = logging.getLogger(__name__) + + +class BaseDatabaseTask(task.Task): + """Base task to load drivers common to the tasks.""" + + def __init__(self, **kwargs): + self.amphora_repo = repo.AmphoraRepository() + self.health_mon_repo = repo.HealthMonitorRepository() + self.listener_repo = repo.ListenerRepository() + self.loadbalancer_repo = repo.LoadBalancerRepository() + self.member_repo = repo.MemberRepository() + self.pool_repo = repo.PoolRepository() + super(BaseDatabaseTask, self).__init__(**kwargs) + + +class CreateAmphoraInDB(BaseDatabaseTask): + """Task to create an initial amphora in the Database.""" + + default_provides = constants.AMPHORA + + def execute(self, *args, **kwargs): + """Creates an pending create amphora record in the database. + + :returns: The amphora object created + """ + + amphora = self.amphora_repo.create(db_apis.get_session(), + id=uuidutils.generate_uuid(), + status=constants.PENDING_CREATE) + + LOG.debug("Created Amphora in DB with id %s" % amphora.id) + return amphora + + def revert(self, *args, **kwargs): + """Revert by storing the amphora in error state in the DB + + In a future version we might change the status to DELETED + if deleting the amphora was successful + """ + + if 'result' not in kwargs: + return None # nothing to do + +# amphora = kwargs['result'] +# TODO(johnsom) fix +# LOG.warn(_LW("Reverting create amphora in DB for amp id %s "), +# amphora.id) + +# _amphora_repo.update(db_apis.get_session(), amphora.id, +# status=constants.ERROR, +# compute_id=amphora.compute_id) + + +class DeleteHealthMonitorInDB(BaseDatabaseTask): + """Delete the health monitor in the DB. 
+ + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, health_mon_id): + """Delete the health monitor in DB + + :param health_mon_id: The health monitor id to delete + :returns: None + """ + + LOG.debug("DB delete health monitor for id: %s " % + health_mon_id) + self.health_mon_repo.delete(db_apis.get_session(), health_mon_id) + + def revert(self, health_mon_id, *args, **kwargs): + """Mark the health monitor ERROR since the mark active couldn't happen + + :returns: None + """ + + LOG.warn(_LW("Reverting mark health monitor delete in DB " + "for health monitor id %s"), health_mon_id) +# TODO(johnsom) fix this +# self.health_mon_repo.update(db_apis.get_session(), health_mon.id, +# provisioning_status=constants.ERROR) + + +class DeleteMemberInDB(BaseDatabaseTask): + """Delete the member in the DB. + + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, member_id): + """Delete the member in the DB + + :param member_id: The member ID to be deleted + :returns: None + """ + + LOG.debug("DB delete member for id: %s " % + member_id) + self.member_repo.delete(db_apis.get_session(), member_id) + + def revert(self, member_id, *args, **kwargs): + """Mark the member ERROR since the delete couldn't happen + + :returns: None + """ + + LOG.warn(_LW("Reverting delete in DB " + "for member id %s"), member_id) +# TODO(johnsom) fix this +# self.member_repo.update(db_apis.get_session(), member.id, +# operating_status=constants.ERROR) + + +class DeletePoolInDB(BaseDatabaseTask): + """Delete the pool in the DB. 
+
+    Since sqlalchemy will likely retry by itself always revert if it fails
+    """
+
+    def execute(self, pool_id):
+        """Delete the pool in DB
+
+        :param pool_id: The pool ID to be deleted
+        :returns: None
+        """
+
+        LOG.debug("Delete in DB for pool id: %s " %
+                  pool_id)
+        self.pool_repo.delete(db_apis.get_session(), pool_id)
+
+    def revert(self, pool_id, *args, **kwargs):
+        """Mark the pool ERROR since the delete couldn't happen
+
+        :returns: None
+        """
+
+        LOG.warn(_LW("Reverting delete in DB "
+                     "for pool id %s"), pool_id)
+# TODO(johnsom) Fix this
+#        self.pool_repo.update(db_apis.get_session(), pool.id,
+#                              operating_status=constants.ERROR)
+
+
+class GetAmphoraByID(BaseDatabaseTask):
+    """Get an amphora object from the database."""
+
+    def execute(self, amphora_id):
+        """Get an amphora object from the database.
+
+        :param amphora_id: The amphora ID to lookup
+        :returns: The amphora object
+        """
+
+        LOG.debug("Get amphora from DB for amphora id: %s " %
+                  amphora_id)
+        return self.amphora_repo.get(db_apis.get_session(), id=amphora_id)
+
+
+class GetLoadbalancerByID(BaseDatabaseTask):
+    """Get a load balancer object from the database."""
+
+    def execute(self, loadbalancer_id):
+        """Get a load balancer object from the database.
+
+        :param loadbalancer_id: The load balancer ID to lookup
+        :returns: The load balancer object
+        """
+
+        LOG.debug("Get load balancer from DB for load balancer id: %s " %
+                  loadbalancer_id)
+        return self.loadbalancer_repo.get(db_apis.get_session(),
+                                          id=loadbalancer_id)
+
+
+class MapLoadbalancerToAmphora(BaseDatabaseTask):
+    """Maps and assigns a load balancer to an amphora in the database."""
+
+    def execute(self, loadbalancer):
+        """Allocates an Amphora for the load balancer in the database.
+
+        :param loadbalancer: The load balancer to map to an amphora
+        :returns: The allocated amphora if one was available, None if it
+                  was unable to allocate an Amphora
+        """
+
+        LOG.debug("Allocating an Amphora for load balancer with id %s" %
+                  loadbalancer.id)
+
+        amp = self.amphora_repo.allocate_and_associate(
+            db_apis.get_session(),
+            loadbalancer.id)
+        if amp is None:
+            LOG.debug("No Amphora available for load balancer with id %s" %
+                      loadbalancer.id)
+        else:
+            LOG.debug("Allocated Amphora with id %s for load balancer "
+                      "with id %s" % (amp.id, loadbalancer.id))
+
+        return amp
+
+
+class MarkAmphoraAllocatedInDB(BaseDatabaseTask):
+    """Will mark an amphora as allocated to a load balancer in the database.
+
+    Assume sqlalchemy made sure the DB got
+    retried sufficiently - so just abort
+    """
+
+    def execute(self, amphora, loadbalancer):
+        """Mark amphora as allocated to a load balancer in DB."""
+
+        LOG.debug("Mark ALLOCATED in DB for amphora: %s with compute id %s "
+                  "for load balancer: %s" %
+                  (amphora.id, amphora.compute_id, loadbalancer.id))
+        self.amphora_repo.update(db_apis.get_session(), amphora.id,
+                                 status=constants.AMPHORA_ALLOCATED,
+                                 compute_id=amphora.compute_id,
+                                 lb_network_ip=amphora.lb_network_ip,
+                                 load_balancer_id=loadbalancer.id)
+
+    def revert(self, amphora, *args, **kwargs):
+        """Mark the amphora as broken and ready to be cleaned up."""
+
+        LOG.warn(_LW("Reverting mark amphora allocated in DB for amp "
+                     "id %(amp)s and compute id %(comp)s"),
+                 {'amp': amphora.id, 'comp': amphora.compute_id})
+        self.amphora_repo.update(db_apis.get_session(), amphora.id,
+                                 status=constants.ERROR)
+
+
+class MarkAmphoraBootingInDB(BaseDatabaseTask):
+    """Mark the amphora as booting in the database."""
+
+    def execute(self, amphora):
+        """Mark amphora booting in DB."""
+
+        LOG.debug("Mark BOOTING in DB for amphora: %s with compute id %s" %
+                  (amphora.id, amphora.compute_id))
+        self.amphora_repo.update(db_apis.get_session(), amphora.id,
+                                 status=constants.AMPHORA_BOOTING,
+                                 compute_id=amphora.compute_id)
+
+    def revert(self, amphora, *args, **kwargs):
+        """Mark the amphora as broken and ready to be cleaned up."""
+
+        LOG.warn(_LW("Reverting mark amphora booting in DB for amp "
+                     "id %(amp)s and compute id %(comp)s"),
+                 {'amp': amphora.id, 'comp': amphora.compute_id})
+        self.amphora_repo.update(db_apis.get_session(), amphora.id,
+                                 status=constants.ERROR,
+                                 compute_id=amphora.compute_id)
+
+
+class MarkAmphoraDeletedInDB(BaseDatabaseTask):
+    """Mark the amphora deleted in the DB.
+
+    Since sqlalchemy will likely retry by itself always revert if it fails
+    """
+
+    def execute(self, amphora):
+        """Mark the amphora as deleted in DB."""
+
+        LOG.debug("Mark DELETED in DB for amphora: %s "
+                  "with compute id %s" %
+                  (amphora.id, amphora.compute_id))
+        self.amphora_repo.update(db_apis.get_session(), amphora.id,
+                                 status=constants.DELETED)
+
+    def revert(self, amphora, *args, **kwargs):
+        """Mark the amphora as broken and ready to be cleaned up."""
+
+        LOG.warn(_LW("Reverting mark amphora deleted in DB "
+                     "for amp id %(amp)s and compute id %(comp)s"),
+                 {'amp': amphora.id, 'comp': amphora.compute_id})
+        self.amphora_repo.update(db_apis.get_session(), amphora.id,
+                                 status=constants.ERROR)
+
+
+class MarkAmphoraPendingDeleteInDB(BaseDatabaseTask):
+    """Mark the amphora pending delete in the DB.
+
+    Since sqlalchemy will likely retry by itself always revert if it fails
+    """
+
+    def execute(self, amphora):
+        """Mark the amphora as pending delete in DB."""
+
+        LOG.debug("Mark PENDING DELETE in DB for amphora: %s "
+                  "with compute id %s" %
+                  (amphora.id, amphora.compute_id))
+        self.amphora_repo.update(db_apis.get_session(), amphora.id,
+                                 status=constants.PENDING_DELETE)
+
+    def revert(self, amphora, *args, **kwargs):
+        """Mark the amphora as broken and ready to be cleaned up."""
+
+        LOG.warn(_LW("Reverting mark amphora pending delete in DB "
+                     "for amp id %(amp)s and compute id %(comp)s"),
+                 {'amp': amphora.id, 'comp': amphora.compute_id})
+        self.amphora_repo.update(db_apis.get_session(), amphora.id,
+                                 status=constants.ERROR)
+
+
+class MarkAmphoraPendingUpdateInDB(BaseDatabaseTask):
+    """Mark the amphora pending update in the DB.
+
+    Since sqlalchemy will likely retry by itself always revert if it fails
+    """
+
+    def execute(self, amphora):
+        """Mark the amphora as pending update in DB."""
+
+        LOG.debug("Mark PENDING UPDATE in DB for amphora: %s "
+                  "with compute id %s" %
+                  (amphora.id, amphora.compute_id))
+        self.amphora_repo.update(db_apis.get_session(), amphora.id,
+                                 status=constants.PENDING_UPDATE)
+
+    def revert(self, amphora, *args, **kwargs):
+        """Mark the amphora as broken and ready to be cleaned up."""
+
+        LOG.warn(_LW("Reverting mark amphora pending update in DB "
+                     "for amp id %(amp)s and compute id %(comp)s"),
+                 {'amp': amphora.id, 'comp': amphora.compute_id})
+        self.amphora_repo.update(db_apis.get_session(), amphora.id,
+                                 status=constants.ERROR)
+
+
+class MarkAmphoraReadyInDB(BaseDatabaseTask):
+    """This task will mark an amphora as ready in the database.
+
+    Assume sqlalchemy made sure the DB got
+    retried sufficiently - so just abort
+    """
+
+    def execute(self, amphora):
+        """Mark amphora as ready in DB."""
+
+        LOG.debug("Mark READY in DB for amphora: %s with compute id %s" %
+                  (amphora.id, amphora.compute_id))
+        self.amphora_repo.update(db_apis.get_session(), amphora.id,
+                                 status=constants.AMPHORA_READY,
+                                 compute_id=amphora.compute_id,
+                                 lb_network_ip=amphora.lb_network_ip)
+
+    def revert(self, amphora, *args, **kwargs):
+        """Mark the amphora as broken and ready to be cleaned up."""
+
+        LOG.warn(_LW("Reverting mark amphora ready in DB for amp "
+                     "id %(amp)s and compute id %(comp)s"),
+                 {'amp': amphora.id, 'comp': amphora.compute_id})
+        self.amphora_repo.update(db_apis.get_session(), amphora.id,
+                                 status=constants.ERROR,
+                                 compute_id=amphora.compute_id,
+                                 lb_network_ip=amphora.lb_network_ip)
+
+
+class MarkLBActiveInDB(BaseDatabaseTask):
+    """Mark the load balancer active in the DB.
+
+    Since sqlalchemy will likely retry by itself always revert if it fails
+    """
+
+    def execute(self, loadbalancer):
+        """Mark the load balancer as active in DB."""
+
+        LOG.debug("Mark ACTIVE in DB for load balancer id: %s" %
+                  loadbalancer.id)
+        self.loadbalancer_repo.update(db_apis.get_session(),
+                                      loadbalancer.id,
+                                      provisioning_status=constants.ACTIVE)
+
+    def revert(self, loadbalancer, *args, **kwargs):
+        """Mark the load balancer as broken and ready to be cleaned up."""
+
+        LOG.warn(_LW("Reverting mark load balancer active in DB "
+                     "for load balancer id %s"), loadbalancer.id)
+        self.loadbalancer_repo.update(db_apis.get_session(),
+                                      loadbalancer.id,
+                                      provisioning_status=constants.ERROR)
+
+
+class MarkLBDeletedInDB(BaseDatabaseTask):
+    """Mark the load balancer deleted in the DB.
+ + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, loadbalancer): + """Mark the load balancer as deleted in DB.""" + + LOG.debug("Mark DELETED in DB for load balancer id: %s" % + loadbalancer.id) + self.loadbalancer_repo.update(db_apis.get_session(), + loadbalancer.id, + provisioning_status=constants.DELETED) + + def revert(self, loadbalancer, *args, **kwargs): + """Mark the load balancer as broken and ready to be cleaned up.""" + + LOG.warn(_LW("Reverting mark load balancer deleted in DB " + "for load balancer id %s"), loadbalancer.id) + self.loadbalancer_repo.update(db_apis.get_session(), + loadbalancer.id, + provisioning_status=constants.ERROR) + + +class MarkLBPendingDeleteInDB(BaseDatabaseTask): + """Mark the load balancer pending delete in the DB. + + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, loadbalancer): + """Mark the load balancer as pending delete in DB.""" + + LOG.debug("Mark PENDING DELETE in DB for load balancer id: %s" % + loadbalancer.id) + self.loadbalancer_repo.update(db_apis.get_session(), + loadbalancer.id, + provisioning_status=(constants. + PENDING_DELETE)) + + def revert(self, loadbalancer, *args, **kwargs): + """Mark the load balancer as broken and ready to be cleaned up.""" + + LOG.warn(_LW("Reverting mark load balancer pending delete in DB " + "for load balancer id %s"), loadbalancer.id) + self.loadbalancer_repo.update(db_apis.get_session(), + loadbalancer.id, + provisioning_status=constants.ERROR) + + +class MarkLBAndListenerActiveInDB(BaseDatabaseTask): + """Mark the load balancer and listener active in the DB. 
+
+    Since sqlalchemy will likely retry by itself always revert if it fails
+    """
+
+    def execute(self, loadbalancer, listener):
+        """Mark the load balancer and listener as active in DB."""
+
+        LOG.debug("Mark ACTIVE in DB for load balancer id: %s "
+                  "and listener id: %s" % (loadbalancer.id, listener.id))
+        self.loadbalancer_repo.update(db_apis.get_session(),
+                                      loadbalancer.id,
+                                      provisioning_status=constants.ACTIVE)
+        self.listener_repo.update(db_apis.get_session(), listener.id,
+                                  provisioning_status=constants.ACTIVE)
+
+    def revert(self, loadbalancer, listener, *args, **kwargs):
+        """Mark the load balancer and listener as broken."""
+
+        LOG.warn(_LW("Reverting mark load balancer "
+                     "and listener active in DB "
+                     "for load balancer id %(LB)s and "
+                     "listener id: %(list)s"),
+                 {'LB': loadbalancer.id, 'list': listener.id})
+        self.loadbalancer_repo.update(db_apis.get_session(),
+                                      loadbalancer.id,
+                                      provisioning_status=constants.ERROR)
+        self.listener_repo.update(db_apis.get_session(), listener.id,
+                                  provisioning_status=constants.ERROR)
+
+
+class MarkListenerActiveInDB(BaseDatabaseTask):
+    """Mark the listener active in the DB.
+
+    Since sqlalchemy will likely retry by itself always revert if it fails
+    """
+
+    def execute(self, listener):
+        """Mark the listener as active in DB
+
+        :param listener: The listener to be marked active
+        :returns: None
+        """
+
+        LOG.debug("Mark ACTIVE in DB for listener id: %s " %
+                  listener.id)
+        self.listener_repo.update(db_apis.get_session(), listener.id,
+                                  provisioning_status=constants.ACTIVE)
+
+    def revert(self, listener, *args, **kwargs):
+        """Mark the listener ERROR since the mark active couldn't happen
+
+        :returns: None
+        """
+
+        LOG.warn(_LW("Reverting mark listener active in DB "
+                     "for listener id %s"), listener.id)
+        self.listener_repo.update(db_apis.get_session(), listener.id,
+                                  provisioning_status=constants.ERROR)
+
+
+class MarkListenerDeletedInDB(BaseDatabaseTask):
+    """Mark the listener deleted in the DB.
+ + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, listener): + """Mark the listener as deleted in DB + + :param listener: The listener to be marked deleted + :returns: None + """ + + LOG.debug("Mark DELETED in DB for listener id: %s " % + listener.id) + self.listener_repo.update(db_apis.get_session(), listener.id, + provisioning_status=constants.DELETED) + + def revert(self, listener, *args, **kwargs): + """Mark the listener ERROR since the delete couldn't happen + + :returns: None + """ + + LOG.warn(_LW("Reverting mark listener deleted in DB " + "for listener id %s"), listener.id) + self.listener_repo.update(db_apis.get_session(), listener.id, + provisioning_status=constants.ERROR) + + +class MarkListenerPendingDeleteInDB(BaseDatabaseTask): + """Mark the listener pending delete in the DB. + + Since sqlalchemy will likely retry by itself always revert if it fails + """ + + def execute(self, listener): + """Mark the listener as pending delete in DB.""" + + LOG.debug("Mark PENDING DELETE in DB for listener id: %s" % + listener.id) + self.listener_repo.update(db_apis.get_session(), listener.id, + provisioning_status=constants.PENDING_DELETE) + + def revert(self, listener, *args, **kwargs): + """Mark the listener as broken and ready to be cleaned up.""" + + LOG.warn(_LW("Reverting mark listener pending delete in DB " + "for listener id %s"), listener.id) + self.listener_repo.update(db_apis.get_session(), listener.id, + provisioning_status=constants.ERROR) + + +class UpdateHealthMonInDB(BaseDatabaseTask): + """Update the health monitor in the DB. 
+
+    Since sqlalchemy will likely retry by itself always revert if it fails
+    """
+
+    def execute(self, health_mon, update_dict):
+        """Update the health monitor in the DB
+
+        :param health_mon: The health monitor to be updated
+        :param update_dict: The dictionary of updates to apply
+        :returns: None
+        """
+
+        LOG.debug("Update DB for health monitor id: %s " %
+                  health_mon.pool_id)
+        self.health_mon_repo.update(db_apis.get_session(), health_mon.pool_id,
+                                    **update_dict)
+
+    def revert(self, health_mon, *args, **kwargs):
+        """Mark the health monitor ERROR since the update couldn't happen
+
+        :returns: None
+        """
+
+        LOG.warn(_LW("Reverting update health monitor in DB "
+                     "for health monitor id %s"), health_mon.pool_id)
+# TODO(johnsom) fix this to set the upper objects to ERROR
+        self.health_mon_repo.update(db_apis.get_session(), health_mon.pool_id,
+                                    enabled=0)
+
+
+class UpdateListenerInDB(BaseDatabaseTask):
+    """Update the listener in the DB.
+
+    Since sqlalchemy will likely retry by itself always revert if it fails
+    """
+
+    def execute(self, listener, update_dict):
+        """Update the listener in the DB
+
+        :param listener: The listener to be updated
+        :param update_dict: The dictionary of updates to apply
+        :returns: None
+        """
+
+        LOG.debug("Update DB for listener id: %s " %
+                  listener.id)
+        self.listener_repo.update(db_apis.get_session(), listener.id,
+                                  **update_dict)
+
+    def revert(self, listener, *args, **kwargs):
+        """Mark the listener ERROR since the update couldn't happen
+
+        :returns: None
+        """
+
+        LOG.warn(_LW("Reverting update listener in DB "
+                     "for listener id %s"), listener.id)
+# TODO(johnsom) fix this to set the upper objects to ERROR
+        self.listener_repo.update(db_apis.get_session(), listener.id,
+                                  enabled=0)
+
+
+class UpdateMemberInDB(BaseDatabaseTask):
+    """Update the member in the DB.
+
+    Since sqlalchemy will likely retry by itself always revert if it fails
+    """
+
+    def execute(self, member, update_dict):
+        """Update the member in the DB
+
+        :param member: The member to be updated
+        :param update_dict: The dictionary of updates to apply
+        :returns: None
+        """
+
+        LOG.debug("Update DB for member id: %s " %
+                  member.id)
+        self.member_repo.update(db_apis.get_session(), member.id,
+                                **update_dict)
+
+    def revert(self, member, *args, **kwargs):
+        """Mark the member ERROR since the update couldn't happen
+
+        :returns: None
+        """
+
+        LOG.warn(_LW("Reverting update member in DB "
+                     "for member id %s"), member.id)
+# TODO(johnsom) fix this to set the upper objects to ERROR
+        self.member_repo.update(db_apis.get_session(), member.id,
+                                enabled=0)
+
+
+class UpdatePoolInDB(BaseDatabaseTask):
+    """Update the pool in the DB.
+
+    Since sqlalchemy will likely retry by itself always revert if it fails
+    """
+
+    def execute(self, pool, update_dict):
+        """Update the pool in the DB
+
+        :param pool: The pool to be updated
+        :param update_dict: The dictionary of updates to apply
+        :returns: None
+        """
+
+        LOG.debug("Update DB for pool id: %s " %
+                  pool.id)
+        self.pool_repo.update(db_apis.get_session(), pool.id,
+                              **update_dict)
+
+    def revert(self, pool, *args, **kwargs):
+        """Mark the pool ERROR since the update couldn't happen
+
+        :returns: None
+        """
+
+        LOG.warn(_LW("Reverting update pool in DB "
+                     "for pool id %s"), pool.id)
+# TODO(johnsom) fix this to set the upper objects to ERROR
+        self.pool_repo.update(db_apis.get_session(), pool.id,
+                              enabled=0)
diff --git a/octavia/controller/worker/tasks/model_tasks.py b/octavia/controller/worker/tasks/model_tasks.py
new file mode 100644
index 0000000000..7dfc8a36c9
--- /dev/null
+++ b/octavia/controller/worker/tasks/model_tasks.py
@@ -0,0 +1,33 @@
+# Copyright 2015 Hewlett-Packard Development Company, L.P.
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +from taskflow import task + + +class DeleteModelObject(task.Task): + """Task to delete an object in a model.""" + + def execute(self, object): + + object.delete() + + +class UpdateAttributes(task.Task): + """Task to update an object for changes.""" + + def execute(self, object, update_dict): + + for key, value in update_dict.items(): + setattr(object, key, value) diff --git a/octavia/controller/worker/tasks/network_tasks.py b/octavia/controller/worker/tasks/network_tasks.py new file mode 100644 index 0000000000..7a90e1672b --- /dev/null +++ b/octavia/controller/worker/tasks/network_tasks.py @@ -0,0 +1,235 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +import logging + +from oslo.config import cfg +from stevedore import driver as stevedore_driver +from taskflow import task + +from octavia.common import constants +from octavia.i18n import _LW, _LE +from octavia.network import base +from octavia.network import data_models + +LOG = logging.getLogger(__name__) +CONF = cfg.CONF +CONF.import_group('controller_worker', 'octavia.common.config') + + +class BaseNetworkTask(task.Task): + """Base task to load drivers common to the tasks.""" + + def __init__(self, **kwargs): + super(BaseNetworkTask, self).__init__(**kwargs) + + self.network_driver = stevedore_driver.DriverManager( + namespace='octavia.network.drivers', + name=CONF.controller_worker.network_driver, + invoke_on_load=True + ).driver + + +class CalculateDelta(BaseNetworkTask): + """Task to calculate the delta between + + the nics on the amphora and the ones + we need. Returns a list for + plumbing them. + """ + + default_provides = constants.DELTA + + def execute(self, amphora, nics): + """Compute which NICs need to be plugged + + for the amphora to become operational. 
+ :param amphora - the amphora configuration we + want to achieve + :param nics - the nics on the real amphora + :returns the delta + """ + + LOG.debug("Calculating network delta for amphora id: %s" % amphora.id) + + # Figure out what networks we want + # seed with lb network(s) + desired_network_ids = set(CONF.controller_worker.amp_network) + if (not amphora.load_balancer) or ( + not amphora.load_balancer.listeners): + return {'add': [], 'delete': []} + + for listener in amphora.load_balancer.listeners: + if (not listener.default_pool) or ( + not listener.default_pool.members): + continue + desired_network_ids.update(list( + member.subnet_id for member in listener.default_pool.members)) + + # assume we don't have two nics in the same network + actual_network_nics = dict((nic.network_id, nic) for nic in nics) + + del_ids = set(actual_network_nics) - desired_network_ids + delete_nics = list( + actual_network_nics[net_id] for net_id in del_ids) + + add_ids = desired_network_ids - set(actual_network_nics) + add_nics = list( + data_models.Interface(network_id=net_id) for net_id in add_ids) + + return { + 'delete': delete_nics, + 'add': add_nics + } + + +class GetPlumbedNetworks(BaseNetworkTask): + """Task to figure out the NICS on an amphora. + + This will likely move into the amphora driver + :returns: Array of networks + """ + + default_provides = constants.NICS + + def execute(self, amphora): + """Get plumbed networks for the amphora.""" + + LOG.debug("Getting plumbed networks for amphora id: %s" % amphora.id) + + return self.network_driver.get_plugged_networks(amphora.compute_id) + + +class PlugNetworks(BaseNetworkTask): + """Task to plug the networks. 
+ + This uses the delta to add all missing networks/nics + """ + + def execute(self, amphora, delta): + """Update the amphora networks for the delta.""" + + LOG.debug("Plug or unplug networks for amphora id: %s" % amphora.id) + + if not delta: + LOG.debug("No network deltas for amphora id: %s" % amphora.id) + return None + + # add nics + for nic in delta['add']: + self.network_driver.plug_network(amphora.compute_id, + nic.network_id) + + def revert(self, amphora, delta): + """Handle a failed network plug by removing all nics added.""" + + LOG.warn(_LW("Unable to plug networks for amp id %s"), amphora.id) + if not delta: + return + + for nic in delta['add']: + try: + self.network_driver.unplug_network(amphora.compute_id, + nic.network_id) + except base.NetworkNotFound: + pass + + +class UnPlugNetworks(BaseNetworkTask): + """Task to unplug the networks + + Loop over all nics and unplug them + based on delta + """ + + def execute(self, amphora, delta): + """Unplug the networks.""" + + LOG.debug("Unplug network for amphora") + if not delta: + LOG.debug("No network deltas for amphora id: %s" % amphora.id) + return None + + for nic in delta['delete']: + try: + self.network_driver.unplug_network(amphora.compute_id, + nic.network_id) + except base.NetworkNotFound as e: + LOG.debug("Network %d not found ", nic.network_id) + pass + except Exception as e: + LOG.error( + _LE("Unable to unplug network - exception: %s"), + e.message) + pass # Todo(german) follow up if that makes sense + + +class PlugVIP(BaseNetworkTask): + """Task to plumb a VIP.""" + + def execute(self, amphora): + """Plumb a vip to an amphora.""" + + # Likely needs to be a subflow! 
+ + LOG.debug("Plumbing VIP for amphora id: %s" % amphora.id) + + vip = self.network_driver.plug_vip(amphora.load_balancer, + amphora.load_balancer.vip) + amphora.load_balancer.vip = vip + return + + def revert(self, amphora): + """Handle a failure to plumb a vip.""" + + LOG.warn(_LW("Unable to plug VIP for amp id %s"), amphora.id) + + self.network_driver.unplug_vip(amphora.load_balancer, + amphora.load_balancer.vip) + + return + + +class AllocateVIP(BaseNetworkTask): + """Task to allocate a VIP.""" + + def execute(self, port_id, network_id, ip_address): + """Allocate a vip to the loadbalancer.""" + + LOG.debug("Allocate_vip port_id %s, network_id %s," + "ip_address %s", + port_id, network_id, ip_address) + return self.network_driver.allocate_vip(port_id, + network_id, ip_address) + + def revert(self, vip): + """Handle a failure to allocate vip.""" + + LOG.warn(_LW("Unable to allocate VIP %s"), vip.ip_address) + + self.network_driver.deallocate_vip(vip) + return + + +class DeallocateVIP(BaseNetworkTask): + """Task to deallocate a VIP.""" + + def execute(self, vip): + """Deallocate a VIP.""" + + LOG.debug("Deallocating a VIP %s", vip.ip_address) + + self.network_driver.deallocate_vip(vip) + return diff --git a/octavia/db/migration/alembic_migrations/versions/48660b6643f0_add_new_states_for_amphora.py b/octavia/db/migration/alembic_migrations/versions/48660b6643f0_add_new_states_for_amphora.py new file mode 100644 index 0000000000..2e45d17461 --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/48660b6643f0_add_new_states_for_amphora.py @@ -0,0 +1,51 @@ +# Copyright 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""add new states for amphora + +Revision ID: 48660b6643f0 +Revises: 3e5b37a0bdb9 +Create Date: 2015-01-20 13:31:30.017959 + +""" + +# revision identifiers, used by Alembic. +revision = '48660b6643f0' +down_revision = '3e5b37a0bdb9' + +from alembic import op +import sqlalchemy as sa +from sqlalchemy import sql + + +def upgrade(): + insert_table = sql.table( + u'provisioning_status', + sql.column(u'name', sa.String), + sql.column(u'description', sa.String) + ) + + op.bulk_insert( + insert_table, + [ + {'name': 'READY'}, + {'name': 'BOOTING'}, + {'name': 'ALLOCATED'} + ] + ) + + +def downgrade(): + pass diff --git a/octavia/db/repositories.py b/octavia/db/repositories.py index 398785e145..f0b48ae9cb 100644 --- a/octavia/db/repositories.py +++ b/octavia/db/repositories.py @@ -301,7 +301,12 @@ class AmphoraRepository(BaseRepository): model_class = models.Amphora def associate(self, session, load_balancer_id, amphora_id): - """Associates an amphora with a load balancer.""" + """Associates an amphora with a load balancer. + + :param session: A Sql Alchemy database session. + :param load_balancer_id: The load balancer id to associate + :param amphora_id: The amphora id to associate + """ with session.begin(subtransactions=True): load_balancer = session.query(models.LoadBalancer).filter_by( id=load_balancer_id).first() @@ -309,6 +314,44 @@ class AmphoraRepository(BaseRepository): id=amphora_id).first() load_balancer.amphorae.append(amphora) + def allocate_and_associate(self, session, load_balancer_id): + """Allocate an amphora for a load balancer. 
+ + For v0.5 this is simple, find a free amp and + associate the lb. In the future this needs to be + enhanced. + + :param session: A Sql Alchemy database session. + :param load_balancer_id: The load balancer id to associate + :returns: The amphora ID for the load balancer or None + """ + with session.begin(subtransactions=True): + amp = session.query(self.model_class).with_for_update().filter_by( + status='READY', load_balancer_id=None).first() + + if amp is None: + return None + + amp.status = 'ALLOCATED' + amp.load_balancer_id = load_balancer_id + + return amp.to_data_model() + + def get_all_lbs_on_amphora(self, session, amphora_id): + """Get all of the load balancers on an amphora. + + :param session: A Sql Alchemy database session. + :param amphora_id: The amphora id to list the load balancers from + :returns: [octavia.common.data_model] + """ + with session.begin(subtransactions=True): + lb_subquery = (session.query(self.model_class.load_balancer_id). + filter_by(id=amphora_id).subquery()) + lb_list = (session.query(models.LoadBalancer). 
+ filter(models.LoadBalancer.id.in_(lb_subquery)).all()) + data_model_list = [model.to_data_model() for model in lb_list] + return data_model_list + class SNIRepository(BaseRepository): model_class = models.SNI diff --git a/octavia/tests/functional/db/test_repositories.py b/octavia/tests/functional/db/test_repositories.py index 1d66d36457..ad68a1ddae 100644 --- a/octavia/tests/functional/db/test_repositories.py +++ b/octavia/tests/functional/db/test_repositories.py @@ -1113,6 +1113,27 @@ class AmphoraRepositoryTest(BaseRepositoryTest): new_lb = self.lb_repo.get(self.session, id=self.lb.id) self.assertEqual(0, len(new_lb.amphorae)) + def test_allocate_and_associate(self): + new_amphora = self.amphora_repo.allocate_and_associate(self.session, + self.lb.id) + self.assertIsNone(new_amphora) + + amphora = self.create_amphora(self.FAKE_UUID_1) + self.amphora_repo.update(self.session, amphora.id, + status=constants.AMPHORA_READY) + new_amphora = self.amphora_repo.allocate_and_associate(self.session, + self.lb.id) + self.assertIsNotNone(new_amphora) + self.assertIsInstance(new_amphora, models.Amphora) + + def test_get_all_lbs_on_amphora(self): + amphora = self.create_amphora(self.FAKE_UUID_1) + self.amphora_repo.associate(self.session, self.lb.id, amphora.id) + lb_list = self.amphora_repo.get_all_lbs_on_amphora(self.session, + amphora.id) + self.assertIsNotNone(lb_list) + self.assertIn(self.lb, lb_list) + class AmphoraHealthRepositoryTest(BaseRepositoryTest): def setUp(self): diff --git a/octavia/tests/unit/common/test_base_taskflow.py b/octavia/tests/unit/common/test_base_taskflow.py new file mode 100644 index 0000000000..0f2d8abdcd --- /dev/null +++ b/octavia/tests/unit/common/test_base_taskflow.py @@ -0,0 +1,64 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +import concurrent.futures +import mock +from oslo.config import cfg +from oslo.config import fixture as oslo_fixture +from taskflow import engines as tf_engines + +from octavia.common import base_taskflow +import octavia.tests.unit.base as base + +MAX_WORKERS = 1 + +_engine_mock = mock.MagicMock() + + +class TestBaseTaskFlowEngine(base.TestCase): + + def setUp(self): + + conf = oslo_fixture.Config(cfg.CONF) + conf.config(group="task_flow", max_workers=MAX_WORKERS) + conf.config(group="task_flow", engine='TESTENGINE') + super(TestBaseTaskFlowEngine, self).setUp() + + @mock.patch('concurrent.futures.ThreadPoolExecutor', + return_value='TESTEXECUTOR') + @mock.patch('taskflow.engines.load', + return_value=_engine_mock) + def test_taskflow_load(self, + mock_tf_engine_load, + mock_ThreadPoolExecutor): + + # Test __init__ + + base_taskflow_engine = base_taskflow.BaseTaskFlowEngine() + + concurrent.futures.ThreadPoolExecutor.assert_called_once_with( + max_workers=MAX_WORKERS) + + # Test _taskflow_load + + base_taskflow_engine._taskflow_load('TEST') + + tf_engines.load.assert_called_once_with( + 'TEST', + engine_conf='TESTENGINE', + executor='TESTEXECUTOR') + + _engine_mock.compile.assert_called_once() + _engine_mock.prepare.assert_called_once() diff --git a/octavia/tests/unit/controller/worker/__init__.py b/octavia/tests/unit/controller/worker/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/octavia/tests/unit/controller/worker/flows/__init__.py b/octavia/tests/unit/controller/worker/flows/__init__.py new file mode 100644 index 
0000000000..e69de29bb2 diff --git a/octavia/tests/unit/controller/worker/flows/test_amphora_flows.py b/octavia/tests/unit/controller/worker/flows/test_amphora_flows.py new file mode 100644 index 0000000000..ce894eb30f --- /dev/null +++ b/octavia/tests/unit/controller/worker/flows/test_amphora_flows.py @@ -0,0 +1,67 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +from oslo_config import cfg +from oslo_config import fixture as oslo_fixture +from taskflow.patterns import linear_flow as flow + +from octavia.controller.worker.flows import amphora_flows +import octavia.tests.unit.base as base + +AUTH_VERSION = '2' + + +class TestAmphoraFlows(base.TestCase): + + def setUp(self): + self.AmpFlow = amphora_flows.AmphoraFlows() + conf = oslo_fixture.Config(cfg.CONF) + conf.config(group="keystone_authtoken", auth_version=AUTH_VERSION) + + super(TestAmphoraFlows, self).setUp() + + def test_get_create_amphora_flow(self): + + amp_flow = self.AmpFlow.get_create_amphora_flow() + + self.assertIsInstance(amp_flow, flow.Flow) + + self.assertIn('amphora', amp_flow.provides) + + self.assertEqual(len(amp_flow.provides), 1) + self.assertEqual(len(amp_flow.requires), 0) + + def test_get_create_amphora_for_lb_flow(self): + + amp_flow = self.AmpFlow.get_create_amphora_for_lb_flow() + + self.assertIsInstance(amp_flow, flow.Flow) + + self.assertIn('amphora', amp_flow.provides) + self.assertIn('loadbalancer', amp_flow.requires) + 
+ self.assertEqual(len(amp_flow.provides), 4) + self.assertEqual(len(amp_flow.requires), 3) + + def test_get_delete_amphora_flow(self): + + amp_flow = self.AmpFlow.get_delete_amphora_flow() + + self.assertIsInstance(amp_flow, flow.Flow) + + self.assertIn('amphora', amp_flow.requires) + + self.assertEqual(len(amp_flow.provides), 0) + self.assertEqual(len(amp_flow.requires), 1) diff --git a/octavia/tests/unit/controller/worker/flows/test_health_monitor_flows.py b/octavia/tests/unit/controller/worker/flows/test_health_monitor_flows.py new file mode 100644 index 0000000000..64cc2979bc --- /dev/null +++ b/octavia/tests/unit/controller/worker/flows/test_health_monitor_flows.py @@ -0,0 +1,72 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +from taskflow.patterns import linear_flow as flow + +from octavia.controller.worker.flows import health_monitor_flows +import octavia.tests.unit.base as base + + +class TestHealthMonitorFlows(base.TestCase): + + def setUp(self): + self.HealthMonitorFlow = health_monitor_flows.HealthMonitorFlows() + + super(TestHealthMonitorFlows, self).setUp() + + def test_get_create_health_monitor_flow(self): + + health_mon_flow = (self.HealthMonitorFlow. 
+ get_create_health_monitor_flow()) + + self.assertIsInstance(health_mon_flow, flow.Flow) + + self.assertIn('listener', health_mon_flow.requires) + self.assertIn('loadbalancer', health_mon_flow.requires) + self.assertIn('vip', health_mon_flow.requires) + + self.assertEqual(len(health_mon_flow.requires), 3) + self.assertEqual(len(health_mon_flow.provides), 0) + + def test_get_delete_health_monitor_flow(self): + + health_mon_flow = (self.HealthMonitorFlow. + get_delete_health_monitor_flow()) + + self.assertIsInstance(health_mon_flow, flow.Flow) + + self.assertIn('health_mon', health_mon_flow.requires) + self.assertIn('health_mon_id', health_mon_flow.requires) + self.assertIn('listener', health_mon_flow.requires) + self.assertIn('vip', health_mon_flow.requires) + + self.assertEqual(len(health_mon_flow.requires), 5) + self.assertEqual(len(health_mon_flow.provides), 0) + + def test_get_update_health_monitor_flow(self): + + health_mon_flow = (self.HealthMonitorFlow. + get_update_health_monitor_flow()) + + self.assertIsInstance(health_mon_flow, flow.Flow) + + self.assertIn('listener', health_mon_flow.requires) + self.assertIn('loadbalancer', health_mon_flow.requires) + self.assertIn('vip', health_mon_flow.requires) + self.assertIn('health_mon', health_mon_flow.requires) + self.assertIn('update_dict', health_mon_flow.requires) + + self.assertEqual(len(health_mon_flow.requires), 5) + self.assertEqual(len(health_mon_flow.provides), 0) diff --git a/octavia/tests/unit/controller/worker/flows/test_listener_flows.py b/octavia/tests/unit/controller/worker/flows/test_listener_flows.py new file mode 100644 index 0000000000..d62ce54ea1 --- /dev/null +++ b/octavia/tests/unit/controller/worker/flows/test_listener_flows.py @@ -0,0 +1,67 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +from taskflow.patterns import linear_flow as flow + +from octavia.controller.worker.flows import listener_flows +import octavia.tests.unit.base as base + + +class TestListenerFlows(base.TestCase): + + def setUp(self): + self.ListenerFlow = listener_flows.ListenerFlows() + + super(TestListenerFlows, self).setUp() + + def test_get_create_listener_flow(self): + + listener_flow = self.ListenerFlow.get_create_listener_flow() + + self.assertIsInstance(listener_flow, flow.Flow) + + self.assertIn('listener', listener_flow.requires) + self.assertIn('loadbalancer', listener_flow.requires) + self.assertIn('vip', listener_flow.requires) + + self.assertEqual(len(listener_flow.requires), 3) + self.assertEqual(len(listener_flow.provides), 0) + + def test_get_delete_listener_flow(self): + + listener_flow = self.ListenerFlow.get_delete_listener_flow() + + self.assertIsInstance(listener_flow, flow.Flow) + + self.assertIn('listener', listener_flow.requires) + self.assertIn('loadbalancer', listener_flow.requires) + self.assertIn('vip', listener_flow.requires) + + self.assertEqual(len(listener_flow.requires), 3) + self.assertEqual(len(listener_flow.provides), 0) + + def test_get_update_listener_flow(self): + + listener_flow = self.ListenerFlow.get_update_listener_flow() + + self.assertIsInstance(listener_flow, flow.Flow) + + self.assertIn('listener', listener_flow.requires) + self.assertIn('loadbalancer', listener_flow.requires) + self.assertIn('vip', listener_flow.requires) + self.assertIn('update_dict', listener_flow.requires) + + 
self.assertEqual(len(listener_flow.requires), 4) + self.assertEqual(len(listener_flow.provides), 0) diff --git a/octavia/tests/unit/controller/worker/flows/test_load_balancer_flows.py b/octavia/tests/unit/controller/worker/flows/test_load_balancer_flows.py new file mode 100644 index 0000000000..026c44d78d --- /dev/null +++ b/octavia/tests/unit/controller/worker/flows/test_load_balancer_flows.py @@ -0,0 +1,76 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +from taskflow.patterns import linear_flow as flow + +from octavia.controller.worker.flows import load_balancer_flows +import octavia.tests.unit.base as base + + +class TestLoadBalancerFlows(base.TestCase): + + def setUp(self): + self.LBFlow = load_balancer_flows.LoadBalancerFlows() + + super(TestLoadBalancerFlows, self).setUp() + + def test_get_create_load_balancer_flow(self): + + lb_flow = self.LBFlow.get_create_load_balancer_flow() + + self.assertIsInstance(lb_flow, flow.Flow) + + self.assertIn('amphora', lb_flow.provides) + self.assertIn('delta', lb_flow.provides) + self.assertIn('nics', lb_flow.provides) + + self.assertIn('loadbalancer', lb_flow.requires) + + self.assertEqual(len(lb_flow.provides), 4) + self.assertEqual(len(lb_flow.requires), 3) + + def test_get_delete_load_balancer_flow(self): + + lb_flow = self.LBFlow.get_delete_load_balancer_flow() + + self.assertIsInstance(lb_flow, flow.Flow) + + self.assertIn('loadbalancer', lb_flow.requires) + + self.assertEqual(len(lb_flow.provides), 0) + self.assertEqual(len(lb_flow.requires), 1) + + def test_get_new_LB_networking_subflow(self): + + lb_flow = self.LBFlow.get_new_LB_networking_subflow() + + self.assertIsInstance(lb_flow, flow.Flow) + + self.assertIn('amphora', lb_flow.requires) + self.assertIn('loadbalancer', lb_flow.requires) + + self.assertEqual(len(lb_flow.provides), 2) + self.assertEqual(len(lb_flow.requires), 2) + + def test_get_update_load_balancer_flow(self): + + lb_flow = self.LBFlow.get_update_load_balancer_flow() + + self.assertIsInstance(lb_flow, flow.Flow) + + self.assertIn('loadbalancer', lb_flow.requires) + + self.assertEqual(len(lb_flow.provides), 0) + self.assertEqual(len(lb_flow.requires), 1) diff --git a/octavia/tests/unit/controller/worker/flows/test_member_flows.py b/octavia/tests/unit/controller/worker/flows/test_member_flows.py new file mode 100644 index 0000000000..3d33fb513b --- /dev/null +++ b/octavia/tests/unit/controller/worker/flows/test_member_flows.py @@ -0,0 +1,69 @@ 
+# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +from taskflow.patterns import linear_flow as flow + +from octavia.controller.worker.flows import member_flows +import octavia.tests.unit.base as base + + +class TestMemberFlows(base.TestCase): + + def setUp(self): + self.MemberFlow = member_flows.MemberFlows() + + super(TestMemberFlows, self).setUp() + + def test_get_create_member_flow(self): + + member_flow = self.MemberFlow.get_create_member_flow() + + self.assertIsInstance(member_flow, flow.Flow) + + self.assertIn('listener', member_flow.requires) + self.assertIn('loadbalancer', member_flow.requires) + self.assertIn('vip', member_flow.requires) + + self.assertEqual(len(member_flow.requires), 3) + self.assertEqual(len(member_flow.provides), 0) + + def test_get_delete_member_flow(self): + + member_flow = self.MemberFlow.get_delete_member_flow() + + self.assertIsInstance(member_flow, flow.Flow) + + self.assertIn('listener', member_flow.requires) + self.assertIn('loadbalancer', member_flow.requires) + self.assertIn('member', member_flow.requires) + self.assertIn('member_id', member_flow.requires) + self.assertIn('vip', member_flow.requires) + + self.assertEqual(len(member_flow.requires), 5) + self.assertEqual(len(member_flow.provides), 0) + + def test_get_update_member_flow(self): + + member_flow = self.MemberFlow.get_update_member_flow() + + self.assertIsInstance(member_flow, flow.Flow) + + 
self.assertIn('listener', member_flow.requires) + self.assertIn('loadbalancer', member_flow.requires) + self.assertIn('vip', member_flow.requires) + self.assertIn('update_dict', member_flow.requires) + + self.assertEqual(len(member_flow.requires), 5) + self.assertEqual(len(member_flow.provides), 0) diff --git a/octavia/tests/unit/controller/worker/flows/test_pool_flows.py b/octavia/tests/unit/controller/worker/flows/test_pool_flows.py new file mode 100644 index 0000000000..284a8662b3 --- /dev/null +++ b/octavia/tests/unit/controller/worker/flows/test_pool_flows.py @@ -0,0 +1,70 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +from taskflow.patterns import linear_flow as flow + +from octavia.controller.worker.flows import pool_flows +import octavia.tests.unit.base as base + + +class TestPoolFlows(base.TestCase): + + def setUp(self): + self.PoolFlow = pool_flows.PoolFlows() + + super(TestPoolFlows, self).setUp() + + def test_get_create_pool_flow(self): + + pool_flow = self.PoolFlow.get_create_pool_flow() + + self.assertIsInstance(pool_flow, flow.Flow) + + self.assertIn('listener', pool_flow.requires) + self.assertIn('loadbalancer', pool_flow.requires) + self.assertIn('vip', pool_flow.requires) + + self.assertEqual(len(pool_flow.requires), 3) + self.assertEqual(len(pool_flow.provides), 0) + + def test_get_delete_pool_flow(self): + + pool_flow = self.PoolFlow.get_delete_pool_flow() + + self.assertIsInstance(pool_flow, flow.Flow) + + self.assertIn('listener', pool_flow.requires) + self.assertIn('loadbalancer', pool_flow.requires) + self.assertIn('pool', pool_flow.requires) + self.assertIn('pool_id', pool_flow.requires) + self.assertIn('vip', pool_flow.requires) + + self.assertEqual(len(pool_flow.requires), 5) + self.assertEqual(len(pool_flow.provides), 0) + + def test_get_update_pool_flow(self): + + pool_flow = self.PoolFlow.get_update_pool_flow() + + self.assertIsInstance(pool_flow, flow.Flow) + + self.assertIn('pool', pool_flow.requires) + self.assertIn('listener', pool_flow.requires) + self.assertIn('loadbalancer', pool_flow.requires) + self.assertIn('vip', pool_flow.requires) + self.assertIn('update_dict', pool_flow.requires) + + self.assertEqual(len(pool_flow.requires), 5) + self.assertEqual(len(pool_flow.provides), 0) diff --git a/octavia/tests/unit/controller/worker/tasks/__init__.py b/octavia/tests/unit/controller/worker/tasks/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/octavia/tests/unit/controller/worker/tasks/test_amphora_driver_tasks.py b/octavia/tests/unit/controller/worker/tasks/test_amphora_driver_tasks.py new file mode 100644 index 
0000000000..04160fd044 --- /dev/null +++ b/octavia/tests/unit/controller/worker/tasks/test_amphora_driver_tasks.py @@ -0,0 +1,237 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +import mock +from oslo_utils import uuidutils + +from octavia.common import constants +from octavia.controller.worker.tasks import amphora_driver_tasks +from octavia.db import repositories as repo +import octavia.tests.unit.base as base + +AMP_ID = uuidutils.generate_uuid() +LISTENER_ID = uuidutils.generate_uuid() +LB_ID = uuidutils.generate_uuid() + +_amphora_mock = mock.MagicMock() +_amphora_mock.id = AMP_ID +_listener_mock = mock.MagicMock() +_listener_mock.id = LISTENER_ID +_vip_mock = mock.MagicMock() +_LB_mock = mock.MagicMock() + + +@mock.patch('octavia.db.repositories.AmphoraRepository.update') +@mock.patch('octavia.db.repositories.ListenerRepository.update') +@mock.patch('octavia.db.api.get_session', return_value='TEST') +@mock.patch('octavia.controller.worker.tasks.amphora_driver_tasks.LOG') +@mock.patch('oslo_utils.uuidutils.generate_uuid', return_value=AMP_ID) +@mock.patch('stevedore.driver.DriverManager.driver') +class TestAmphoraDriverTasks(base.TestCase): + + def setUp(self): + + _LB_mock.amphorae = _amphora_mock + _LB_mock.id = LB_ID + super(TestAmphoraDriverTasks, self).setUp() + + def test_listener_update(self, + mock_driver, + mock_generate_uuid, + mock_log, + mock_get_session, + mock_listener_repo_update, 
mock_amphora_repo_update): + + listener_update_obj = amphora_driver_tasks.ListenerUpdate() + listener_update_obj.execute(_listener_mock, _vip_mock) + + mock_driver.update.assert_called_once_with( + _listener_mock, _vip_mock) + + # Test the revert + amp = listener_update_obj.revert(_listener_mock) + repo.ListenerRepository.update.assert_called_once_with( + 'TEST', + id=LISTENER_ID, + provisioning_status=constants.ERROR) + self.assertIsNone(amp) + + def test_listener_stop(self, + mock_driver, + mock_generate_uuid, + mock_log, + mock_get_session, + mock_listener_repo_update, + mock_amphora_repo_update): + + listener_stop_obj = amphora_driver_tasks.ListenerStop() + listener_stop_obj.execute(_listener_mock, _vip_mock) + + mock_driver.stop.assert_called_once_with( + _listener_mock, _vip_mock) + + # Test the revert + amp = listener_stop_obj.revert(_listener_mock) + repo.ListenerRepository.update.assert_called_once_with( + 'TEST', + id=LISTENER_ID, + provisioning_status=constants.ERROR) + self.assertIsNone(amp) + + def test_listener_start(self, + mock_driver, + mock_generate_uuid, + mock_log, + mock_get_session, + mock_listener_repo_update, + mock_amphora_repo_update): + + listener_start_obj = amphora_driver_tasks.ListenerStart() + listener_start_obj.execute(_listener_mock, _vip_mock) + + mock_driver.start.assert_called_once_with( + _listener_mock, _vip_mock) + + # Test the revert + amp = listener_start_obj.revert(_listener_mock) + repo.ListenerRepository.update.assert_called_once_with( + 'TEST', + id=LISTENER_ID, + provisioning_status=constants.ERROR) + self.assertIsNone(amp) + + def test_listener_delete(self, + mock_driver, + mock_generate_uuid, + mock_log, + mock_get_session, + mock_listener_repo_update, + mock_amphora_repo_update): + + listener_delete_obj = amphora_driver_tasks.ListenerDelete() + listener_delete_obj.execute(_listener_mock, _vip_mock) + + mock_driver.delete.assert_called_once_with( + _listener_mock, _vip_mock) + + # Test the revert + amp = 
listener_delete_obj.revert(_listener_mock) + repo.ListenerRepository.update.assert_called_once_with( + 'TEST', + id=LISTENER_ID, + provisioning_status=constants.ERROR) + self.assertIsNone(amp) + + def test_amphora_get_info(self, + mock_driver, + mock_generate_uuid, + mock_log, + mock_get_session, + mock_listener_repo_update, + mock_amphora_repo_update): + + amphora_get_info_obj = amphora_driver_tasks.AmphoraGetInfo() + amphora_get_info_obj.execute(_amphora_mock) + + mock_driver.get_info.assert_called_once_with( + _amphora_mock) + + def test_amphora_get_diagnostics(self, + mock_driver, + mock_generate_uuid, + mock_log, + mock_get_session, + mock_listener_repo_update, + mock_amphora_repo_update): + + amphora_get_diagnostics_obj = (amphora_driver_tasks. + AmphoraGetDiagnostics()) + amphora_get_diagnostics_obj.execute(_amphora_mock) + + mock_driver.get_diagnostics.assert_called_once_with( + _amphora_mock) + + def test_amphora_finalize(self, + mock_driver, + mock_generate_uuid, + mock_log, + mock_get_session, + mock_listener_repo_update, + mock_amphora_repo_update): + + amphora_finalize_obj = amphora_driver_tasks.AmphoraFinalize() + amphora_finalize_obj.execute(_amphora_mock) + + mock_driver.finalize_amphora.assert_called_once_with( + _amphora_mock) + + # Test revert + amp = amphora_finalize_obj.revert(_amphora_mock) + repo.AmphoraRepository.update.assert_called_once_with( + 'TEST', + id=AMP_ID, + provisioning_status=constants.ERROR) + self.assertIsNone(amp) + + def test_amphora_post_network_plug(self, + mock_driver, + mock_generate_uuid, + mock_log, + mock_get_session, + mock_listener_repo_update, + mock_amphora_repo_update): + + amphora_post_network_plug_obj = (amphora_driver_tasks. + AmphoraPostNetworkPlug()) + amphora_post_network_plug_obj.execute(_amphora_mock) + + (mock_driver.post_network_plug. 
+ assert_called_once_with)(_amphora_mock) + + # Test revert + amp = amphora_post_network_plug_obj.revert(_amphora_mock) + repo.AmphoraRepository.update.assert_called_once_with( + 'TEST', + id=AMP_ID, + provisioning_status=constants.ERROR) + + self.assertIsNone(amp) + + @mock.patch('octavia.db.repositories.LoadBalancerRepository.update') + def test_amphora_post_vip_plug(self, + mock_loadbalancer_repo_update, + mock_driver, + mock_generate_uuid, + mock_log, + mock_get_session, + mock_listener_repo_update, + mock_amphora_repo_update): + + amphora_post_vip_plug_obj = (amphora_driver_tasks. + AmphoraPostVIPPlug()) + amphora_post_vip_plug_obj.execute(_LB_mock) + + (mock_driver.post_vip_plug. + assert_called_once_with)(_LB_mock) + + # Test revert + amp = amphora_post_vip_plug_obj.revert(_LB_mock) + repo.LoadBalancerRepository.update.assert_called_once_with( + 'TEST', + id=LB_ID, + provisioning_status=constants.ERROR) + + self.assertIsNone(amp) diff --git a/octavia/tests/unit/controller/worker/tasks/test_compute_tasks.py b/octavia/tests/unit/controller/worker/tasks/test_compute_tasks.py new file mode 100644 index 0000000000..47ddb28395 --- /dev/null +++ b/octavia/tests/unit/controller/worker/tasks/test_compute_tasks.py @@ -0,0 +1,141 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +import time + +import mock +from oslo_config import cfg +from oslo_config import fixture as oslo_fixture +from oslo_utils import uuidutils + +from octavia.common import constants +from octavia.common import exceptions +from octavia.controller.worker.tasks import compute_tasks +import octavia.tests.unit.base as base + +AMP_FLAVOR_ID = 10 +AMP_IMAGE_ID = 11 +AMP_SSH_KEY = None +AMP_NET = uuidutils.generate_uuid() +AMP_SEC_GROUPS = None +AMP_WAIT = 12 +AMPHORA_ID = uuidutils.generate_uuid() +COMPUTE_ID = uuidutils.generate_uuid() +LB_NET_IP = '192.0.2.1' +AUTH_VERSION = '2' + + +class TestException(Exception): + + def __init__(self, value): + self.value = value + + def __str__(self): + return repr(self.value) + +_amphora_mock = mock.MagicMock() + + +class TestComputeTasks(base.TestCase): + + def setUp(self): + conf = oslo_fixture.Config(cfg.CONF) + conf.config(group="controller_worker", amp_flavor_id=AMP_FLAVOR_ID) + conf.config(group="controller_worker", amp_image_id=AMP_IMAGE_ID) + conf.config(group="controller_worker", amp_ssh_key=AMP_SSH_KEY) + conf.config(group="controller_worker", amp_network=AMP_NET) + conf.config(group="controller_worker", amp_active_wait_sec=AMP_WAIT) + conf.config(group="keystone_authtoken", auth_version=AUTH_VERSION) + + _amphora_mock.id = AMPHORA_ID + + logging_mock = mock.MagicMock() + compute_tasks.LOG = logging_mock + + super(TestComputeTasks, self).setUp() + + @mock.patch('stevedore.driver.DriverManager.driver') + def test_compute_create(self, + mock_driver): + + mock_driver.build.side_effect = [COMPUTE_ID, TestException('test')] + + # Test execute() + createcompute = compute_tasks.ComputeCreate() + amphora = createcompute.execute(_amphora_mock) + + # Validate that the build method was called properly + mock_driver.build.assert_called_once_with( + name="amphora-" + AMPHORA_ID, + amphora_flavor=AMP_FLAVOR_ID, + image_id=AMP_IMAGE_ID, + key_name=AMP_SSH_KEY, + sec_groups=AMP_SEC_GROUPS, + network_ids=AMP_NET) + + # Make sure it 
returns the expected compute_id + assert(amphora.compute_id == COMPUTE_ID) + + # Test that a build exception is raised + + createcompute = compute_tasks.ComputeCreate() + self.assertRaises(TestException, + createcompute.execute, + amphora=_amphora_mock) + + # Test revert() + + _amphora_mock.compute_id = COMPUTE_ID + + createcompute = compute_tasks.ComputeCreate() + createcompute.revert(_amphora_mock) + + # Validate that the compute_id is cleared + self.assertIsNone(_amphora_mock.compute_id) + + # Validate that the delete method was called properly + mock_driver.delete.assert_called_once_with( + COMPUTE_ID) + + # Test that a delete exception is not raised + + createcompute.revert(_amphora_mock) + + @mock.patch('stevedore.driver.DriverManager.driver') + @mock.patch('time.sleep') + def test_compute_wait(self, + mock_time_sleep, + mock_driver): + + _amphora_mock.compute_id = COMPUTE_ID + _amphora_mock.status = constants.ACTIVE + _amphora_mock.lb_network_ip = LB_NET_IP + + mock_driver.get_amphora.return_value = _amphora_mock + + computewait = compute_tasks.ComputeWait() + amphora = computewait.execute(_amphora_mock) + + time.sleep.assert_called_once_with(AMP_WAIT) + + mock_driver.get_amphora.assert_called_once_with(COMPUTE_ID) + + assert(amphora.lb_network_ip == LB_NET_IP) + + _amphora_mock.status = constants.DELETED + + self.assertRaises(exceptions.ComputeWaitTimeoutException, + computewait.execute, + _amphora_mock) diff --git a/octavia/tests/unit/controller/worker/tasks/test_controller_tasks.py b/octavia/tests/unit/controller/worker/tasks/test_controller_tasks.py new file mode 100644 index 0000000000..637d9095c7 --- /dev/null +++ b/octavia/tests/unit/controller/worker/tasks/test_controller_tasks.py @@ -0,0 +1,120 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +import mock +from oslo_utils import uuidutils + +from octavia.controller.worker import controller_worker +from octavia.controller.worker.tasks import controller_tasks +from octavia.db import repositories as repo +import octavia.tests.unit.base as base + +AMP_ID = uuidutils.generate_uuid() +LB1_ID = uuidutils.generate_uuid() +LB2_ID = uuidutils.generate_uuid() +LISTENER1_ID = uuidutils.generate_uuid() +LISTENER2_ID = uuidutils.generate_uuid() + +_lb1_mock = mock.MagicMock() +_lb1_mock.id = LB1_ID +_lb2_mock = mock.MagicMock() +_lb2_mock.id = LB2_ID +_lbs = [_lb1_mock, _lb2_mock] + +_listener1_mock = mock.MagicMock() +_listener1_mock.id = LISTENER1_ID +_listener1_mock.enabled = False +_listener2_mock = mock.MagicMock() +_listener2_mock.id = LISTENER2_ID +_listener2_mock.enabled = True +_listeners = [_listener1_mock, _listener2_mock] + + +@mock.patch('octavia.db.api.get_session', return_value='TEST') +class TestControllerTasks(base.TestCase): + + def setUp(self): + + self.amphora_mock = mock.MagicMock() + self.amphora_mock.id = AMP_ID + + self.loadbalancer_mock = mock.MagicMock() + self.loadbalancer_mock.id = LB1_ID + self.loadbalancer_mock.enabled = True + + super(TestControllerTasks, self).setUp() + + @mock.patch('octavia.controller.worker.controller_worker.' + 'ControllerWorker.delete_load_balancer') + @mock.patch('octavia.db.repositories.AmphoraRepository.' 
+ 'get_all_lbs_on_amphora', + return_value=_lbs) + def test_delete_load_balancers_on_amp(self, + mock_get_all_lbs_on_amp, + mock_delete_lb, + mock_get_session): + + delete_lbs_on_amp = controller_tasks.DeleteLoadBalancersOnAmp() + delete_lbs_on_amp.execute(self.amphora_mock) + + repo.AmphoraRepository.get_all_lbs_on_amphora.assert_called_once_with( + 'TEST', + amphora_id=AMP_ID) + + (controller_worker. + ControllerWorker.delete_load_balancer.assert_has_calls)([ + mock.call(LB1_ID), + mock.call(LB2_ID)], any_order=False) + + @mock.patch('octavia.controller.worker.controller_worker.' + 'ControllerWorker.delete_listener') + @mock.patch('octavia.db.repositories.ListenerRepository.' + 'get_all', return_value=_listeners) + def test_delete_listeners_on_lb(self, + mock_get_all, + mock_delete_listener, + mock_get_session): + + delete_listeners_on_lb = controller_tasks.DeleteListenersOnLB() + delete_listeners_on_lb.execute(self.loadbalancer_mock) + + repo.ListenerRepository.get_all.assert_called_once_with( + 'TEST', + load_balancer_id=LB1_ID) + + (controller_worker. + ControllerWorker.delete_listener.assert_has_calls)([ + mock.call(LISTENER1_ID), + mock.call(LISTENER2_ID)], any_order=False) + + @mock.patch('octavia.controller.worker.controller_worker.' + 'ControllerWorker.update_listener') + @mock.patch('octavia.db.repositories.ListenerRepository.' + 'get_all', return_value=_listeners) + def test_disable_enable_lb(self, + mock_get_all, + mock_update_listener, + mock_get_session): + + disable_enable_lb = controller_tasks.DisableEnableLB() + disable_enable_lb.execute(self.loadbalancer_mock) + + repo.ListenerRepository.get_all.assert_called_once_with( + 'TEST', + load_balancer_id=LB1_ID) + + (controller_worker. 
+ ControllerWorker.update_listener.assert_has_calls)([ + mock.call({'enabled': True}, LISTENER1_ID)], any_order=False) diff --git a/octavia/tests/unit/controller/worker/tasks/test_database_tasks.py b/octavia/tests/unit/controller/worker/tasks/test_database_tasks.py new file mode 100644 index 0000000000..b78434daef --- /dev/null +++ b/octavia/tests/unit/controller/worker/tasks/test_database_tasks.py @@ -0,0 +1,746 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +import mock +from oslo_utils import uuidutils + +from octavia.common import constants +from octavia.controller.worker.tasks import database_tasks +from octavia.db import repositories as repo +import octavia.tests.unit.base as base + +AMP_ID = uuidutils.generate_uuid() +COMPUTE_ID = uuidutils.generate_uuid() +HM_ID = uuidutils.generate_uuid() +LB_ID = uuidutils.generate_uuid() +LB_NET_IP = '192.0.2.2' +LISTENER_ID = uuidutils.generate_uuid() +POOL_ID = uuidutils.generate_uuid() +MEMBER_ID = uuidutils.generate_uuid() + +_amphora_mock = mock.MagicMock() +_amphora_mock.id = AMP_ID +_amphora_mock.compute_id = COMPUTE_ID +_amphora_mock.lb_network_ip = LB_NET_IP +_loadbalancer_mock = mock.MagicMock() +_loadbalancer_mock.id = LB_ID + + +@mock.patch('octavia.db.repositories.AmphoraRepository.update') +@mock.patch('octavia.db.repositories.ListenerRepository.update') +@mock.patch('octavia.db.repositories.LoadBalancerRepository.update') +@mock.patch('octavia.db.api.get_session', return_value='TEST') +@mock.patch('octavia.controller.worker.tasks.database_tasks.LOG') +@mock.patch('oslo_utils.uuidutils.generate_uuid', return_value=AMP_ID) +class TestDatabaseTasks(base.TestCase): + + def setUp(self): + + self.health_mon_mock = mock.MagicMock() + self.health_mon_mock.pool_id = HM_ID + + self.listener_mock = mock.MagicMock() + self.listener_mock.id = LISTENER_ID + + self.loadbalancer_mock = mock.MagicMock() + self.loadbalancer_mock.id = LB_ID + + self.member_mock = mock.MagicMock() + self.member_mock.id = MEMBER_ID + + self.pool_mock = mock.MagicMock() + self.pool_mock.id = POOL_ID + + super(TestDatabaseTasks, self).setUp() + + @mock.patch('octavia.db.repositories.AmphoraRepository.create', + return_value=_amphora_mock) + def test_create_amphora_in_db(self, + mock_create, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update): + + create_amp_in_db = database_tasks.CreateAmphoraInDB() + amp 
= create_amp_in_db.execute() + + repo.AmphoraRepository.create.assert_called_once_with( + 'TEST', + id=AMP_ID, + status=constants.PENDING_CREATE) + + assert(amp == _amphora_mock) + + # Test the revert + +# TODO(johnsom) finish when this method is updated + amp = create_amp_in_db.revert() + + self.assertIsNone(amp) + + amp = create_amp_in_db.revert(result='TEST') + + self.assertIsNone(amp) + +# repo.AmphoraRepository.update.assert_called_once_with( +# 'TEST', +# AMP_ID, +# status=constants.ERROR, +# compute_id=COMPUTE_ID) + + @mock.patch('octavia.db.repositories.HealthMonitorRepository.delete') + def test_delete_health_monitor_in_db(self, + mock_health_mon_repo_delete, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update): + + delete_health_mon = database_tasks.DeleteHealthMonitorInDB() + delete_health_mon.execute(HM_ID) + + repo.HealthMonitorRepository.delete.assert_called_once_with( + 'TEST', + HM_ID) + + # Test the revert + + mock_health_mon_repo_delete.reset_mock() + delete_health_mon.revert(HM_ID) + +# TODO(johnsom) fix once provisioning status added +# repo.HealthMonitorRepository.update.assert_called_once_with( +# 'TEST', +# HM_ID, +# provisioning_status=constants.ERROR) + + @mock.patch('octavia.db.repositories.MemberRepository.delete') + def test_delete_member_in_db(self, + mock_member_repo_delete, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update): + + delete_member = database_tasks.DeleteMemberInDB() + delete_member.execute(MEMBER_ID) + + repo.MemberRepository.delete.assert_called_once_with( + 'TEST', + MEMBER_ID) + + # Test the revert + + mock_member_repo_delete.reset_mock() + delete_member.revert(MEMBER_ID) + +# TODO(johnsom) Fix +# repo.MemberRepository.delete.assert_called_once_with( +# 'TEST', +# MEMBER_ID) + + @mock.patch('octavia.db.repositories.PoolRepository.delete') 
+ def test_delete_pool_in_db(self, + mock_pool_repo_delete, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update): + + delete_pool = database_tasks.DeletePoolInDB() + delete_pool.execute(POOL_ID) + + repo.PoolRepository.delete.assert_called_once_with( + 'TEST', + POOL_ID) + + # Test the revert + + mock_pool_repo_delete.reset_mock() + delete_pool.revert(POOL_ID) + +# TODO(johnsom) Fix +# repo.PoolRepository.update.assert_called_once_with( +# 'TEST', +# POOL_ID, +# operating_status=constants.ERROR) + + @mock.patch('octavia.db.repositories.AmphoraRepository.get', + return_value=_amphora_mock) + def test_get_amphora_by_id(self, + mock_amp_get, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update): + + get_amp_by_id = database_tasks.GetAmphoraByID() + amp = get_amp_by_id.execute(AMP_ID) + + repo.AmphoraRepository.get.assert_called_once_with( + 'TEST', + id=AMP_ID) + + self.assertEqual(_amphora_mock, amp) + + @mock.patch('octavia.db.repositories.LoadBalancerRepository.get', + return_value=_loadbalancer_mock) + def test_get_loadbalancer_by_id(self, + mock_lb_get, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update): + + get_lb_by_id = database_tasks.GetLoadbalancerByID() + lb = get_lb_by_id.execute(LB_ID) + + repo.LoadBalancerRepository.get.assert_called_once_with( + 'TEST', + id=LB_ID) + + self.assertEqual(_loadbalancer_mock, lb) + + @mock.patch('octavia.db.repositories.AmphoraRepository.' 
+ 'allocate_and_associate', + side_effect=[_amphora_mock, None]) + def test_map_loadbalancer_to_amphora(self, + mock_allocate_and_associate, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update): + + map_lb_to_amp = database_tasks.MapLoadbalancerToAmphora() + amp = map_lb_to_amp.execute(self.loadbalancer_mock) + + repo.AmphoraRepository.allocate_and_associate.assert_called_once_with( + 'TEST', + LB_ID) + + assert amp == _amphora_mock + + amp = map_lb_to_amp.execute(self.loadbalancer_mock) + + self.assertIsNone(amp) + + @mock.patch('octavia.db.repositories.AmphoraRepository.get', + return_value=_amphora_mock) + @mock.patch('octavia.db.repositories.LoadBalancerRepository.get', + return_value=_loadbalancer_mock) + def test_mark_amphora_allocated_in_db(self, + mock_loadbalancer_repo_get, + mock_amphora_repo_get, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update): + + mark_amp_allocated_in_db = (database_tasks. 
+ MarkAmphoraAllocatedInDB()) + mark_amp_allocated_in_db.execute(_amphora_mock, + self.loadbalancer_mock) + + repo.AmphoraRepository.update.assert_called_once_with( + 'TEST', + AMP_ID, + status=constants.AMPHORA_ALLOCATED, + compute_id=COMPUTE_ID, + lb_network_ip=LB_NET_IP, + load_balancer_id=LB_ID) + + # Test the revert + + mock_amphora_repo_update.reset_mock() + mark_amp_allocated_in_db.revert(_amphora_mock) + + repo.AmphoraRepository.update.assert_called_once_with( + 'TEST', + AMP_ID, + status=constants.ERROR) + + def test_mark_amphora_booting_in_db(self, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update): + + mark_amp_booting_in_db = database_tasks.MarkAmphoraBootingInDB() + mark_amp_booting_in_db.execute(_amphora_mock) + + repo.AmphoraRepository.update.assert_called_once_with( + 'TEST', + AMP_ID, + status=constants.AMPHORA_BOOTING, + compute_id=COMPUTE_ID) + + # Test the revert + + mock_amphora_repo_update.reset_mock() + mark_amp_booting_in_db.revert(_amphora_mock) + + repo.AmphoraRepository.update.assert_called_once_with( + 'TEST', + AMP_ID, + status=constants.ERROR, + compute_id=COMPUTE_ID) + + def test_mark_amphora_deleted_in_db(self, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update): + + mark_amp_deleted_in_db = database_tasks.MarkAmphoraDeletedInDB() + mark_amp_deleted_in_db.execute(_amphora_mock) + + repo.AmphoraRepository.update.assert_called_once_with( + 'TEST', + AMP_ID, + status=constants.DELETED) + + # Test the revert + + mock_amphora_repo_update.reset_mock() + mark_amp_deleted_in_db.revert(_amphora_mock) + + repo.AmphoraRepository.update.assert_called_once_with( + 'TEST', + AMP_ID, + status=constants.ERROR) + + def test_mark_amphora_pending_delete_in_db(self, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + 
mock_listener_repo_update, + mock_amphora_repo_update): + + mark_amp_pending_delete_in_db = (database_tasks. + MarkAmphoraPendingDeleteInDB()) + mark_amp_pending_delete_in_db.execute(_amphora_mock) + + repo.AmphoraRepository.update.assert_called_once_with( + 'TEST', + AMP_ID, + status=constants.PENDING_DELETE) + + # Test the revert + + mock_amphora_repo_update.reset_mock() + mark_amp_pending_delete_in_db.revert(_amphora_mock) + + repo.AmphoraRepository.update.assert_called_once_with( + 'TEST', + AMP_ID, + status=constants.ERROR) + + def test_mark_amphora_pending_update_in_db(self, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update): + + mark_amp_pending_update_in_db = (database_tasks. + MarkAmphoraPendingUpdateInDB()) + mark_amp_pending_update_in_db.execute(_amphora_mock) + + repo.AmphoraRepository.update.assert_called_once_with( + 'TEST', + AMP_ID, + status=constants.PENDING_UPDATE) + + # Test the revert + + mock_amphora_repo_update.reset_mock() + mark_amp_pending_update_in_db.revert(_amphora_mock) + + repo.AmphoraRepository.update.assert_called_once_with( + 'TEST', + AMP_ID, + status=constants.ERROR) + + def test_mark_amphora_ready_in_db(self, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update): + + _amphora_mock.lb_network_ip = LB_NET_IP + + mark_amp_ready_in_db = database_tasks.MarkAmphoraReadyInDB() + mark_amp_ready_in_db.execute(_amphora_mock) + + repo.AmphoraRepository.update.assert_called_once_with( + 'TEST', + AMP_ID, + status=constants.AMPHORA_READY, + compute_id=COMPUTE_ID, + lb_network_ip=LB_NET_IP) + + # Test the revert + + mock_amphora_repo_update.reset_mock() + mark_amp_ready_in_db.revert(_amphora_mock) + + repo.AmphoraRepository.update.assert_called_once_with( + 'TEST', + AMP_ID, + status=constants.ERROR, + compute_id=COMPUTE_ID, + lb_network_ip=LB_NET_IP) + + def 
test_mark_listener_active_in_db(self, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update): + + mark_listener_active = database_tasks.MarkListenerActiveInDB() + mark_listener_active.execute(self.listener_mock) + + repo.ListenerRepository.update.assert_called_once_with( + 'TEST', + LISTENER_ID, + provisioning_status=constants.ACTIVE) + + # Test the revert + + mock_listener_repo_update.reset_mock() + mark_listener_active.revert(self.listener_mock) + + repo.ListenerRepository.update.assert_called_once_with( + 'TEST', + LISTENER_ID, + provisioning_status=constants.ERROR) + + def test_mark_listener_deleted_in_db(self, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update): + + mark_listener_deleted = database_tasks.MarkListenerDeletedInDB() + mark_listener_deleted.execute(self.listener_mock) + + repo.ListenerRepository.update.assert_called_once_with( + 'TEST', + LISTENER_ID, + provisioning_status=constants.DELETED) + + # Test the revert + + mock_listener_repo_update.reset_mock() + mark_listener_deleted.revert(self.listener_mock) + + repo.ListenerRepository.update.assert_called_once_with( + 'TEST', + LISTENER_ID, + provisioning_status=constants.ERROR) + + def test_mark_listener_pending_deleted_in_db(self, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update): + + mark_listener_pending_delete = (database_tasks. 
+ MarkListenerPendingDeleteInDB()) + mark_listener_pending_delete.execute(self.listener_mock) + + repo.ListenerRepository.update.assert_called_once_with( + 'TEST', + LISTENER_ID, + provisioning_status=constants.PENDING_DELETE) + + # Test the revert + + mock_listener_repo_update.reset_mock() + mark_listener_pending_delete.revert(self.listener_mock) + + repo.ListenerRepository.update.assert_called_once_with( + 'TEST', + LISTENER_ID, + provisioning_status=constants.ERROR) + + def test_mark_lb_and_listener_active_in_db(self, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update): + + mark_lb_and_listener_active = (database_tasks. + MarkLBAndListenerActiveInDB()) + mark_lb_and_listener_active.execute(self.loadbalancer_mock, + self.listener_mock) + + repo.ListenerRepository.update.assert_called_once_with( + 'TEST', + LISTENER_ID, + provisioning_status=constants.ACTIVE) + repo.LoadBalancerRepository.update.assert_called_once_with( + 'TEST', + LB_ID, + provisioning_status=constants.ACTIVE) + + # Test the revert + + mock_loadbalancer_repo_update.reset_mock() + mock_listener_repo_update.reset_mock() + + mark_lb_and_listener_active.revert(self.loadbalancer_mock, + self.listener_mock) + + repo.ListenerRepository.update.assert_called_once_with( + 'TEST', + LISTENER_ID, + provisioning_status=constants.ERROR) + repo.LoadBalancerRepository.update.assert_called_once_with( + 'TEST', + LB_ID, + provisioning_status=constants.ERROR) + + def test_mark_LB_active_in_db(self, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update): + + mark_loadbalancer_active = database_tasks.MarkLBActiveInDB() + mark_loadbalancer_active.execute(self.loadbalancer_mock) + + repo.LoadBalancerRepository.update.assert_called_once_with( + 'TEST', + LB_ID, + provisioning_status=constants.ACTIVE) + + # Test the revert + + 
mock_loadbalancer_repo_update.reset_mock() + mark_loadbalancer_active.revert(self.loadbalancer_mock) + + repo.LoadBalancerRepository.update.assert_called_once_with( + 'TEST', + LB_ID, + provisioning_status=constants.ERROR) + + def test_mark_LB_deleted_in_db(self, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update): + + mark_loadbalancer_deleted = database_tasks.MarkLBDeletedInDB() + mark_loadbalancer_deleted.execute(self.loadbalancer_mock) + + repo.LoadBalancerRepository.update.assert_called_once_with( + 'TEST', + LB_ID, + provisioning_status=constants.DELETED) + + # Test the revert + + mock_loadbalancer_repo_update.reset_mock() + mark_loadbalancer_deleted.revert(self.loadbalancer_mock) + + repo.LoadBalancerRepository.update.assert_called_once_with( + 'TEST', + LB_ID, + provisioning_status=constants.ERROR) + + def test_mark_LB_pending_deleted_in_db(self, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update): + + mark_loadbalancer_pending_delete = (database_tasks. 
+ MarkLBPendingDeleteInDB()) + mark_loadbalancer_pending_delete.execute(self.loadbalancer_mock) + + repo.LoadBalancerRepository.update.assert_called_once_with( + 'TEST', + LB_ID, + provisioning_status=constants.PENDING_DELETE) + + # Test the revert + + mock_loadbalancer_repo_update.reset_mock() + mark_loadbalancer_pending_delete.revert(self.loadbalancer_mock) + + repo.LoadBalancerRepository.update.assert_called_once_with( + 'TEST', + LB_ID, + provisioning_status=constants.ERROR) + + @mock.patch('octavia.db.repositories.HealthMonitorRepository.update') + def test_update_health_monitor_in_db(self, + mock_health_mon_repo_update, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update): + + update_health_mon = database_tasks.UpdateHealthMonInDB() + update_health_mon.execute(self.health_mon_mock, + {'delay': 1, 'timeout': 2}) + + repo.HealthMonitorRepository.update.assert_called_once_with( + 'TEST', + HM_ID, + delay=1, timeout=2) + + # Test the revert + + mock_health_mon_repo_update.reset_mock() + update_health_mon.revert(self.health_mon_mock) + +# TODO(johnsom) fix this to set the upper ojects to ERROR + repo.HealthMonitorRepository.update.assert_called_once_with( + 'TEST', + HM_ID, + enabled=0) + + @mock.patch('octavia.db.repositories.ListenerRepository.update') + def test_update_listener_in_db(self, + mock_listner_repo_update, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update): + + update_listener = database_tasks.UpdateListenerInDB() + update_listener.execute(self.listener_mock, + {'name': 'test', 'description': 'test2'}) + + repo.ListenerRepository.update.assert_called_once_with( + 'TEST', + LISTENER_ID, + name='test', description='test2') + + # Test the revert + + mock_listener_repo_update.reset_mock() + update_listener.revert(self.listener_mock) + +# TODO(johnsom) fix this to set the 
upper objects to ERROR
Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +import mock + +from octavia.controller.worker.tasks import model_tasks +import octavia.tests.unit.base as base + + +class TestObjectUpdateTasks(base.TestCase): + + def setUp(self): + + self.listener_mock = mock.MagicMock() + self.listener_mock.name = 'TEST' + + super(TestObjectUpdateTasks, self).setUp() + + def test_delete_model_object(self): + + delete_object = model_tasks.DeleteModelObject() + delete_object.execute(self.listener_mock) + + self.listener_mock.delete.assert_called_once() + + def test_update_listener(self): + + update_attr = model_tasks.UpdateAttributes() + update_attr.execute(self.listener_mock, + {'name': 'TEST2'}) + + assert self.listener_mock.name == 'TEST2' diff --git a/octavia/tests/unit/controller/worker/tasks/test_network_tasks.py b/octavia/tests/unit/controller/worker/tasks/test_network_tasks.py new file mode 100644 index 0000000000..b013f803e9 --- /dev/null +++ b/octavia/tests/unit/controller/worker/tasks/test_network_tasks.py @@ -0,0 +1,211 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +import mock +from oslo_config import cfg +from oslo_config import fixture as oslo_fixture +from oslo_utils import uuidutils + +from octavia.controller.worker.tasks import network_tasks +from octavia.network import base as net_base +from octavia.network import data_models +import octavia.tests.unit.base as base + + +AMPHORA_ID = 7 +COMPUTE_ID = uuidutils.generate_uuid() +PORT_ID = uuidutils.generate_uuid() +NETWORK_ID = uuidutils.generate_uuid() +IP_ADDRESS = "172.24.41.1" + + +class TestException(Exception): + + def __init__(self, value): + self.value = value + + def __str__(self): + return repr(self.value) + + +@mock.patch('stevedore.driver.DriverManager.driver') +class TestNetworkTasks(base.TestCase): + def setUp(self): + network_tasks.LOG = mock.MagicMock() + self.amphora_mock = mock.MagicMock() + self.amphora_mock.id = AMPHORA_ID + self.amphora_mock.compute_id = COMPUTE_ID + conf = oslo_fixture.Config(cfg.CONF) + conf.config(group="controller_worker", amp_network=[]) + + super(TestNetworkTasks, self).setUp() + + def test_calculate_delta(self, + mock_driver): + EMPTY = {'add': [], 'delete': []} + + def _interface(network_id): + return [data_models.Interface(network_id=network_id)] + + net = network_tasks.CalculateDelta() + + self.amphora_mock.load_balancer = None + self.assertEqual(EMPTY, net.execute(self.amphora_mock, [])) + + lb_mock = mock.MagicMock() + self.amphora_mock.load_balancer = lb_mock + lb_mock.listeners = None + self.assertEqual(EMPTY, net.execute(self.amphora_mock, [])) + + listener_mock = mock.MagicMock() + lb_mock.listeners = 
[listener_mock] + listener_mock.default_pool = None + self.assertEqual(EMPTY, net.execute(self.amphora_mock, [])) + + pool_mock = mock.MagicMock() + listener_mock.default_pool = pool_mock + pool_mock.members = None + self.assertEqual(EMPTY, net.execute(self.amphora_mock, [])) + + member_mock = mock.MagicMock() + pool_mock.members = [member_mock] + member_mock.subnet_id = 1 + self.assertEqual({'add': _interface(1), 'delete': []}, + net.execute(self.amphora_mock, [])) + + self.assertEqual(EMPTY, net.execute(self.amphora_mock, _interface(1))) + + result = {'add': _interface(1), 'delete': _interface(2)} + self.assertEqual(result, net.execute(self.amphora_mock, _interface(2))) + + pool_mock.members = [] + self.assertEqual({'add': [], 'delete': _interface(2)}, + net.execute(self.amphora_mock, _interface(2))) + + def test_get_plumbed_networks(self, + mock_driver): + mock_driver.get_plugged_networks.side_effect = [['blah']] + net = network_tasks.GetPlumbedNetworks() + + self.assertEqual(['blah'], net.execute(self.amphora_mock)) + mock_driver.get_plugged_networks.assert_called_once_with( + COMPUTE_ID) + + def test_plug_networks(self, + mock_driver): + net = network_tasks.PlugNetworks() + + net.execute(self.amphora_mock, None) + self.assertFalse(mock_driver.plug_network.called) + + delta = {'add': []} + net.execute(self.amphora_mock, delta) + self.assertFalse(mock_driver.plug_network.called) + + delta = {'add': [data_models.Interface(network_id=1)]} + net.execute(self.amphora_mock, delta) + mock_driver.plug_network.assert_called_once_with(COMPUTE_ID, 1) + + # revert + net.revert(self.amphora_mock, None) + self.assertFalse(mock_driver.unplug_network.called) + + delta = {'add': []} + net.revert(self.amphora_mock, delta) + self.assertFalse(mock_driver.unplug_network.called) + + delta = {'add': [data_models.Interface(network_id=1)]} + net.revert(self.amphora_mock, delta) + mock_driver.unplug_network.assert_called_once_with(COMPUTE_ID, 1) + + mock_driver.reset_mock() + 
mock_driver.unplug_network.side_effect = net_base.NetworkNotFound + net.revert(self.amphora_mock, delta) # No exception + mock_driver.unplug_network.assert_called_once_with(COMPUTE_ID, 1) + + mock_driver.reset_mock() + mock_driver.unplug_network.side_effect = TestException('test') + self.assertRaises(TestException, + net.revert, + self.amphora_mock, + delta) + mock_driver.unplug_network.assert_called_once_with(COMPUTE_ID, 1) + + def test_unplug_networks(self, + mock_driver): + net = network_tasks.UnPlugNetworks() + + net.execute(self.amphora_mock, None) + self.assertFalse(mock_driver.unplug_network.called) + + delta = {'delete': []} + net.execute(self.amphora_mock, delta) + self.assertFalse(mock_driver.unplug_network.called) + + delta = {'delete': [data_models.Interface(network_id=1)]} + net.execute(self.amphora_mock, delta) + mock_driver.unplug_network.assert_called_once_with(COMPUTE_ID, 1) + + mock_driver.reset_mock() + mock_driver.unplug_network.side_effect = net_base.NetworkNotFound + net.execute(self.amphora_mock, delta) # No exception + mock_driver.unplug_network.assert_called_once_with(COMPUTE_ID, 1) + + # Do a test with a general exception in case behavior changes + mock_driver.reset_mock() + mock_driver.unplug_network.side_effect = Exception() + net.execute(self.amphora_mock, delta) # No exception + mock_driver.unplug_network.assert_called_once_with(COMPUTE_ID, 1) + + def test_plug_vip(self, + mock_driver): + net = network_tasks.PlugVIP() + + lb_mock = mock.MagicMock() + self.amphora_mock.load_balancer = lb_mock + vip_mock = mock.MagicMock() + lb_mock.vip = vip_mock + + mock_driver.plug_vip.side_effect = ["vip"] + + net.execute(self.amphora_mock) + mock_driver.plug_vip.assert_called_once_with(lb_mock, vip_mock) + self.assertEqual("vip", lb_mock.vip) + + # revert + lb_mock.vip = vip_mock + net.revert(self.amphora_mock) + mock_driver.unplug_vip.assert_called_once_with(lb_mock, vip_mock) + + def test_allocate_vip(self, mock_driver): + net = 
network_tasks.AllocateVIP() + + mock_driver.allocate_vip.side_effect = ["vip"] + + mock_driver.reset_mock() + self.assertEqual("vip", net.execute(PORT_ID, NETWORK_ID, IP_ADDRESS)) + mock_driver.allocate_vip.assert_called_once_with(PORT_ID, + NETWORK_ID, + IP_ADDRESS) + # revert + vip_mock = mock.MagicMock() + net.revert(vip_mock) + mock_driver.deallocate_vip.assert_called_once_with(vip_mock) + + def test_deallocate_vip(self, mock_driver): + net = network_tasks.DeallocateVIP() + vip_mock = mock.MagicMock() + net.execute(vip_mock) + mock_driver.deallocate_vip.assert_called_once_with(vip_mock) diff --git a/octavia/tests/unit/controller/worker/test_controller_worker.py b/octavia/tests/unit/controller/worker/test_controller_worker.py new file mode 100644 index 0000000000..d6b5f4af2b --- /dev/null +++ b/octavia/tests/unit/controller/worker/test_controller_worker.py @@ -0,0 +1,602 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
#

"""Unit tests for the Octavia controller worker.

The repository getters, the base taskflow engine loader and taskflow's
logging listener are patched at class scope, so every test method
receives those mocks as arguments.  ``mock.patch`` decorators are
applied bottom-up, therefore each method's argument list starts with
any method-level flow-getter mock, followed by the class-level mocks
in reverse declaration order.
"""

import mock
from oslo_utils import uuidutils

from octavia.common import base_taskflow
from octavia.common import exceptions
from octavia.controller.worker import controller_worker
import octavia.tests.unit.base as base

AMP_ID = uuidutils.generate_uuid()
LB_ID = uuidutils.generate_uuid()
POOL_ID = uuidutils.generate_uuid()
HM_ID = uuidutils.generate_uuid()
MEMBER_ID = uuidutils.generate_uuid()
HEALTH_UPDATE_DICT = {'delay': 1, 'timeout': 2}
LISTENER_UPDATE_DICT = {'name': 'test', 'description': 'test2'}
MEMBER_UPDATE_DICT = {'weight': 1, 'ip_address': '10.0.0.0'}
POOL_UPDATE_DICT = {'name': 'test', 'description': 'test2'}

# NOTE: these mocks are shared at module scope so the class-level
# mock.patch decorators can reference them.  setUp() resets their call
# records before every test so state cannot leak between tests.
_amphora_mock = mock.MagicMock()
_flow_mock = mock.MagicMock()
_health_mon_mock = mock.MagicMock()
_vip_mock = mock.MagicMock()
_listener_mock = mock.MagicMock()
_load_balancer_mock = mock.MagicMock()
_member_mock = mock.MagicMock()
_pool_mock = mock.MagicMock()
_create_map_flow_mock = mock.MagicMock()


@mock.patch('octavia.db.repositories.AmphoraRepository.get',
            return_value=_amphora_mock)
@mock.patch('octavia.db.repositories.HealthMonitorRepository.get',
            return_value=_health_mon_mock)
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get',
            return_value=_load_balancer_mock)
@mock.patch('octavia.db.repositories.ListenerRepository.get',
            return_value=_listener_mock)
@mock.patch('octavia.db.repositories.MemberRepository.get',
            return_value=_member_mock)
@mock.patch('octavia.db.repositories.PoolRepository.get',
            return_value=_pool_mock)
@mock.patch('octavia.common.base_taskflow.BaseTaskFlowEngine._taskflow_load',
            return_value=_flow_mock)
@mock.patch('taskflow.listeners.logging.DynamicLoggingListener')
@mock.patch('octavia.db.api.get_session', return_value='TEST')
class TestControllerWorker(base.TestCase):

    def setUp(self):
        super(TestControllerWorker, self).setUp()

        # The mocks above are module-scoped; clear their recorded calls
        # so each test starts from a clean slate.  reset_mock() keeps
        # configured child attributes but drops call history.
        for shared_mock in (_amphora_mock, _flow_mock, _health_mon_mock,
                            _vip_mock, _listener_mock, _load_balancer_mock,
                            _member_mock, _pool_mock, _create_map_flow_mock):
            shared_mock.reset_mock()

        # Wire up the object graph the controller worker navigates.
        _health_mon_mock.pool.listener.load_balancer.amphorae = _amphora_mock
        _health_mon_mock.pool.listener = _listener_mock
        _health_mon_mock.pool.listener.load_balancer.vip = _vip_mock
        _listener_mock.load_balancer = _load_balancer_mock
        _listener_mock.load_balancer.amphorae = _amphora_mock
        _listener_mock.load_balancer.vip = _vip_mock
        _member_mock.pool.listener = _listener_mock
        _member_mock.pool.listener.load_balancer.vip = _vip_mock
        _pool_mock.listener = _listener_mock
        _pool_mock.listener.load_balancer.vip = _vip_mock

        # Re-install the default storage.fetch (some tests replace it):
        # a loaded flow's storage yields the new amphora id.
        _flow_mock.storage.fetch = mock.MagicMock(return_value=AMP_ID)

        _health_mon_mock.pool_id = POOL_ID
        _health_mon_mock.id = HM_ID

    @mock.patch('octavia.controller.worker.flows.'
                'amphora_flows.AmphoraFlows.get_create_amphora_flow',
                return_value='TEST')
    def test_create_amphora(self,
                            mock_get_create_amp_flow,
                            mock_api_get_session,
                            mock_dyn_log_listener,
                            mock_taskflow_load,
                            mock_pool_repo_get,
                            mock_member_repo_get,
                            mock_listener_repo_get,
                            mock_lb_repo_get,
                            mock_health_mon_repo_get,
                            mock_amp_repo_get):
        # NOTE: the first two parameters were previously swapped; the
        # method-level flow-getter mock is the innermost decorator and
        # must come first (decorators apply bottom-up).

        cw = controller_worker.ControllerWorker()
        amp = cw.create_amphora()

        (base_taskflow.BaseTaskFlowEngine._taskflow_load.
            assert_called_once_with('TEST'))

        # Mock.assert_called_once() does not exist in this mock release
        # (the call would silently pass), so check call_count instead.
        self.assertEqual(1, _flow_mock.run.call_count)
        self.assertEqual(1, _flow_mock.storage.fetch.call_count)

        self.assertEqual(AMP_ID, amp)

    @mock.patch('octavia.controller.worker.flows.'
                'amphora_flows.AmphoraFlows.get_delete_amphora_flow',
                return_value='TEST')
    def test_delete_amphora(self,
                            mock_get_delete_amp_flow,
                            mock_api_get_session,
                            mock_dyn_log_listener,
                            mock_taskflow_load,
                            mock_pool_repo_get,
                            mock_member_repo_get,
                            mock_listener_repo_get,
                            mock_lb_repo_get,
                            mock_health_mon_repo_get,
                            mock_amp_repo_get):

        cw = controller_worker.ControllerWorker()
        cw.delete_amphora(AMP_ID)

        mock_amp_repo_get.assert_called_once_with(
            'TEST',
            id=AMP_ID)

        (base_taskflow.BaseTaskFlowEngine._taskflow_load.
            assert_called_once_with('TEST',
                                    store={'amphora': _amphora_mock}))

        self.assertEqual(1, _flow_mock.run.call_count)

    @mock.patch('octavia.controller.worker.flows.'
                'health_monitor_flows.HealthMonitorFlows.'
                'get_create_health_monitor_flow',
                return_value=_flow_mock)
    def test_create_health_monitor(self,
                                   mock_get_create_hm_flow,
                                   mock_api_get_session,
                                   mock_dyn_log_listener,
                                   mock_taskflow_load,
                                   mock_pool_repo_get,
                                   mock_member_repo_get,
                                   mock_listener_repo_get,
                                   mock_lb_repo_get,
                                   mock_health_mon_repo_get,
                                   mock_amp_repo_get):

        cw = controller_worker.ControllerWorker()
        cw.create_health_monitor(_health_mon_mock)

        (base_taskflow.BaseTaskFlowEngine._taskflow_load.
            assert_called_once_with(_flow_mock,
                                    store={'health_mon': _health_mon_mock,
                                           'listener': _listener_mock,
                                           'loadbalancer': _load_balancer_mock,
                                           'vip': _vip_mock}))

        self.assertEqual(1, _flow_mock.run.call_count)

    @mock.patch('octavia.controller.worker.flows.'
                'health_monitor_flows.HealthMonitorFlows.'
                'get_delete_health_monitor_flow',
                return_value=_flow_mock)
    def test_delete_health_monitor(self,
                                   mock_get_delete_hm_flow,
                                   mock_api_get_session,
                                   mock_dyn_log_listener,
                                   mock_taskflow_load,
                                   mock_pool_repo_get,
                                   mock_member_repo_get,
                                   mock_listener_repo_get,
                                   mock_lb_repo_get,
                                   mock_health_mon_repo_get,
                                   mock_amp_repo_get):

        cw = controller_worker.ControllerWorker()
        cw.delete_health_monitor(HM_ID)

        (base_taskflow.BaseTaskFlowEngine._taskflow_load.
            assert_called_once_with(_flow_mock,
                                    store={'health_mon': _health_mon_mock,
                                           'health_mon_id': HM_ID,
                                           'listener': _listener_mock,
                                           'vip': _vip_mock}))

        self.assertEqual(1, _flow_mock.run.call_count)

    @mock.patch('octavia.controller.worker.flows.'
                'health_monitor_flows.HealthMonitorFlows.'
                'get_update_health_monitor_flow',
                return_value=_flow_mock)
    def test_update_health_monitor(self,
                                   mock_get_update_hm_flow,
                                   mock_api_get_session,
                                   mock_dyn_log_listener,
                                   mock_taskflow_load,
                                   mock_pool_repo_get,
                                   mock_member_repo_get,
                                   mock_listener_repo_get,
                                   mock_lb_repo_get,
                                   mock_health_mon_repo_get,
                                   mock_amp_repo_get):

        cw = controller_worker.ControllerWorker()
        cw.update_health_monitor(_health_mon_mock.id,
                                 HEALTH_UPDATE_DICT)

        (base_taskflow.BaseTaskFlowEngine._taskflow_load.
            assert_called_once_with(_flow_mock,
                                    store={'health_mon': _health_mon_mock,
                                           'listener': _listener_mock,
                                           'loadbalancer': _load_balancer_mock,
                                           'vip': _vip_mock,
                                           'update_dict': HEALTH_UPDATE_DICT}))

        self.assertEqual(1, _flow_mock.run.call_count)

    @mock.patch('octavia.controller.worker.flows.'
                'listener_flows.ListenerFlows.get_create_listener_flow',
                return_value=_flow_mock)
    def test_create_listener(self,
                             mock_get_create_listener_flow,
                             mock_api_get_session,
                             mock_dyn_log_listener,
                             mock_taskflow_load,
                             mock_pool_repo_get,
                             mock_member_repo_get,
                             mock_listener_repo_get,
                             mock_lb_repo_get,
                             mock_health_mon_repo_get,
                             mock_amp_repo_get):

        cw = controller_worker.ControllerWorker()
        # NOTE(review): LB_ID is passed where a listener id looks
        # intended — confirm against the ControllerWorker API.
        cw.create_listener(LB_ID)

        (base_taskflow.BaseTaskFlowEngine._taskflow_load.
            assert_called_once_with(_flow_mock,
                                    store={'listener': _listener_mock,
                                           'loadbalancer': _load_balancer_mock,
                                           'vip': _vip_mock}))

        self.assertEqual(1, _flow_mock.run.call_count)

    @mock.patch('octavia.controller.worker.flows.'
                'listener_flows.ListenerFlows.get_delete_listener_flow',
                return_value=_flow_mock)
    def test_delete_listener(self,
                             mock_get_delete_listener_flow,
                             mock_api_get_session,
                             mock_dyn_log_listener,
                             mock_taskflow_load,
                             mock_pool_repo_get,
                             mock_member_repo_get,
                             mock_listener_repo_get,
                             mock_lb_repo_get,
                             mock_health_mon_repo_get,
                             mock_amp_repo_get):

        cw = controller_worker.ControllerWorker()
        cw.delete_listener(LB_ID)

        (base_taskflow.BaseTaskFlowEngine._taskflow_load.
            assert_called_once_with(_flow_mock,
                                    store={'listener': _listener_mock,
                                           'vip': _vip_mock}))

        self.assertEqual(1, _flow_mock.run.call_count)

    @mock.patch('octavia.controller.worker.flows.'
                'listener_flows.ListenerFlows.get_update_listener_flow',
                return_value=_flow_mock)
    def test_update_listener(self,
                             mock_get_update_listener_flow,
                             mock_api_get_session,
                             mock_dyn_log_listener,
                             mock_taskflow_load,
                             mock_pool_repo_get,
                             mock_member_repo_get,
                             mock_listener_repo_get,
                             mock_lb_repo_get,
                             mock_health_mon_repo_get,
                             mock_amp_repo_get):

        cw = controller_worker.ControllerWorker()
        cw.update_listener(LB_ID, LISTENER_UPDATE_DICT)

        (base_taskflow.BaseTaskFlowEngine._taskflow_load.
            assert_called_once_with(_flow_mock,
                                    store={'listener': _listener_mock,
                                           'vip': _vip_mock,
                                           'loadbalancer': _load_balancer_mock,
                                           'update_dict':
                                               LISTENER_UPDATE_DICT}))

        self.assertEqual(1, _flow_mock.run.call_count)

    @mock.patch('octavia.controller.worker.flows.load_balancer_flows.'
                'LoadBalancerFlows.get_create_load_balancer_flow',
                return_value=_flow_mock)
    @mock.patch('octavia.controller.worker.flows.'
                'amphora_flows.AmphoraFlows.get_create_amphora_flow',
                return_value='TEST2')
    @mock.patch('octavia.controller.worker.flows.'
                'amphora_flows.AmphoraFlows.get_create_amphora_for_lb_flow',
                return_value='TEST2')
    def test_create_load_balancer(self,
                                  mock_get_create_amp_for_lb_flow,
                                  mock_get_create_amp_flow,
                                  mock_get_create_lb_flow,
                                  mock_api_get_session,
                                  mock_dyn_log_listener,
                                  mock_taskflow_load,
                                  mock_pool_repo_get,
                                  mock_member_repo_get,
                                  mock_listener_repo_get,
                                  mock_lb_repo_get,
                                  mock_health_mon_repo_get,
                                  mock_amp_repo_get):

        # Test code path with an existing READY amphora

        cw = controller_worker.ControllerWorker()
        cw.create_load_balancer(LB_ID)

        (base_taskflow.BaseTaskFlowEngine._taskflow_load.
            assert_called_once_with(_flow_mock,
                                    store={'loadbalancer':
                                           _load_balancer_mock}))

        self.assertEqual(1, _flow_mock.run.call_count)

        _flow_mock.storage.fetch.assert_called_once_with('amphora')

        self.assertFalse(mock_get_create_amp_flow.called)

        # Mapping the READY amphora blows up -> worker must recover.
        fetch_fail_mock = mock.MagicMock(side_effect=exceptions.
                                         ComputeBuildException())
        _flow_mock.storage.fetch = fetch_fail_mock

        cw.create_load_balancer(LB_ID)

        # Test code path with no existing READY amphora

        _flow_mock.reset_mock()
        mock_get_create_lb_flow.reset_mock()

        fetch_none_mock = mock.MagicMock(return_value=None)

        _flow_mock.storage.fetch = fetch_none_mock

        cw.create_load_balancer(LB_ID)

        base_taskflow.BaseTaskFlowEngine._taskflow_load.assert_has_calls([
            mock.call(_flow_mock,
                      store={'loadbalancer': _load_balancer_mock}),
            mock.call('TEST2',
                      store={'loadbalancer': _load_balancer_mock}),
        ], any_order=False)

        _flow_mock.run.assert_any_call()

        self.assertEqual(2, _flow_mock.run.call_count)

        _flow_mock.storage.fetch.assert_called_once_with('amphora')

        # If spawning a fresh amphora also fails, the worker reports
        # that no suitable amphora could be found.
        _create_map_flow_mock.run = mock.MagicMock(side_effect=exceptions.
                                                   ComputeBuildException)

        mock_taskflow_load.side_effect = [_flow_mock, _create_map_flow_mock]

        self.assertRaises(exceptions.NoSuitableAmphoraException,
                          cw.create_load_balancer,
                          LB_ID)

    @mock.patch('octavia.controller.worker.flows.load_balancer_flows.'
                'LoadBalancerFlows.get_delete_load_balancer_flow',
                return_value=_flow_mock)
    def test_delete_load_balancer(self,
                                  mock_get_delete_lb_flow,
                                  mock_api_get_session,
                                  mock_dyn_log_listener,
                                  mock_taskflow_load,
                                  mock_pool_repo_get,
                                  mock_member_repo_get,
                                  mock_listener_repo_get,
                                  mock_lb_repo_get,
                                  mock_health_mon_repo_get,
                                  mock_amp_repo_get):

        cw = controller_worker.ControllerWorker()
        cw.delete_load_balancer(LB_ID)

        mock_lb_repo_get.assert_called_once_with(
            'TEST',
            id=LB_ID)

        (base_taskflow.BaseTaskFlowEngine._taskflow_load.
            assert_called_once_with(_flow_mock,
                                    store={'loadbalancer':
                                           _load_balancer_mock}))

        self.assertEqual(1, _flow_mock.run.call_count)

    @mock.patch('octavia.controller.worker.flows.load_balancer_flows.'
                'LoadBalancerFlows.get_update_load_balancer_flow',
                return_value=_flow_mock)
    def test_update_load_balancer(self,
                                  mock_get_update_lb_flow,
                                  mock_api_get_session,
                                  mock_dyn_log_listener,
                                  mock_taskflow_load,
                                  mock_pool_repo_get,
                                  mock_member_repo_get,
                                  mock_listener_repo_get,
                                  mock_lb_repo_get,
                                  mock_health_mon_repo_get,
                                  mock_amp_repo_get):

        cw = controller_worker.ControllerWorker()
        cw.update_load_balancer(LB_ID, 'TEST2')

        mock_lb_repo_get.assert_called_once_with(
            'TEST',
            id=LB_ID)

        (base_taskflow.BaseTaskFlowEngine._taskflow_load.
            assert_called_once_with(_flow_mock,
                                    store={'loadbalancer':
                                           _load_balancer_mock}))

        self.assertEqual(1, _flow_mock.run.call_count)

    @mock.patch('octavia.controller.worker.flows.'
                'member_flows.MemberFlows.get_create_member_flow',
                return_value=_flow_mock)
    def test_create_member(self,
                           mock_get_create_member_flow,
                           mock_api_get_session,
                           mock_dyn_log_listener,
                           mock_taskflow_load,
                           mock_pool_repo_get,
                           mock_member_repo_get,
                           mock_listener_repo_get,
                           mock_lb_repo_get,
                           mock_health_mon_repo_get,
                           mock_amp_repo_get):

        cw = controller_worker.ControllerWorker()
        cw.create_member(MEMBER_ID)

        (base_taskflow.BaseTaskFlowEngine._taskflow_load.
            assert_called_once_with(_flow_mock,
                                    store={'member': _member_mock,
                                           'listener': _listener_mock,
                                           'loadbalancer': _load_balancer_mock,
                                           'vip': _vip_mock}))

        self.assertEqual(1, _flow_mock.run.call_count)

    @mock.patch('octavia.controller.worker.flows.'
                'member_flows.MemberFlows.get_delete_member_flow',
                return_value=_flow_mock)
    def test_delete_member(self,
                           mock_get_delete_member_flow,
                           mock_api_get_session,
                           mock_dyn_log_listener,
                           mock_taskflow_load,
                           mock_pool_repo_get,
                           mock_member_repo_get,
                           mock_listener_repo_get,
                           mock_lb_repo_get,
                           mock_health_mon_repo_get,
                           mock_amp_repo_get):

        cw = controller_worker.ControllerWorker()
        cw.delete_member(MEMBER_ID)

        (base_taskflow.BaseTaskFlowEngine._taskflow_load.
            assert_called_once_with(_flow_mock,
                                    store={'member': _member_mock,
                                           'member_id': MEMBER_ID,
                                           'listener': _listener_mock,
                                           'vip': _vip_mock}))

        self.assertEqual(1, _flow_mock.run.call_count)

    @mock.patch('octavia.controller.worker.flows.'
                'member_flows.MemberFlows.get_update_member_flow',
                return_value=_flow_mock)
    def test_update_member(self,
                           mock_get_update_member_flow,
                           mock_api_get_session,
                           mock_dyn_log_listener,
                           mock_taskflow_load,
                           mock_pool_repo_get,
                           mock_member_repo_get,
                           mock_listener_repo_get,
                           mock_lb_repo_get,
                           mock_health_mon_repo_get,
                           mock_amp_repo_get):

        cw = controller_worker.ControllerWorker()
        cw.update_member(MEMBER_ID, MEMBER_UPDATE_DICT)

        (base_taskflow.BaseTaskFlowEngine._taskflow_load.
            assert_called_once_with(_flow_mock,
                                    store={'member': _member_mock,
                                           'listener': _listener_mock,
                                           'loadbalancer': _load_balancer_mock,
                                           'vip': _vip_mock,
                                           'update_dict': MEMBER_UPDATE_DICT}))

        self.assertEqual(1, _flow_mock.run.call_count)

    @mock.patch('octavia.controller.worker.flows.'
                'pool_flows.PoolFlows.get_create_pool_flow',
                return_value=_flow_mock)
    def test_create_pool(self,
                         mock_get_create_pool_flow,
                         mock_api_get_session,
                         mock_dyn_log_listener,
                         mock_taskflow_load,
                         mock_pool_repo_get,
                         mock_member_repo_get,
                         mock_listener_repo_get,
                         mock_lb_repo_get,
                         mock_health_mon_repo_get,
                         mock_amp_repo_get):

        cw = controller_worker.ControllerWorker()
        cw.create_pool(POOL_ID)

        (base_taskflow.BaseTaskFlowEngine._taskflow_load.
            assert_called_once_with(_flow_mock,
                                    store={'pool': _pool_mock,
                                           'listener': _listener_mock,
                                           'loadbalancer': _load_balancer_mock,
                                           'vip': _vip_mock}))

        self.assertEqual(1, _flow_mock.run.call_count)

    @mock.patch('octavia.controller.worker.flows.'
                'pool_flows.PoolFlows.get_delete_pool_flow',
                return_value=_flow_mock)
    def test_delete_pool(self,
                         mock_get_delete_pool_flow,
                         mock_api_get_session,
                         mock_dyn_log_listener,
                         mock_taskflow_load,
                         mock_pool_repo_get,
                         mock_member_repo_get,
                         mock_listener_repo_get,
                         mock_lb_repo_get,
                         mock_health_mon_repo_get,
                         mock_amp_repo_get):

        cw = controller_worker.ControllerWorker()
        cw.delete_pool(POOL_ID)

        (base_taskflow.BaseTaskFlowEngine._taskflow_load.
            assert_called_once_with(_flow_mock,
                                    store={'pool': _pool_mock,
                                           'pool_id': POOL_ID,
                                           'listener': _listener_mock,
                                           'vip': _vip_mock}))

        self.assertEqual(1, _flow_mock.run.call_count)

    @mock.patch('octavia.controller.worker.flows.'
                'pool_flows.PoolFlows.get_update_pool_flow',
                return_value=_flow_mock)
    def test_update_pool(self,
                         mock_get_update_pool_flow,
                         mock_api_get_session,
                         mock_dyn_log_listener,
                         mock_taskflow_load,
                         mock_pool_repo_get,
                         mock_member_repo_get,
                         mock_listener_repo_get,
                         mock_lb_repo_get,
                         mock_health_mon_repo_get,
                         mock_amp_repo_get):

        cw = controller_worker.ControllerWorker()
        cw.update_pool(POOL_ID, POOL_UPDATE_DICT)

        (base_taskflow.BaseTaskFlowEngine._taskflow_load.
            assert_called_once_with(_flow_mock,
                                    store={'pool': _pool_mock,
                                           'listener': _listener_mock,
                                           'loadbalancer': _load_balancer_mock,
                                           'vip': _vip_mock,
                                           'update_dict': POOL_UPDATE_DICT}))

        self.assertEqual(1, _flow_mock.run.call_count)