Merge "Create Amphora V2 provider driver"
This commit is contained in:
commit
ff4680eb71
344
octavia/api/drivers/amphora_driver/v2/driver.py
Normal file
344
octavia/api/drivers/amphora_driver/v2/driver.py
Normal file
@ -0,0 +1,344 @@
# Copyright 2018 Rackspace, US Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from jsonschema import exceptions as js_exceptions
from jsonschema import validate

from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from stevedore import driver as stevedore_driver

from octavia_lib.api.drivers import data_models as driver_dm
from octavia_lib.api.drivers import exceptions
from octavia_lib.api.drivers import provider_base as driver_base

from octavia.api.drivers.amphora_driver import flavor_schema
from octavia.api.drivers import utils as driver_utils
from octavia.common import constants as consts
from octavia.common import data_models
from octavia.common import rpc
from octavia.common import utils
from octavia.db import api as db_apis
from octavia.db import repositories
from octavia.network import base as network_base

CONF = cfg.CONF
CONF.import_group('oslo_messaging', 'octavia.common.config')
LOG = logging.getLogger(__name__)


class AmphoraProviderDriver(driver_base.ProviderDriver):
    def __init__(self):
        super(AmphoraProviderDriver, self).__init__()
        self.target = messaging.Target(
            namespace=consts.RPC_NAMESPACE_CONTROLLER_AGENT,
            topic=consts.TOPIC_AMPHORA_V2, version="2.0", fanout=False)
        self.client = rpc.get_client(self.target)
        self.repositories = repositories.Repositories()

    # Load Balancer
    def create_vip_port(self, loadbalancer_id, project_id, vip_dictionary):
        vip_obj = driver_utils.provider_vip_dict_to_vip_obj(vip_dictionary)
        lb_obj = data_models.LoadBalancer(id=loadbalancer_id,
                                          project_id=project_id, vip=vip_obj)

        network_driver = utils.get_network_driver()
        try:
            vip = network_driver.allocate_vip(lb_obj)
        except network_base.AllocateVIPException as e:
            raise exceptions.DriverError(user_fault_string=e.orig_msg,
                                         operator_fault_string=e.orig_msg)

        LOG.info('Amphora provider created VIP port %s for load balancer %s.',
                 vip.port_id, loadbalancer_id)
        return driver_utils.vip_dict_to_provider_dict(vip.to_dict())

    # TODO(johnsom) convert this to octavia_lib constant flavor
    # once octavia is transitioned to use octavia_lib
    def loadbalancer_create(self, loadbalancer):
        if loadbalancer.flavor == driver_dm.Unset:
            loadbalancer.flavor = None
        payload = {consts.LOAD_BALANCER_ID: loadbalancer.loadbalancer_id,
                   consts.FLAVOR: loadbalancer.flavor}
        self.client.cast({}, 'create_load_balancer', **payload)

    def loadbalancer_delete(self, loadbalancer, cascade=False):
        loadbalancer_id = loadbalancer.loadbalancer_id
        payload = {consts.LOAD_BALANCER_ID: loadbalancer_id,
                   'cascade': cascade}
        self.client.cast({}, 'delete_load_balancer', **payload)

    def loadbalancer_failover(self, loadbalancer_id):
        payload = {consts.LOAD_BALANCER_ID: loadbalancer_id}
        self.client.cast({}, 'failover_load_balancer', **payload)

    def loadbalancer_update(self, old_loadbalancer, new_loadbalancer):
        # Adapt the provider data model to the queue schema
        lb_dict = new_loadbalancer.to_dict()
        if 'admin_state_up' in lb_dict:
            lb_dict['enabled'] = lb_dict.pop('admin_state_up')
        lb_id = lb_dict.pop('loadbalancer_id')
        # Put the qos_policy_id back under the vip element the controller
        # expects
        vip_qos_policy_id = lb_dict.pop('vip_qos_policy_id', None)
        if vip_qos_policy_id:
            vip_dict = {"qos_policy_id": vip_qos_policy_id}
            lb_dict["vip"] = vip_dict

        payload = {consts.LOAD_BALANCER_ID: lb_id,
                   consts.LOAD_BALANCER_UPDATES: lb_dict}
        self.client.cast({}, 'update_load_balancer', **payload)

    # Listener
    def listener_create(self, listener):
        payload = {consts.LISTENER_ID: listener.listener_id}
        self.client.cast({}, 'create_listener', **payload)

    def listener_delete(self, listener):
        listener_id = listener.listener_id
        payload = {consts.LISTENER_ID: listener_id}
        self.client.cast({}, 'delete_listener', **payload)

    def listener_update(self, old_listener, new_listener):
        listener_dict = new_listener.to_dict()
        if 'admin_state_up' in listener_dict:
            listener_dict['enabled'] = listener_dict.pop('admin_state_up')
        listener_id = listener_dict.pop('listener_id')
        if 'client_ca_tls_container_ref' in listener_dict:
            listener_dict['client_ca_tls_container_id'] = listener_dict.pop(
                'client_ca_tls_container_ref')
        listener_dict.pop('client_ca_tls_container_data', None)
        if 'client_crl_container_ref' in listener_dict:
            listener_dict['client_crl_container_id'] = listener_dict.pop(
                'client_crl_container_ref')
        listener_dict.pop('client_crl_container_data', None)

        payload = {consts.LISTENER_ID: listener_id,
                   consts.LISTENER_UPDATES: listener_dict}
        self.client.cast({}, 'update_listener', **payload)

    # Pool
    def pool_create(self, pool):
        payload = {consts.POOL_ID: pool.pool_id}
        self.client.cast({}, 'create_pool', **payload)

    def pool_delete(self, pool):
        pool_id = pool.pool_id
        payload = {consts.POOL_ID: pool_id}
        self.client.cast({}, 'delete_pool', **payload)

    def pool_update(self, old_pool, new_pool):
        pool_dict = new_pool.to_dict()
        if 'admin_state_up' in pool_dict:
            pool_dict['enabled'] = pool_dict.pop('admin_state_up')
        pool_id = pool_dict.pop('pool_id')
        if 'tls_container_ref' in pool_dict:
            pool_dict['tls_container_id'] = pool_dict.pop('tls_container_ref')
        pool_dict.pop('tls_container_data', None)
        if 'ca_tls_container_ref' in pool_dict:
            pool_dict['ca_tls_certificate_id'] = pool_dict.pop(
                'ca_tls_container_ref')
        pool_dict.pop('ca_tls_container_data', None)
        if 'client_crl_container_ref' in pool_dict:
            pool_dict['client_crl_container_id'] = pool_dict.pop(
                'client_crl_container_ref')
        pool_dict.pop('client_crl_container_data', None)

        payload = {consts.POOL_ID: pool_id,
                   consts.POOL_UPDATES: pool_dict}
        self.client.cast({}, 'update_pool', **payload)

    # Member
    def member_create(self, member):
        payload = {consts.MEMBER_ID: member.member_id}
        self.client.cast({}, 'create_member', **payload)

    def member_delete(self, member):
        member_id = member.member_id
        payload = {consts.MEMBER_ID: member_id}
        self.client.cast({}, 'delete_member', **payload)

    def member_update(self, old_member, new_member):
        member_dict = new_member.to_dict()
        if 'admin_state_up' in member_dict:
            member_dict['enabled'] = member_dict.pop('admin_state_up')
        member_id = member_dict.pop('member_id')

        payload = {consts.MEMBER_ID: member_id,
                   consts.MEMBER_UPDATES: member_dict}
        self.client.cast({}, 'update_member', **payload)

    def member_batch_update(self, members):
        # Get a list of existing members
        pool_id = members[0].pool_id
        # The DB should not have updated yet, so we can still use the pool
        db_pool = self.repositories.pool.get(db_apis.get_session(),
                                             id=pool_id)
        old_members = db_pool.members

        old_member_ids = [m.id for m in old_members]
        # The driver will always pass objects with IDs.
        new_member_ids = [m.member_id for m in members]

        # Find members that are brand new or updated
        new_members = []
        updated_members = []
        for m in members:
            if m.member_id not in old_member_ids:
                new_members.append(m)
            else:
                member_dict = m.to_dict(render_unsets=False)
                member_dict['id'] = member_dict.pop('member_id')
                if 'address' in member_dict:
                    member_dict['ip_address'] = member_dict.pop('address')
                if 'admin_state_up' in member_dict:
                    member_dict['enabled'] = member_dict.pop('admin_state_up')
                updated_members.append(member_dict)

        # Find members that are deleted
        deleted_members = []
        for m in old_members:
            if m.id not in new_member_ids:
                deleted_members.append(m)

        payload = {'old_member_ids': [m.id for m in deleted_members],
                   'new_member_ids': [m.member_id for m in new_members],
                   'updated_members': updated_members}
        self.client.cast({}, 'batch_update_members', **payload)

    # Health Monitor
    def health_monitor_create(self, healthmonitor):
        payload = {consts.HEALTH_MONITOR_ID: healthmonitor.healthmonitor_id}
        self.client.cast({}, 'create_health_monitor', **payload)

    def health_monitor_delete(self, healthmonitor):
        healthmonitor_id = healthmonitor.healthmonitor_id
        payload = {consts.HEALTH_MONITOR_ID: healthmonitor_id}
        self.client.cast({}, 'delete_health_monitor', **payload)

    def health_monitor_update(self, old_healthmonitor, new_healthmonitor):
        healthmon_dict = new_healthmonitor.to_dict()
        if 'admin_state_up' in healthmon_dict:
            healthmon_dict['enabled'] = healthmon_dict.pop('admin_state_up')
        if 'max_retries_down' in healthmon_dict:
            healthmon_dict['fall_threshold'] = healthmon_dict.pop(
                'max_retries_down')
        if 'max_retries' in healthmon_dict:
            healthmon_dict['rise_threshold'] = healthmon_dict.pop(
                'max_retries')
        healthmon_id = healthmon_dict.pop('healthmonitor_id')

        payload = {consts.HEALTH_MONITOR_ID: healthmon_id,
                   consts.HEALTH_MONITOR_UPDATES: healthmon_dict}
        self.client.cast({}, 'update_health_monitor', **payload)

    # L7 Policy
    def l7policy_create(self, l7policy):
        payload = {consts.L7POLICY_ID: l7policy.l7policy_id}
        self.client.cast({}, 'create_l7policy', **payload)

    def l7policy_delete(self, l7policy):
        l7policy_id = l7policy.l7policy_id
        payload = {consts.L7POLICY_ID: l7policy_id}
        self.client.cast({}, 'delete_l7policy', **payload)

    def l7policy_update(self, old_l7policy, new_l7policy):
        l7policy_dict = new_l7policy.to_dict()
        if 'admin_state_up' in l7policy_dict:
            l7policy_dict['enabled'] = l7policy_dict.pop('admin_state_up')
        l7policy_id = l7policy_dict.pop('l7policy_id')

        payload = {consts.L7POLICY_ID: l7policy_id,
                   consts.L7POLICY_UPDATES: l7policy_dict}
        self.client.cast({}, 'update_l7policy', **payload)

    # L7 Rule
    def l7rule_create(self, l7rule):
        payload = {consts.L7RULE_ID: l7rule.l7rule_id}
        self.client.cast({}, 'create_l7rule', **payload)

    def l7rule_delete(self, l7rule):
        l7rule_id = l7rule.l7rule_id
        payload = {consts.L7RULE_ID: l7rule_id}
        self.client.cast({}, 'delete_l7rule', **payload)

    def l7rule_update(self, old_l7rule, new_l7rule):
        l7rule_dict = new_l7rule.to_dict()
        if 'admin_state_up' in l7rule_dict:
            l7rule_dict['enabled'] = l7rule_dict.pop('admin_state_up')
        l7rule_id = l7rule_dict.pop('l7rule_id')

        payload = {consts.L7RULE_ID: l7rule_id,
                   consts.L7RULE_UPDATES: l7rule_dict}
        self.client.cast({}, 'update_l7rule', **payload)

    # Flavor
    def get_supported_flavor_metadata(self):
        """Returns the valid flavor metadata keys and descriptions.

        This extracts the valid flavor metadata keys and descriptions
        from the JSON validation schema and returns it as a dictionary.

        :return: Dictionary of flavor metadata keys and descriptions.
        :raises DriverError: An unexpected error occurred.
        """
        try:
            props = flavor_schema.SUPPORTED_FLAVOR_SCHEMA['properties']
            return {k: v.get('description', '') for k, v in props.items()}
        except Exception as e:
            raise exceptions.DriverError(
                user_fault_string='Failed to get the supported flavor '
                                  'metadata due to: {}'.format(str(e)),
                operator_fault_string='Failed to get the supported flavor '
                                      'metadata due to: {}'.format(str(e)))

    def validate_flavor(self, flavor_dict):
        """Validates flavor profile data.

        This will validate a flavor profile dataset against the flavor
        settings the amphora driver supports.

        :param flavor_dict: The flavor dictionary to validate.
        :type flavor_dict: dict
        :return: None
        :raises DriverError: An unexpected error occurred.
        :raises UnsupportedOptionError: If the driver does not support
          one of the flavor settings.
        """
        try:
            validate(flavor_dict, flavor_schema.SUPPORTED_FLAVOR_SCHEMA)
        except js_exceptions.ValidationError as e:
            error_object = ''
            if e.relative_path:
                error_object = '{} '.format(e.relative_path[0])
            raise exceptions.UnsupportedOptionError(
                user_fault_string='{0}{1}'.format(error_object, e.message),
                operator_fault_string=str(e))
        except Exception as e:
            raise exceptions.DriverError(
                user_fault_string='Failed to validate the flavor metadata '
                                  'due to: {}'.format(str(e)),
                operator_fault_string='Failed to validate the flavor '
                                      'metadata due to: {}'.format(str(e)))
        compute_flavor = flavor_dict.get(consts.COMPUTE_FLAVOR, None)
        if compute_flavor:
            compute_driver = stevedore_driver.DriverManager(
                namespace='octavia.compute.drivers',
                name=CONF.controller_worker.compute_driver,
                invoke_on_load=True
            ).driver

            # TODO(johnsom) Fix this to raise a NotFound error
            # when the octavia-lib supports it.
            compute_driver.validate_flavor(compute_flavor)
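
Every method above follows the same shape: translate the provider-facing dict keys to the controller's internal names, then fire an asynchronous cast on the V2 topic and return immediately. A minimal, self-contained sketch of that rename-then-cast idiom; the RENAMES map and fake_cast below are illustrative stand-ins, not Octavia APIs:

    # Illustrative sketch only: RENAMES and fake_cast stand in for the
    # per-method key renames and the rpc client cast used above.
    RENAMES = {'admin_state_up': 'enabled', 'address': 'ip_address'}


    def to_controller_dict(provider_dict):
        """Translate provider-facing keys to the controller queue schema."""
        out = dict(provider_dict)
        for old_key, new_key in RENAMES.items():
            if old_key in out:
                out[new_key] = out.pop(old_key)
        return out


    def fake_cast(method, **payload):
        print('cast %s %s' % (method, payload))


    member = {'member_id': 'm1', 'admin_state_up': False,
              'address': '10.0.0.5'}
    fake_cast('update_member', member_id=member.pop('member_id'),
              member_updates=to_controller_dict(member))
    # cast update_member {'member_id': 'm1',
    #                     'member_updates': {'enabled': False,
    #                                        'ip_address': '10.0.0.5'}}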

@@ -301,7 +301,9 @@ class ListenersController(base.BaseController):

         # re-inject the sni container references lost due to SNI
         # being a separate table in the DB
-        provider_listener.sni_container_refs = listener.sni_container_refs
+        if listener.sni_container_refs != wtypes.Unset:
+            provider_listener.sni_container_refs = (
+                listener.sni_container_refs)

         # Dispatch to the driver
         LOG.info("Sending create Listener %s to provider %s",
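
This hunk fixes a subtle bug: WSME represents omitted request fields with a sentinel (wtypes.Unset), not None, so unconditionally copying sni_container_refs could overwrite the provider listener's value with the sentinel. A standalone sketch of the guard, assuming a stand-in Unset sentinel in place of the real wtypes.Unset:

    # Stand-in sentinel; the real one is wsme.types.Unset.
    class _UnsetType(object):
        def __repr__(self):
            return 'Unset'


    Unset = _UnsetType()


    def inject_sni_refs(provider_listener, sni_container_refs):
        # Only copy the refs when the caller actually supplied them.
        if sni_container_refs is not Unset:
            provider_listener['sni_container_refs'] = sni_container_refs


    provider_listener = {}
    inject_sni_refs(provider_listener, Unset)      # omitted: key not set
    inject_sni_refs(provider_listener, ['ref-1'])  # supplied: copied
    print(provider_listener)  # {'sni_container_refs': ['ref-1']}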

@@ -20,7 +20,8 @@ from oslo_config import cfg
 from oslo_reports import guru_meditation_report as gmr

 from octavia.common import service as octavia_service
-from octavia.controller.queue import consumer
+from octavia.controller.queue.v1 import consumer as consumer_v1
+from octavia.controller.queue.v2 import consumer as consumer_v2
 from octavia import version

 CONF = cfg.CONF
@@ -32,7 +33,9 @@ def main():
     gmr.TextGuruMeditation.setup_autorun(version)

     sm = cotyledon.ServiceManager()
-    sm.add(consumer.ConsumerService, workers=CONF.controller_worker.workers,
-           args=(CONF,))
+    sm.add(consumer_v1.ConsumerService,
+           workers=CONF.controller_worker.workers, args=(CONF,))
+    sm.add(consumer_v2.ConsumerService,
+           workers=CONF.controller_worker.workers, args=(CONF,))
     oslo_config_glue.setup(sm, CONF, reload_method="mutate")
     sm.run()
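
With this change the consumer binary hosts both queue consumers under one cotyledon ServiceManager, so v1 and v2 messages are drained in parallel from their separate topics during the transition. A hedged sketch of that layout; EchoService is an illustrative stand-in for the real ConsumerService classes, and only 'octavia_provisioning_v2' is a topic name taken from this commit:

    # Illustrative only: EchoService stands in for the real consumers;
    # the 'v1-topic' string is made up for the example.
    import cotyledon


    class EchoService(cotyledon.Service):
        def __init__(self, worker_id, topic):
            super(EchoService, self).__init__(worker_id)
            self._worker_id = worker_id
            self.topic = topic

        def run(self):
            print('worker %s consuming topic %s'
                  % (self._worker_id, self.topic))


    sm = cotyledon.ServiceManager()
    sm.add(EchoService, workers=1, args=('v1-topic',))
    sm.add(EchoService, workers=1, args=('octavia_provisioning_v2',))
    sm.run()  # blocks; each service class runs in its own child process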

@@ -656,3 +656,5 @@ CLIENT_AUTH_OPTIONAL = 'OPTIONAL'
 CLIENT_AUTH_MANDATORY = 'MANDATORY'
 SUPPORTED_CLIENT_AUTH_MODES = [CLIENT_AUTH_NONE, CLIENT_AUTH_OPTIONAL,
                                CLIENT_AUTH_MANDATORY]
+
+TOPIC_AMPHORA_V2 = 'octavia_provisioning_v2'

@@ -23,7 +23,7 @@ from oslo_log import log as logging
 from oslo_utils import excutils

 from octavia.common import constants
-from octavia.controller.worker import controller_worker as cw
+from octavia.controller.worker.v1 import controller_worker as cw
 from octavia.db import api as db_api
 from octavia.db import repositories as repo

@@ -20,7 +20,7 @@ from oslo_log import log as logging
 from oslo_utils import timeutils
 from sqlalchemy.orm import exc as sqlalchemy_exceptions

-from octavia.controller.worker import controller_worker as cw
+from octavia.controller.worker.v1 import controller_worker as cw
 from octavia.db import api as db_api
 from octavia.db import repositories as repo

@@ -18,7 +18,7 @@ import oslo_messaging as messaging
 from oslo_messaging.rpc import dispatcher

 from octavia.common import rpc
-from octavia.controller.queue import endpoint
+from octavia.controller.queue.v1 import endpoints

 LOG = logging.getLogger(__name__)

@@ -38,7 +38,7 @@ class ConsumerService(cotyledon.Service):
         LOG.info('Starting consumer...')
         target = messaging.Target(topic=self.topic, server=self.server,
                                   fanout=False)
-        self.endpoints = [endpoint.Endpoint()]
+        self.endpoints = [endpoints.Endpoints()]
         self.message_listener = rpc.get_server(
             target, self.endpoints,
             executor='threading',
@@ -24,7 +24,7 @@ CONF = cfg.CONF
 LOG = logging.getLogger(__name__)


-class Endpoint(object):
+class Endpoints(object):

     # API version history:
     #   1.0 - Initial version.

octavia/controller/queue/v2/consumer.py (new file, 65 lines)
@@ -0,0 +1,65 @@
# Copyright 2014 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import cotyledon
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_messaging.rpc import dispatcher

from octavia.common import constants
from octavia.common import rpc
from octavia.controller.queue.v2 import endpoints

LOG = logging.getLogger(__name__)


class ConsumerService(cotyledon.Service):

    def __init__(self, worker_id, conf):
        super(ConsumerService, self).__init__(worker_id)
        self.conf = conf
        self.topic = constants.TOPIC_AMPHORA_V2
        self.server = conf.host
        self.endpoints = []
        self.access_policy = dispatcher.DefaultRPCAccessPolicy
        self.message_listener = None

    def run(self):
        LOG.info('Starting V2 consumer...')
        target = messaging.Target(topic=self.topic, server=self.server,
                                  fanout=False)
        self.endpoints = [endpoints.Endpoints()]
        self.message_listener = rpc.get_server(
            target, self.endpoints,
            executor='threading',
            access_policy=self.access_policy
        )
        self.message_listener.start()

    def terminate(self, graceful=False):
        if self.message_listener:
            LOG.info('Stopping V2 consumer...')
            self.message_listener.stop()
            if graceful:
                LOG.info('V2 Consumer successfully stopped. Waiting for '
                         'final messages to be processed...')
                self.message_listener.wait()
        if self.endpoints:
            LOG.info('Shutting down V2 endpoint worker executors...')
            for e in self.endpoints:
                try:
                    e.worker.executor.shutdown()
                except AttributeError:
                    pass
        super(ConsumerService, self).terminate()
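
octavia.common.rpc.get_server is a thin wrapper around oslo.messaging; stripped of the wrapper, the V2 consumer reduces to roughly the following. The transport setup and DemoEndpoint are illustrative assumptions, not code from this commit:

    # Rough, hedged equivalent of what ConsumerService.run() sets up.
    import oslo_messaging as messaging
    from oslo_config import cfg


    class DemoEndpoint(object):
        def create_load_balancer(self, context, load_balancer_id,
                                 flavor=None):
            print('would create load balancer %s' % load_balancer_id)


    # Reads transport_url from the loaded oslo.config configuration.
    transport = messaging.get_rpc_transport(cfg.CONF)
    target = messaging.Target(topic='octavia_provisioning_v2',
                              server='this-host', fanout=False)
    server = messaging.get_rpc_server(transport, target, [DemoEndpoint()],
                                      executor='threading')
    server.start()  # consume until server.stop(); server.wait() to drain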

octavia/controller/queue/v2/endpoints.py (new file, 156 lines)
@@ -0,0 +1,156 @@
# Copyright 2014 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from stevedore import driver as stevedore_driver

from octavia.common import constants

CONF = cfg.CONF

LOG = logging.getLogger(__name__)


class Endpoints(object):

    # API version history:
    #   1.0 - Initial version.
    #   2.0 - Provider driver format
    target = messaging.Target(
        namespace=constants.RPC_NAMESPACE_CONTROLLER_AGENT,
        version='2.0')

    def __init__(self):
        self.worker = stevedore_driver.DriverManager(
            namespace='octavia.plugins',
            name=CONF.octavia_plugins,
            invoke_on_load=True
        ).driver

    def create_load_balancer(self, context, load_balancer_id,
                             flavor=None):
        LOG.info('Creating load balancer \'%s\'...', load_balancer_id)
        self.worker.create_load_balancer(load_balancer_id, flavor)

    def update_load_balancer(self, context, load_balancer_id,
                             load_balancer_updates):
        LOG.info('Updating load balancer \'%s\'...', load_balancer_id)
        self.worker.update_load_balancer(load_balancer_id,
                                         load_balancer_updates)

    def delete_load_balancer(self, context, load_balancer_id, cascade=False):
        LOG.info('Deleting load balancer \'%s\'...', load_balancer_id)
        self.worker.delete_load_balancer(load_balancer_id, cascade)

    def failover_load_balancer(self, context, load_balancer_id):
        LOG.info('Failing over amphora in load balancer \'%s\'...',
                 load_balancer_id)
        self.worker.failover_loadbalancer(load_balancer_id)

    def failover_amphora(self, context, amphora_id):
        LOG.info('Failing over amphora \'%s\'...',
                 amphora_id)
        self.worker.failover_amphora(amphora_id)

    def create_listener(self, context, listener_id):
        LOG.info('Creating listener \'%s\'...', listener_id)
        self.worker.create_listener(listener_id)

    def update_listener(self, context, listener_id, listener_updates):
        LOG.info('Updating listener \'%s\'...', listener_id)
        self.worker.update_listener(listener_id, listener_updates)

    def delete_listener(self, context, listener_id):
        LOG.info('Deleting listener \'%s\'...', listener_id)
        self.worker.delete_listener(listener_id)

    def create_pool(self, context, pool_id):
        LOG.info('Creating pool \'%s\'...', pool_id)
        self.worker.create_pool(pool_id)

    def update_pool(self, context, pool_id, pool_updates):
        LOG.info('Updating pool \'%s\'...', pool_id)
        self.worker.update_pool(pool_id, pool_updates)

    def delete_pool(self, context, pool_id):
        LOG.info('Deleting pool \'%s\'...', pool_id)
        self.worker.delete_pool(pool_id)

    def create_health_monitor(self, context, health_monitor_id):
        LOG.info('Creating health monitor \'%s\'...', health_monitor_id)
        self.worker.create_health_monitor(health_monitor_id)

    def update_health_monitor(self, context, health_monitor_id,
                              health_monitor_updates):
        LOG.info('Updating health monitor \'%s\'...', health_monitor_id)
        self.worker.update_health_monitor(health_monitor_id,
                                          health_monitor_updates)

    def delete_health_monitor(self, context, health_monitor_id):
        LOG.info('Deleting health monitor \'%s\'...', health_monitor_id)
        self.worker.delete_health_monitor(health_monitor_id)

    def create_member(self, context, member_id):
        LOG.info('Creating member \'%s\'...', member_id)
        self.worker.create_member(member_id)

    def update_member(self, context, member_id, member_updates):
        LOG.info('Updating member \'%s\'...', member_id)
        self.worker.update_member(member_id, member_updates)

    def batch_update_members(self, context, old_member_ids, new_member_ids,
                             updated_members):
        updated_member_ids = [m.get('id') for m in updated_members]
        LOG.info(
            'Batch updating members: old=\'%(old)s\', new=\'%(new)s\', '
            'updated=\'%(updated)s\'...',
            {'old': old_member_ids, 'new': new_member_ids,
             'updated': updated_member_ids})
        self.worker.batch_update_members(
            old_member_ids, new_member_ids, updated_members)

    def delete_member(self, context, member_id):
        LOG.info('Deleting member \'%s\'...', member_id)
        self.worker.delete_member(member_id)

    def create_l7policy(self, context, l7policy_id):
        LOG.info('Creating l7policy \'%s\'...', l7policy_id)
        self.worker.create_l7policy(l7policy_id)

    def update_l7policy(self, context, l7policy_id, l7policy_updates):
        LOG.info('Updating l7policy \'%s\'...', l7policy_id)
        self.worker.update_l7policy(l7policy_id, l7policy_updates)

    def delete_l7policy(self, context, l7policy_id):
        LOG.info('Deleting l7policy \'%s\'...', l7policy_id)
        self.worker.delete_l7policy(l7policy_id)

    def create_l7rule(self, context, l7rule_id):
        LOG.info('Creating l7rule \'%s\'...', l7rule_id)
        self.worker.create_l7rule(l7rule_id)

    def update_l7rule(self, context, l7rule_id, l7rule_updates):
        LOG.info('Updating l7rule \'%s\'...', l7rule_id)
        self.worker.update_l7rule(l7rule_id, l7rule_updates)

    def delete_l7rule(self, context, l7rule_id):
        LOG.info('Deleting l7rule \'%s\'...', l7rule_id)
        self.worker.delete_l7rule(l7rule_id)

    def update_amphora_agent_config(self, context, amphora_id):
        LOG.info('Updating amphora \'%s\' agent configuration...',
                 amphora_id)
        self.worker.update_amphora_agent_config(amphora_id)
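
Each cast issued by AmphoraProviderDriver names one of these methods and passes its payload as keyword arguments; oslo.messaging matches the method string against the endpoint object (subject to the namespace and version declared on target) and invokes it with the request context. A toy in-process dispatcher showing that mapping, with no transport involved; FakeEndpoints and dispatch are illustrative:

    # Toy illustration of cast-to-endpoint dispatch; not oslo.messaging
    # code.
    class FakeEndpoints(object):
        def create_load_balancer(self, context, load_balancer_id,
                                 flavor=None):
            print('create_load_balancer(%s, flavor=%s)'
                  % (load_balancer_id, flavor))


    def dispatch(endpoint, method, **payload):
        # oslo.messaging performs this lookup (plus namespace/version
        # checks) on the consumer side of the queue.
        getattr(endpoint, method)({}, **payload)


    # What AmphoraProviderDriver.loadbalancer_create effectively sends:
    dispatch(FakeEndpoints(), 'create_load_balancer',
             load_balancer_id='lb-uuid', flavor=None)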

octavia/controller/worker/v1/__init__.py (new file, 11 lines)
@@ -0,0 +1,11 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

@@ -23,14 +23,14 @@ import tenacity

 from octavia.common import base_taskflow
 from octavia.common import constants
-from octavia.controller.worker.flows import amphora_flows
-from octavia.controller.worker.flows import health_monitor_flows
-from octavia.controller.worker.flows import l7policy_flows
-from octavia.controller.worker.flows import l7rule_flows
-from octavia.controller.worker.flows import listener_flows
-from octavia.controller.worker.flows import load_balancer_flows
-from octavia.controller.worker.flows import member_flows
-from octavia.controller.worker.flows import pool_flows
+from octavia.controller.worker.v1.flows import amphora_flows
+from octavia.controller.worker.v1.flows import health_monitor_flows
+from octavia.controller.worker.v1.flows import l7policy_flows
+from octavia.controller.worker.v1.flows import l7rule_flows
+from octavia.controller.worker.v1.flows import listener_flows
+from octavia.controller.worker.v1.flows import load_balancer_flows
+from octavia.controller.worker.v1.flows import member_flows
+from octavia.controller.worker.v1.flows import pool_flows
 from octavia.db import api as db_apis
 from octavia.db import repositories as repo

octavia/controller/worker/v1/flows/__init__.py (new file, 11 lines)
@@ -0,0 +1,11 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

@@ -19,12 +19,12 @@ from taskflow.patterns import linear_flow
 from taskflow.patterns import unordered_flow

 from octavia.common import constants
-from octavia.controller.worker.tasks import amphora_driver_tasks
-from octavia.controller.worker.tasks import cert_task
-from octavia.controller.worker.tasks import compute_tasks
-from octavia.controller.worker.tasks import database_tasks
-from octavia.controller.worker.tasks import lifecycle_tasks
-from octavia.controller.worker.tasks import network_tasks
+from octavia.controller.worker.v1.tasks import amphora_driver_tasks
+from octavia.controller.worker.v1.tasks import cert_task
+from octavia.controller.worker.v1.tasks import compute_tasks
+from octavia.controller.worker.v1.tasks import database_tasks
+from octavia.controller.worker.v1.tasks import lifecycle_tasks
+from octavia.controller.worker.v1.tasks import network_tasks

 CONF = cfg.CONF

@@ -16,10 +16,10 @@
 from taskflow.patterns import linear_flow

 from octavia.common import constants
-from octavia.controller.worker.tasks import amphora_driver_tasks
-from octavia.controller.worker.tasks import database_tasks
-from octavia.controller.worker.tasks import lifecycle_tasks
-from octavia.controller.worker.tasks import model_tasks
+from octavia.controller.worker.v1.tasks import amphora_driver_tasks
+from octavia.controller.worker.v1.tasks import database_tasks
+from octavia.controller.worker.v1.tasks import lifecycle_tasks
+from octavia.controller.worker.v1.tasks import model_tasks


 class HealthMonitorFlows(object):
@@ -16,10 +16,10 @@
 from taskflow.patterns import linear_flow

 from octavia.common import constants
-from octavia.controller.worker.tasks import amphora_driver_tasks
-from octavia.controller.worker.tasks import database_tasks
-from octavia.controller.worker.tasks import lifecycle_tasks
-from octavia.controller.worker.tasks import model_tasks
+from octavia.controller.worker.v1.tasks import amphora_driver_tasks
+from octavia.controller.worker.v1.tasks import database_tasks
+from octavia.controller.worker.v1.tasks import lifecycle_tasks
+from octavia.controller.worker.v1.tasks import model_tasks


 class L7PolicyFlows(object):
@@ -16,10 +16,10 @@
 from taskflow.patterns import linear_flow

 from octavia.common import constants
-from octavia.controller.worker.tasks import amphora_driver_tasks
-from octavia.controller.worker.tasks import database_tasks
-from octavia.controller.worker.tasks import lifecycle_tasks
-from octavia.controller.worker.tasks import model_tasks
+from octavia.controller.worker.v1.tasks import amphora_driver_tasks
+from octavia.controller.worker.v1.tasks import database_tasks
+from octavia.controller.worker.v1.tasks import lifecycle_tasks
+from octavia.controller.worker.v1.tasks import model_tasks


 class L7RuleFlows(object):
@@ -16,10 +16,10 @@
 from taskflow.patterns import linear_flow

 from octavia.common import constants
-from octavia.controller.worker.tasks import amphora_driver_tasks
-from octavia.controller.worker.tasks import database_tasks
-from octavia.controller.worker.tasks import lifecycle_tasks
-from octavia.controller.worker.tasks import network_tasks
+from octavia.controller.worker.v1.tasks import amphora_driver_tasks
+from octavia.controller.worker.v1.tasks import database_tasks
+from octavia.controller.worker.v1.tasks import lifecycle_tasks
+from octavia.controller.worker.v1.tasks import network_tasks


 class ListenerFlows(object):
@@ -20,15 +20,15 @@ from taskflow.patterns import unordered_flow

 from octavia.common import constants
 from octavia.common import exceptions
-from octavia.controller.worker.flows import amphora_flows
-from octavia.controller.worker.flows import listener_flows
-from octavia.controller.worker.flows import member_flows
-from octavia.controller.worker.flows import pool_flows
-from octavia.controller.worker.tasks import amphora_driver_tasks
-from octavia.controller.worker.tasks import compute_tasks
-from octavia.controller.worker.tasks import database_tasks
-from octavia.controller.worker.tasks import lifecycle_tasks
-from octavia.controller.worker.tasks import network_tasks
+from octavia.controller.worker.v1.flows import amphora_flows
+from octavia.controller.worker.v1.flows import listener_flows
+from octavia.controller.worker.v1.flows import member_flows
+from octavia.controller.worker.v1.flows import pool_flows
+from octavia.controller.worker.v1.tasks import amphora_driver_tasks
+from octavia.controller.worker.v1.tasks import compute_tasks
+from octavia.controller.worker.v1.tasks import database_tasks
+from octavia.controller.worker.v1.tasks import lifecycle_tasks
+from octavia.controller.worker.v1.tasks import network_tasks

 CONF = cfg.CONF
 LOG = logging.getLogger(__name__)
@@ -17,11 +17,11 @@ from taskflow.patterns import linear_flow
 from taskflow.patterns import unordered_flow

 from octavia.common import constants
-from octavia.controller.worker.tasks import amphora_driver_tasks
-from octavia.controller.worker.tasks import database_tasks
-from octavia.controller.worker.tasks import lifecycle_tasks
-from octavia.controller.worker.tasks import model_tasks
-from octavia.controller.worker.tasks import network_tasks
+from octavia.controller.worker.v1.tasks import amphora_driver_tasks
+from octavia.controller.worker.v1.tasks import database_tasks
+from octavia.controller.worker.v1.tasks import lifecycle_tasks
+from octavia.controller.worker.v1.tasks import model_tasks
+from octavia.controller.worker.v1.tasks import network_tasks


 class MemberFlows(object):
@@ -16,10 +16,10 @@
 from taskflow.patterns import linear_flow

 from octavia.common import constants
-from octavia.controller.worker.tasks import amphora_driver_tasks
-from octavia.controller.worker.tasks import database_tasks
-from octavia.controller.worker.tasks import lifecycle_tasks
-from octavia.controller.worker.tasks import model_tasks
+from octavia.controller.worker.v1.tasks import amphora_driver_tasks
+from octavia.controller.worker.v1.tasks import database_tasks
+from octavia.controller.worker.v1.tasks import lifecycle_tasks
+from octavia.controller.worker.v1.tasks import model_tasks


 class PoolFlows(object):

octavia/controller/worker/v1/tasks/__init__.py (new file, 11 lines)
@@ -0,0 +1,11 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

octavia/controller/worker/v2/__init__.py (new file, 11 lines)
@@ -0,0 +1,11 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

octavia/controller/worker/v2/controller_worker.py (new file, 964 lines)
@@ -0,0 +1,964 @@
|
|||||||
|
# Copyright 2015 Hewlett-Packard Development Company, L.P.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
#
|
||||||
|
|
||||||
|
|
||||||
|
from oslo_config import cfg
|
||||||
|
from oslo_log import log as logging
|
||||||
|
from oslo_utils import excutils
|
||||||
|
from sqlalchemy.orm import exc as db_exceptions
|
||||||
|
from taskflow.listeners import logging as tf_logging
|
||||||
|
import tenacity
|
||||||
|
|
||||||
|
from octavia.common import base_taskflow
|
||||||
|
from octavia.common import constants
|
||||||
|
from octavia.controller.worker.v2.flows import amphora_flows
|
||||||
|
from octavia.controller.worker.v2.flows import health_monitor_flows
|
||||||
|
from octavia.controller.worker.v2.flows import l7policy_flows
|
||||||
|
from octavia.controller.worker.v2.flows import l7rule_flows
|
||||||
|
from octavia.controller.worker.v2.flows import listener_flows
|
||||||
|
from octavia.controller.worker.v2.flows import load_balancer_flows
|
||||||
|
from octavia.controller.worker.v2.flows import member_flows
|
||||||
|
from octavia.controller.worker.v2.flows import pool_flows
|
||||||
|
from octavia.db import api as db_apis
|
||||||
|
from octavia.db import repositories as repo
|
||||||
|
|
||||||
|
CONF = cfg.CONF
|
||||||
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
RETRY_ATTEMPTS = 15
|
||||||
|
RETRY_INITIAL_DELAY = 1
|
||||||
|
RETRY_BACKOFF = 1
|
||||||
|
RETRY_MAX = 5
|
||||||
|
|
||||||
|
|
||||||
|
def _is_provisioning_status_pending_update(lb_obj):
|
||||||
|
return not lb_obj.provisioning_status == constants.PENDING_UPDATE
|
||||||
|
|
||||||
|
|
||||||
|
class ControllerWorker(base_taskflow.BaseTaskFlowEngine):
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
|
||||||
|
self._amphora_flows = amphora_flows.AmphoraFlows()
|
||||||
|
self._health_monitor_flows = health_monitor_flows.HealthMonitorFlows()
|
||||||
|
self._lb_flows = load_balancer_flows.LoadBalancerFlows()
|
||||||
|
self._listener_flows = listener_flows.ListenerFlows()
|
||||||
|
self._member_flows = member_flows.MemberFlows()
|
||||||
|
self._pool_flows = pool_flows.PoolFlows()
|
||||||
|
self._l7policy_flows = l7policy_flows.L7PolicyFlows()
|
||||||
|
self._l7rule_flows = l7rule_flows.L7RuleFlows()
|
||||||
|
|
||||||
|
self._amphora_repo = repo.AmphoraRepository()
|
||||||
|
self._amphora_health_repo = repo.AmphoraHealthRepository()
|
||||||
|
self._health_mon_repo = repo.HealthMonitorRepository()
|
||||||
|
self._lb_repo = repo.LoadBalancerRepository()
|
||||||
|
self._listener_repo = repo.ListenerRepository()
|
||||||
|
self._member_repo = repo.MemberRepository()
|
||||||
|
self._pool_repo = repo.PoolRepository()
|
||||||
|
self._l7policy_repo = repo.L7PolicyRepository()
|
||||||
|
self._l7rule_repo = repo.L7RuleRepository()
|
||||||
|
self._flavor_repo = repo.FlavorRepository()
|
||||||
|
|
||||||
|
super(ControllerWorker, self).__init__()
|
||||||
|
|
||||||
|
@tenacity.retry(
|
||||||
|
retry=(
|
||||||
|
tenacity.retry_if_result(_is_provisioning_status_pending_update) |
|
||||||
|
tenacity.retry_if_exception_type()),
|
||||||
|
wait=tenacity.wait_incrementing(
|
||||||
|
RETRY_INITIAL_DELAY, RETRY_BACKOFF, RETRY_MAX),
|
||||||
|
stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS))
|
||||||
|
def _get_db_obj_until_pending_update(self, repo, id):
|
||||||
|
|
||||||
|
return repo.get(db_apis.get_session(), id=id)
|
||||||
|
|
||||||
|
def create_amphora(self):
|
||||||
|
"""Creates an Amphora.
|
||||||
|
|
||||||
|
This is used to create spare amphora.
|
||||||
|
|
||||||
|
:returns: amphora_id
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
create_amp_tf = self._taskflow_load(
|
||||||
|
self._amphora_flows.get_create_amphora_flow(),
|
||||||
|
store={constants.BUILD_TYPE_PRIORITY:
|
||||||
|
constants.LB_CREATE_SPARES_POOL_PRIORITY,
|
||||||
|
constants.FLAVOR: None}
|
||||||
|
)
|
||||||
|
with tf_logging.DynamicLoggingListener(create_amp_tf, log=LOG):
|
||||||
|
create_amp_tf.run()
|
||||||
|
|
||||||
|
return create_amp_tf.storage.fetch('amphora')
|
||||||
|
except Exception as e:
|
||||||
|
LOG.error('Failed to create an amphora due to: {}'.format(str(e)))
|
||||||
|
|
||||||
|
def delete_amphora(self, amphora_id):
|
||||||
|
"""Deletes an existing Amphora.
|
||||||
|
|
||||||
|
:param amphora_id: ID of the amphora to delete
|
||||||
|
:returns: None
|
||||||
|
:raises AmphoraNotFound: The referenced Amphora was not found
|
||||||
|
"""
|
||||||
|
amphora = self._amphora_repo.get(db_apis.get_session(),
|
||||||
|
id=amphora_id)
|
||||||
|
delete_amp_tf = self._taskflow_load(self._amphora_flows.
|
||||||
|
get_delete_amphora_flow(),
|
||||||
|
store={constants.AMPHORA: amphora})
|
||||||
|
with tf_logging.DynamicLoggingListener(delete_amp_tf,
|
||||||
|
log=LOG):
|
||||||
|
delete_amp_tf.run()
|
||||||
|
|
||||||
|
@tenacity.retry(
|
||||||
|
retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound),
|
||||||
|
wait=tenacity.wait_incrementing(
|
||||||
|
RETRY_INITIAL_DELAY, RETRY_BACKOFF, RETRY_MAX),
|
||||||
|
stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS))
|
||||||
|
def create_health_monitor(self, health_monitor_id):
|
||||||
|
"""Creates a health monitor.
|
||||||
|
|
||||||
|
:param pool_id: ID of the pool to create a health monitor on
|
||||||
|
:returns: None
|
||||||
|
:raises NoResultFound: Unable to find the object
|
||||||
|
"""
|
||||||
|
health_mon = self._health_mon_repo.get(db_apis.get_session(),
|
||||||
|
id=health_monitor_id)
|
||||||
|
if not health_mon:
|
||||||
|
LOG.warning('Failed to fetch %s %s from DB. Retrying for up to '
|
||||||
|
'60 seconds.', 'health_monitor', health_monitor_id)
|
||||||
|
raise db_exceptions.NoResultFound
|
||||||
|
|
||||||
|
pool = health_mon.pool
|
||||||
|
listeners = pool.listeners
|
||||||
|
pool.health_monitor = health_mon
|
||||||
|
load_balancer = pool.load_balancer
|
||||||
|
|
||||||
|
create_hm_tf = self._taskflow_load(
|
||||||
|
self._health_monitor_flows.get_create_health_monitor_flow(),
|
||||||
|
store={constants.HEALTH_MON: health_mon,
|
||||||
|
constants.POOL: pool,
|
||||||
|
constants.LISTENERS: listeners,
|
||||||
|
constants.LOADBALANCER: load_balancer})
|
||||||
|
with tf_logging.DynamicLoggingListener(create_hm_tf,
|
||||||
|
log=LOG):
|
||||||
|
create_hm_tf.run()
|
||||||
|
|
||||||
|
def delete_health_monitor(self, health_monitor_id):
|
||||||
|
"""Deletes a health monitor.
|
||||||
|
|
||||||
|
:param pool_id: ID of the pool to delete its health monitor
|
||||||
|
:returns: None
|
||||||
|
:raises HMNotFound: The referenced health monitor was not found
|
||||||
|
"""
|
||||||
|
health_mon = self._health_mon_repo.get(db_apis.get_session(),
|
||||||
|
id=health_monitor_id)
|
||||||
|
|
||||||
|
pool = health_mon.pool
|
||||||
|
listeners = pool.listeners
|
||||||
|
load_balancer = pool.load_balancer
|
||||||
|
|
||||||
|
delete_hm_tf = self._taskflow_load(
|
||||||
|
self._health_monitor_flows.get_delete_health_monitor_flow(),
|
||||||
|
store={constants.HEALTH_MON: health_mon,
|
||||||
|
constants.POOL: pool,
|
||||||
|
constants.LISTENERS: listeners,
|
||||||
|
constants.LOADBALANCER: load_balancer})
|
||||||
|
with tf_logging.DynamicLoggingListener(delete_hm_tf,
|
||||||
|
log=LOG):
|
||||||
|
delete_hm_tf.run()
|
||||||
|
|
||||||
|
def update_health_monitor(self, health_monitor_id, health_monitor_updates):
|
||||||
|
"""Updates a health monitor.
|
||||||
|
|
||||||
|
:param pool_id: ID of the pool to have it's health monitor updated
|
||||||
|
:param health_monitor_updates: Dict containing updated health monitor
|
||||||
|
:returns: None
|
||||||
|
:raises HMNotFound: The referenced health monitor was not found
|
||||||
|
"""
|
||||||
|
health_mon = None
|
||||||
|
try:
|
||||||
|
health_mon = self._get_db_obj_until_pending_update(
|
||||||
|
self._health_mon_repo, health_monitor_id)
|
||||||
|
except tenacity.RetryError as e:
|
||||||
|
LOG.warning('Health monitor did not go into %s in 60 seconds. '
|
||||||
|
'This either due to an in-progress Octavia upgrade '
|
||||||
|
'or an overloaded and failing database. Assuming '
|
||||||
|
'an upgrade is in progress and continuing.',
|
||||||
|
constants.PENDING_UPDATE)
|
||||||
|
health_mon = e.last_attempt.result()
|
||||||
|
|
||||||
|
pool = health_mon.pool
|
||||||
|
listeners = pool.listeners
|
||||||
|
pool.health_monitor = health_mon
|
||||||
|
load_balancer = pool.load_balancer
|
||||||
|
|
||||||
|
update_hm_tf = self._taskflow_load(
|
||||||
|
self._health_monitor_flows.get_update_health_monitor_flow(),
|
||||||
|
store={constants.HEALTH_MON: health_mon,
|
||||||
|
constants.POOL: pool,
|
||||||
|
constants.LISTENERS: listeners,
|
||||||
|
constants.LOADBALANCER: load_balancer,
|
||||||
|
constants.UPDATE_DICT: health_monitor_updates})
|
||||||
|
with tf_logging.DynamicLoggingListener(update_hm_tf,
|
||||||
|
log=LOG):
|
||||||
|
update_hm_tf.run()
|
||||||
|
|
||||||
|
@tenacity.retry(
|
||||||
|
retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound),
|
||||||
|
wait=tenacity.wait_incrementing(
|
||||||
|
RETRY_INITIAL_DELAY, RETRY_BACKOFF, RETRY_MAX),
|
||||||
|
stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS))
|
||||||
|
def create_listener(self, listener_id):
|
||||||
|
"""Creates a listener.
|
||||||
|
|
||||||
|
:param listener_id: ID of the listener to create
|
||||||
|
:returns: None
|
||||||
|
:raises NoResultFound: Unable to find the object
|
||||||
|
"""
|
||||||
|
listener = self._listener_repo.get(db_apis.get_session(),
|
||||||
|
id=listener_id)
|
||||||
|
if not listener:
|
||||||
|
LOG.warning('Failed to fetch %s %s from DB. Retrying for up to '
|
||||||
|
'60 seconds.', 'listener', listener_id)
|
||||||
|
raise db_exceptions.NoResultFound
|
||||||
|
|
||||||
|
load_balancer = listener.load_balancer
|
||||||
|
|
||||||
|
create_listener_tf = self._taskflow_load(self._listener_flows.
|
||||||
|
get_create_listener_flow(),
|
||||||
|
store={constants.LOADBALANCER:
|
||||||
|
load_balancer,
|
||||||
|
constants.LISTENERS:
|
||||||
|
[listener]})
|
||||||
|
with tf_logging.DynamicLoggingListener(create_listener_tf,
|
||||||
|
log=LOG):
|
||||||
|
create_listener_tf.run()
|
||||||
|
|
||||||
|
def delete_listener(self, listener_id):
|
||||||
|
"""Deletes a listener.
|
||||||
|
|
||||||
|
:param listener_id: ID of the listener to delete
|
||||||
|
:returns: None
|
||||||
|
:raises ListenerNotFound: The referenced listener was not found
|
||||||
|
"""
|
||||||
|
listener = self._listener_repo.get(db_apis.get_session(),
|
||||||
|
id=listener_id)
|
||||||
|
load_balancer = listener.load_balancer
|
||||||
|
|
||||||
|
delete_listener_tf = self._taskflow_load(
|
||||||
|
self._listener_flows.get_delete_listener_flow(),
|
||||||
|
store={constants.LOADBALANCER: load_balancer,
|
||||||
|
constants.LISTENER: listener})
|
||||||
|
with tf_logging.DynamicLoggingListener(delete_listener_tf,
|
||||||
|
log=LOG):
|
||||||
|
delete_listener_tf.run()
|
||||||
|
|
||||||
|
def update_listener(self, listener_id, listener_updates):
|
||||||
|
"""Updates a listener.
|
||||||
|
|
||||||
|
:param listener_id: ID of the listener to update
|
||||||
|
:param listener_updates: Dict containing updated listener attributes
|
||||||
|
:returns: None
|
||||||
|
:raises ListenerNotFound: The referenced listener was not found
|
||||||
|
"""
|
||||||
|
        listener = None
        try:
            listener = self._get_db_obj_until_pending_update(
                self._listener_repo, listener_id)
        except tenacity.RetryError as e:
            LOG.warning('Listener did not go into %s in 60 seconds. '
                        'This is either due to an in-progress Octavia '
                        'upgrade or an overloaded and failing database. '
                        'Assuming an upgrade is in progress and continuing.',
                        constants.PENDING_UPDATE)
            listener = e.last_attempt.result()

        load_balancer = listener.load_balancer

        update_listener_tf = self._taskflow_load(self._listener_flows.
                                                 get_update_listener_flow(),
                                                 store={constants.LISTENER:
                                                        listener,
                                                        constants.LOADBALANCER:
                                                        load_balancer,
                                                        constants.UPDATE_DICT:
                                                        listener_updates,
                                                        constants.LISTENERS:
                                                        [listener]})
        with tf_logging.DynamicLoggingListener(update_listener_tf, log=LOG):
            update_listener_tf.run()

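    # Note: The `store` dict handed to _taskflow_load seeds taskflow's
    # storage: each key satisfies some task's declared `requires`, and task
    # `provides` results are written back for downstream tasks. A minimal
    # sketch of the idea (names illustrative only):
    #
    #     from taskflow import engines
    #     engine = engines.load(flow, store={'loadbalancer_id': lb_id})
    #     engine.run()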
    @tenacity.retry(
        retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound),
        wait=tenacity.wait_incrementing(
            RETRY_INITIAL_DELAY, RETRY_BACKOFF, RETRY_MAX),
        stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS))
    def create_load_balancer(self, load_balancer_id, flavor=None):
        """Creates a load balancer by allocating Amphorae.

        First tries to allocate an existing Amphora in READY state.
        If none are available it will attempt to build one specifically
        for this load balancer.

        :param load_balancer_id: ID of the load balancer to create
        :returns: None
        :raises NoResultFound: Unable to find the object
        """
        lb = self._lb_repo.get(db_apis.get_session(), id=load_balancer_id)
        if not lb:
            LOG.warning('Failed to fetch %s %s from DB. Retrying for up to '
                        '60 seconds.', 'load_balancer', load_balancer_id)
            raise db_exceptions.NoResultFound

        # TODO(johnsom) convert this to octavia_lib constant flavor
        # once octavia is transitioned to use octavia_lib
        store = {constants.LOADBALANCER_ID: load_balancer_id,
                 constants.BUILD_TYPE_PRIORITY:
                 constants.LB_CREATE_NORMAL_PRIORITY,
                 constants.FLAVOR: flavor}

        topology = lb.topology

        store[constants.UPDATE_DICT] = {
            constants.TOPOLOGY: topology
        }

        create_lb_flow = self._lb_flows.get_create_load_balancer_flow(
            topology=topology, listeners=lb.listeners)

        create_lb_tf = self._taskflow_load(create_lb_flow, store=store)
        with tf_logging.DynamicLoggingListener(create_lb_tf, log=LOG):
            create_lb_tf.run()

    def delete_load_balancer(self, load_balancer_id, cascade=False):
        """Deletes a load balancer by de-allocating Amphorae.

        :param load_balancer_id: ID of the load balancer to delete
        :returns: None
        :raises LBNotFound: The referenced load balancer was not found
        """
        lb = self._lb_repo.get(db_apis.get_session(),
                               id=load_balancer_id)

        if cascade:
            (flow,
             store) = self._lb_flows.get_cascade_delete_load_balancer_flow(lb)
        else:
            (flow, store) = self._lb_flows.get_delete_load_balancer_flow(lb)
        store.update({constants.LOADBALANCER: lb,
                      constants.SERVER_GROUP_ID: lb.server_group_id})
        delete_lb_tf = self._taskflow_load(flow, store=store)

        with tf_logging.DynamicLoggingListener(delete_lb_tf,
                                               log=LOG):
            delete_lb_tf.run()

    def update_load_balancer(self, load_balancer_id, load_balancer_updates):
        """Updates a load balancer.

        :param load_balancer_id: ID of the load balancer to update
        :param load_balancer_updates: Dict containing updated load balancer
                                      attributes
        :returns: None
        :raises LBNotFound: The referenced load balancer was not found
        """
        lb = None
        try:
            lb = self._get_db_obj_until_pending_update(
                self._lb_repo, load_balancer_id)
        except tenacity.RetryError as e:
            LOG.warning('Load balancer did not go into %s in 60 seconds. '
                        'This is either due to an in-progress Octavia '
                        'upgrade or an overloaded and failing database. '
                        'Assuming an upgrade is in progress and continuing.',
                        constants.PENDING_UPDATE)
            lb = e.last_attempt.result()

        listeners, _ = self._listener_repo.get_all(
            db_apis.get_session(),
            load_balancer_id=load_balancer_id)

        update_lb_tf = self._taskflow_load(
            self._lb_flows.get_update_load_balancer_flow(),
            store={constants.LOADBALANCER: lb,
                   constants.LISTENERS: listeners,
                   constants.UPDATE_DICT: load_balancer_updates})

        with tf_logging.DynamicLoggingListener(update_lb_tf,
                                               log=LOG):
            update_lb_tf.run()

    @tenacity.retry(
        retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound),
        wait=tenacity.wait_incrementing(
            RETRY_INITIAL_DELAY, RETRY_BACKOFF, RETRY_MAX),
        stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS))
    def create_member(self, member_id):
        """Creates a pool member.

        :param member_id: ID of the member to create
        :returns: None
        :raises NoSuitablePool: Unable to find the node pool
        """
        member = self._member_repo.get(db_apis.get_session(),
                                       id=member_id)
        if not member:
            LOG.warning('Failed to fetch %s %s from DB. Retrying for up to '
                        '60 seconds.', 'member', member_id)
            raise db_exceptions.NoResultFound

        pool = member.pool
        listeners = pool.listeners
        load_balancer = pool.load_balancer

        create_member_tf = self._taskflow_load(self._member_flows.
                                               get_create_member_flow(),
                                               store={constants.MEMBER: member,
                                                      constants.LISTENERS:
                                                      listeners,
                                                      constants.LOADBALANCER:
                                                      load_balancer,
                                                      constants.POOL: pool})
        with tf_logging.DynamicLoggingListener(create_member_tf,
                                               log=LOG):
            create_member_tf.run()

    def delete_member(self, member_id):
        """Deletes a pool member.

        :param member_id: ID of the member to delete
        :returns: None
        :raises MemberNotFound: The referenced member was not found
        """
        member = self._member_repo.get(db_apis.get_session(),
                                       id=member_id)
        pool = member.pool
        listeners = pool.listeners
        load_balancer = pool.load_balancer

        delete_member_tf = self._taskflow_load(
            self._member_flows.get_delete_member_flow(),
            store={constants.MEMBER: member, constants.LISTENERS: listeners,
                   constants.LOADBALANCER: load_balancer, constants.POOL: pool}
        )
        with tf_logging.DynamicLoggingListener(delete_member_tf,
                                               log=LOG):
            delete_member_tf.run()

    def batch_update_members(self, old_member_ids, new_member_ids,
                             updated_members):
        old_members = [self._member_repo.get(db_apis.get_session(), id=mid)
                       for mid in old_member_ids]
        new_members = [self._member_repo.get(db_apis.get_session(), id=mid)
                       for mid in new_member_ids]
        updated_members = [
            (self._member_repo.get(db_apis.get_session(), id=m.get('id')), m)
            for m in updated_members]
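        # Note: updated_members is rebuilt as (db_member, update_dict) pairs
        # so the flow can compare the current DB state against the requested
        # changes. All three batches belong to a single pool, so the pool
        # (and from it the listeners and load balancer) can be taken from
        # whichever list happens to be non-empty.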
        if old_members:
            pool = old_members[0].pool
        elif new_members:
            pool = new_members[0].pool
        else:
            pool = updated_members[0][0].pool
        listeners = pool.listeners
        load_balancer = pool.load_balancer

        batch_update_members_tf = self._taskflow_load(
            self._member_flows.get_batch_update_members_flow(
                old_members, new_members, updated_members),
            store={constants.LISTENERS: listeners,
                   constants.LOADBALANCER: load_balancer,
                   constants.POOL: pool})
        with tf_logging.DynamicLoggingListener(batch_update_members_tf,
                                               log=LOG):
            batch_update_members_tf.run()

    def update_member(self, member_id, member_updates):
        """Updates a pool member.

        :param member_id: ID of the member to update
        :param member_updates: Dict containing updated member attributes
        :returns: None
        :raises MemberNotFound: The referenced member was not found
        """
        member = None
        try:
            member = self._get_db_obj_until_pending_update(
                self._member_repo, member_id)
        except tenacity.RetryError as e:
            LOG.warning('Member did not go into %s in 60 seconds. '
                        'This is either due to an in-progress Octavia '
                        'upgrade or an overloaded and failing database. '
                        'Assuming an upgrade is in progress and continuing.',
                        constants.PENDING_UPDATE)
            member = e.last_attempt.result()

        pool = member.pool
        listeners = pool.listeners
        load_balancer = pool.load_balancer

        update_member_tf = self._taskflow_load(self._member_flows.
                                               get_update_member_flow(),
                                               store={constants.MEMBER: member,
                                                      constants.LISTENERS:
                                                      listeners,
                                                      constants.LOADBALANCER:
                                                      load_balancer,
                                                      constants.POOL:
                                                      pool,
                                                      constants.UPDATE_DICT:
                                                      member_updates})
        with tf_logging.DynamicLoggingListener(update_member_tf,
                                               log=LOG):
            update_member_tf.run()

    @tenacity.retry(
        retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound),
        wait=tenacity.wait_incrementing(
            RETRY_INITIAL_DELAY, RETRY_BACKOFF, RETRY_MAX),
        stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS))
    def create_pool(self, pool_id):
        """Creates a node pool.

        :param pool_id: ID of the pool to create
        :returns: None
        :raises NoResultFound: Unable to find the object
        """
        pool = self._pool_repo.get(db_apis.get_session(),
                                   id=pool_id)
        if not pool:
            LOG.warning('Failed to fetch %s %s from DB. Retrying for up to '
                        '60 seconds.', 'pool', pool_id)
            raise db_exceptions.NoResultFound

        listeners = pool.listeners
        load_balancer = pool.load_balancer

        create_pool_tf = self._taskflow_load(self._pool_flows.
                                             get_create_pool_flow(),
                                             store={constants.POOL: pool,
                                                    constants.LISTENERS:
                                                    listeners,
                                                    constants.LOADBALANCER:
                                                    load_balancer})
        with tf_logging.DynamicLoggingListener(create_pool_tf,
                                               log=LOG):
            create_pool_tf.run()

    def delete_pool(self, pool_id):
        """Deletes a node pool.

        :param pool_id: ID of the pool to delete
        :returns: None
        :raises PoolNotFound: The referenced pool was not found
        """
        pool = self._pool_repo.get(db_apis.get_session(),
                                   id=pool_id)

        load_balancer = pool.load_balancer
        listeners = pool.listeners

        delete_pool_tf = self._taskflow_load(
            self._pool_flows.get_delete_pool_flow(),
            store={constants.POOL: pool, constants.LISTENERS: listeners,
                   constants.LOADBALANCER: load_balancer})
        with tf_logging.DynamicLoggingListener(delete_pool_tf,
                                               log=LOG):
            delete_pool_tf.run()

    def update_pool(self, pool_id, pool_updates):
        """Updates a node pool.

        :param pool_id: ID of the pool to update
        :param pool_updates: Dict containing updated pool attributes
        :returns: None
        :raises PoolNotFound: The referenced pool was not found
        """
        pool = None
        try:
            pool = self._get_db_obj_until_pending_update(
                self._pool_repo, pool_id)
        except tenacity.RetryError as e:
            LOG.warning('Pool did not go into %s in 60 seconds. '
                        'This is either due to an in-progress Octavia '
                        'upgrade or an overloaded and failing database. '
                        'Assuming an upgrade is in progress and continuing.',
                        constants.PENDING_UPDATE)
            pool = e.last_attempt.result()

        listeners = pool.listeners
        load_balancer = pool.load_balancer

        update_pool_tf = self._taskflow_load(self._pool_flows.
                                             get_update_pool_flow(),
                                             store={constants.POOL: pool,
                                                    constants.LISTENERS:
                                                    listeners,
                                                    constants.LOADBALANCER:
                                                    load_balancer,
                                                    constants.UPDATE_DICT:
                                                    pool_updates})
        with tf_logging.DynamicLoggingListener(update_pool_tf,
                                               log=LOG):
            update_pool_tf.run()

    @tenacity.retry(
        retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound),
        wait=tenacity.wait_incrementing(
            RETRY_INITIAL_DELAY, RETRY_BACKOFF, RETRY_MAX),
        stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS))
    def create_l7policy(self, l7policy_id):
        """Creates an L7 Policy.

        :param l7policy_id: ID of the l7policy to create
        :returns: None
        :raises NoResultFound: Unable to find the object
        """
        l7policy = self._l7policy_repo.get(db_apis.get_session(),
                                           id=l7policy_id)
        if not l7policy:
            LOG.warning('Failed to fetch %s %s from DB. Retrying for up to '
                        '60 seconds.', 'l7policy', l7policy_id)
            raise db_exceptions.NoResultFound

        listeners = [l7policy.listener]
        load_balancer = l7policy.listener.load_balancer

        create_l7policy_tf = self._taskflow_load(
            self._l7policy_flows.get_create_l7policy_flow(),
            store={constants.L7POLICY: l7policy,
                   constants.LISTENERS: listeners,
                   constants.LOADBALANCER: load_balancer})
        with tf_logging.DynamicLoggingListener(create_l7policy_tf,
                                               log=LOG):
            create_l7policy_tf.run()

    def delete_l7policy(self, l7policy_id):
        """Deletes an L7 policy.

        :param l7policy_id: ID of the l7policy to delete
        :returns: None
        :raises L7PolicyNotFound: The referenced l7policy was not found
        """
        l7policy = self._l7policy_repo.get(db_apis.get_session(),
                                           id=l7policy_id)

        load_balancer = l7policy.listener.load_balancer
        listeners = [l7policy.listener]

        delete_l7policy_tf = self._taskflow_load(
            self._l7policy_flows.get_delete_l7policy_flow(),
            store={constants.L7POLICY: l7policy,
                   constants.LISTENERS: listeners,
                   constants.LOADBALANCER: load_balancer})
        with tf_logging.DynamicLoggingListener(delete_l7policy_tf,
                                               log=LOG):
            delete_l7policy_tf.run()

    def update_l7policy(self, l7policy_id, l7policy_updates):
        """Updates an L7 policy.

        :param l7policy_id: ID of the l7policy to update
        :param l7policy_updates: Dict containing updated l7policy attributes
        :returns: None
        :raises L7PolicyNotFound: The referenced l7policy was not found
        """
        l7policy = None
        try:
            l7policy = self._get_db_obj_until_pending_update(
                self._l7policy_repo, l7policy_id)
        except tenacity.RetryError as e:
            LOG.warning('L7 policy did not go into %s in 60 seconds. '
                        'This is either due to an in-progress Octavia '
                        'upgrade or an overloaded and failing database. '
                        'Assuming an upgrade is in progress and continuing.',
                        constants.PENDING_UPDATE)
            l7policy = e.last_attempt.result()

        listeners = [l7policy.listener]
        load_balancer = l7policy.listener.load_balancer

        update_l7policy_tf = self._taskflow_load(
            self._l7policy_flows.get_update_l7policy_flow(),
            store={constants.L7POLICY: l7policy,
                   constants.LISTENERS: listeners,
                   constants.LOADBALANCER: load_balancer,
                   constants.UPDATE_DICT: l7policy_updates})
        with tf_logging.DynamicLoggingListener(update_l7policy_tf,
                                               log=LOG):
            update_l7policy_tf.run()

    @tenacity.retry(
        retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound),
        wait=tenacity.wait_incrementing(
            RETRY_INITIAL_DELAY, RETRY_BACKOFF, RETRY_MAX),
        stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS))
    def create_l7rule(self, l7rule_id):
        """Creates an L7 Rule.

        :param l7rule_id: ID of the l7rule to create
        :returns: None
        :raises NoResultFound: Unable to find the object
        """
        l7rule = self._l7rule_repo.get(db_apis.get_session(),
                                       id=l7rule_id)
        if not l7rule:
            LOG.warning('Failed to fetch %s %s from DB. Retrying for up to '
                        '60 seconds.', 'l7rule', l7rule_id)
            raise db_exceptions.NoResultFound

        l7policy = l7rule.l7policy
        listeners = [l7policy.listener]
        load_balancer = l7policy.listener.load_balancer

        create_l7rule_tf = self._taskflow_load(
            self._l7rule_flows.get_create_l7rule_flow(),
            store={constants.L7RULE: l7rule,
                   constants.L7POLICY: l7policy,
                   constants.LISTENERS: listeners,
                   constants.LOADBALANCER: load_balancer})
        with tf_logging.DynamicLoggingListener(create_l7rule_tf,
                                               log=LOG):
            create_l7rule_tf.run()

    def delete_l7rule(self, l7rule_id):
        """Deletes an L7 rule.

        :param l7rule_id: ID of the l7rule to delete
        :returns: None
        :raises L7RuleNotFound: The referenced l7rule was not found
        """
        l7rule = self._l7rule_repo.get(db_apis.get_session(),
                                       id=l7rule_id)
        l7policy = l7rule.l7policy
        load_balancer = l7policy.listener.load_balancer
        listeners = [l7policy.listener]

        delete_l7rule_tf = self._taskflow_load(
            self._l7rule_flows.get_delete_l7rule_flow(),
            store={constants.L7RULE: l7rule,
                   constants.L7POLICY: l7policy,
                   constants.LISTENERS: listeners,
                   constants.LOADBALANCER: load_balancer})
        with tf_logging.DynamicLoggingListener(delete_l7rule_tf,
                                               log=LOG):
            delete_l7rule_tf.run()

    def update_l7rule(self, l7rule_id, l7rule_updates):
        """Updates an L7 rule.

        :param l7rule_id: ID of the l7rule to update
        :param l7rule_updates: Dict containing updated l7rule attributes
        :returns: None
        :raises L7RuleNotFound: The referenced l7rule was not found
        """
        l7rule = None
        try:
            l7rule = self._get_db_obj_until_pending_update(
                self._l7rule_repo, l7rule_id)
        except tenacity.RetryError as e:
            LOG.warning('L7 rule did not go into %s in 60 seconds. '
                        'This is either due to an in-progress Octavia '
                        'upgrade or an overloaded and failing database. '
                        'Assuming an upgrade is in progress and continuing.',
                        constants.PENDING_UPDATE)
            l7rule = e.last_attempt.result()

        l7policy = l7rule.l7policy
        listeners = [l7policy.listener]
        load_balancer = l7policy.listener.load_balancer

        update_l7rule_tf = self._taskflow_load(
            self._l7rule_flows.get_update_l7rule_flow(),
            store={constants.L7RULE: l7rule,
                   constants.L7POLICY: l7policy,
                   constants.LISTENERS: listeners,
                   constants.LOADBALANCER: load_balancer,
                   constants.UPDATE_DICT: l7rule_updates})
        with tf_logging.DynamicLoggingListener(update_l7rule_tf,
                                               log=LOG):
            update_l7rule_tf.run()

    def _perform_amphora_failover(self, amp, priority):
        """Internal method to perform failover operations for an amphora.

        :param amp: The amphora to failover
        :param priority: The create priority
        :returns: None
        """

        stored_params = {constants.FAILED_AMPHORA: amp,
                         constants.LOADBALANCER_ID: amp.load_balancer_id,
                         constants.BUILD_TYPE_PRIORITY: priority, }

        if amp.status == constants.DELETED:
            LOG.warning('Amphora %s is marked DELETED in the database but '
                        'was submitted for failover. Deleting it from the '
                        'amphora health table to exclude it from health '
                        'checks and skipping the failover.', amp.id)
            self._amphora_health_repo.delete(db_apis.get_session(),
                                             amphora_id=amp.id)
            return

        if (CONF.house_keeping.spare_amphora_pool_size == 0) and (
                CONF.nova.enable_anti_affinity is False):
            LOG.warning("Failing over amphora with no spares pool may "
                        "cause delays in failover times while a new "
                        "amphora instance boots.")

        # if we run with anti-affinity we need to set the server group
        # as well
        lb = self._amphora_repo.get_lb_for_amphora(
            db_apis.get_session(), amp.id)
        if CONF.nova.enable_anti_affinity and lb:
            stored_params[constants.SERVER_GROUP_ID] = lb.server_group_id
        if lb and lb.flavor_id:
            stored_params[constants.FLAVOR] = (
                self._flavor_repo.get_flavor_metadata_dict(
                    db_apis.get_session(), lb.flavor_id))
        else:
            stored_params[constants.FLAVOR] = {}

        failover_amphora_tf = self._taskflow_load(
            self._amphora_flows.get_failover_flow(
                role=amp.role, load_balancer=lb),
            store=stored_params)

        with tf_logging.DynamicLoggingListener(failover_amphora_tf, log=LOG):
            failover_amphora_tf.run()

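    # Note: The two failover entry points below pass different build
    # priorities into _perform_amphora_failover. Presumably this lets the
    # amphora build queue rank health-triggered failovers
    # (LB_CREATE_FAILOVER_PRIORITY) relative to operator-initiated load
    # balancer failovers (LB_CREATE_ADMIN_FAILOVER_PRIORITY).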
    def failover_amphora(self, amphora_id):
        """Perform failover operations for an amphora.

        :param amphora_id: ID for amphora to failover
        :returns: None
        :raises AmphoraNotFound: The referenced amphora was not found
        """
        try:
            amp = self._amphora_repo.get(db_apis.get_session(),
                                         id=amphora_id)
            if not amp:
                LOG.warning("Could not fetch Amphora %s from DB, ignoring "
                            "failover request.", amphora_id)
                return
            self._perform_amphora_failover(
                amp, constants.LB_CREATE_FAILOVER_PRIORITY)
            if amp.load_balancer_id:
                LOG.info("Mark ACTIVE in DB for load balancer id: %s",
                         amp.load_balancer_id)
                self._lb_repo.update(
                    db_apis.get_session(), amp.load_balancer_id,
                    provisioning_status=constants.ACTIVE)
        except Exception as e:
            try:
                self._lb_repo.update(
                    db_apis.get_session(), amp.load_balancer_id,
                    provisioning_status=constants.ERROR)
            except Exception:
                LOG.error("Unable to revert LB status to ERROR.")
            with excutils.save_and_reraise_exception():
                LOG.error("Failover exception: %s", e)

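    # Note: failover_loadbalancer below fails over the BACKUP amphora first
    # and the rest afterwards. In an active/standby topology this appears
    # to keep the MASTER serving traffic for as long as possible while its
    # peer is rebuilt, minimizing data-plane interruption.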
    def failover_loadbalancer(self, load_balancer_id):
        """Perform failover operations for a load balancer.

        :param load_balancer_id: ID for load balancer to failover
        :returns: None
        :raises LBNotFound: The referenced load balancer was not found
        """

        # Note: This expects that the load balancer is already in
        # provisioning_status=PENDING_UPDATE state
        try:
            lb = self._lb_repo.get(db_apis.get_session(),
                                   id=load_balancer_id)

            # Exclude amphora already deleted
            amps = [a for a in lb.amphorae if a.status != constants.DELETED]
            for amp in amps:
                # failover amphora in backup role
                # Note: this amp may not currently be the backup
                # TODO(johnsom) Change this to query the amp state
                # once the amp API supports it.
                if amp.role == constants.ROLE_BACKUP:
                    self._perform_amphora_failover(
                        amp, constants.LB_CREATE_ADMIN_FAILOVER_PRIORITY)

            for amp in amps:
                # failover everything else
                if amp.role != constants.ROLE_BACKUP:
                    self._perform_amphora_failover(
                        amp, constants.LB_CREATE_ADMIN_FAILOVER_PRIORITY)

            self._lb_repo.update(
                db_apis.get_session(), load_balancer_id,
                provisioning_status=constants.ACTIVE)

        except Exception as e:
            with excutils.save_and_reraise_exception():
                LOG.error("LB %(lbid)s failover exception: %(exc)s",
                          {'lbid': load_balancer_id, 'exc': e})
                self._lb_repo.update(
                    db_apis.get_session(), load_balancer_id,
                    provisioning_status=constants.ERROR)

    def amphora_cert_rotation(self, amphora_id):
        """Perform cert rotation for an amphora.

        :param amphora_id: ID for amphora to rotate
        :returns: None
        :raises AmphoraNotFound: The referenced amphora was not found
        """

        amp = self._amphora_repo.get(db_apis.get_session(),
                                     id=amphora_id)
        LOG.info("Start amphora cert rotation, amphora's id is: %s", amp.id)

        certrotation_amphora_tf = self._taskflow_load(
            self._amphora_flows.cert_rotate_amphora_flow(),
            store={constants.AMPHORA: amp,
                   constants.AMPHORA_ID: amp.id})

        with tf_logging.DynamicLoggingListener(certrotation_amphora_tf,
                                               log=LOG):
            certrotation_amphora_tf.run()

    def update_amphora_agent_config(self, amphora_id):
        """Update the amphora agent configuration.

        Note: This will update the amphora agent configuration file and
        update the running configuration for mutable configuration
        items.

        :param amphora_id: ID of the amphora to update.
        :returns: None
        """
        LOG.info("Start amphora agent configuration update, amphora's id "
                 "is: %s", amphora_id)
        amp = self._amphora_repo.get(db_apis.get_session(), id=amphora_id)
        lb = self._amphora_repo.get_lb_for_amphora(db_apis.get_session(),
                                                   amphora_id)
        flavor = {}
        if lb.flavor_id:
            flavor = self._flavor_repo.get_flavor_metadata_dict(
                db_apis.get_session(), lb.flavor_id)

        update_amphora_tf = self._taskflow_load(
            self._amphora_flows.update_amphora_config_flow(),
            store={constants.AMPHORA: amp,
                   constants.FLAVOR: flavor})

        with tf_logging.DynamicLoggingListener(update_amphora_tf,
                                               log=LOG):
            update_amphora_tf.run()
11
octavia/controller/worker/v2/flows/__init__.py
Normal file
@ -0,0 +1,11 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
599
octavia/controller/worker/v2/flows/amphora_flows.py
Normal file
@ -0,0 +1,599 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from oslo_config import cfg
from taskflow.patterns import graph_flow
from taskflow.patterns import linear_flow
from taskflow.patterns import unordered_flow

from octavia.common import constants
from octavia.controller.worker.v2.tasks import amphora_driver_tasks
from octavia.controller.worker.v2.tasks import cert_task
from octavia.controller.worker.v2.tasks import compute_tasks
from octavia.controller.worker.v2.tasks import database_tasks
from octavia.controller.worker.v2.tasks import lifecycle_tasks
from octavia.controller.worker.v2.tasks import network_tasks

CONF = cfg.CONF


class AmphoraFlows(object):
    def __init__(self):
        # for some reason only this has the values from the config file
        self.REST_AMPHORA_DRIVER = (CONF.controller_worker.amphora_driver ==
                                    'amphora_haproxy_rest_driver')

    def get_create_amphora_flow(self):
        """Creates a flow to create an amphora.

        :returns: The flow for creating the amphora
        """
        create_amphora_flow = linear_flow.Flow(constants.CREATE_AMPHORA_FLOW)
        create_amphora_flow.add(database_tasks.CreateAmphoraInDB(
            provides=constants.AMPHORA_ID))
        create_amphora_flow.add(lifecycle_tasks.AmphoraIDToErrorOnRevertTask(
            requires=constants.AMPHORA_ID))
        if self.REST_AMPHORA_DRIVER:
            create_amphora_flow.add(cert_task.GenerateServerPEMTask(
                provides=constants.SERVER_PEM))

            create_amphora_flow.add(
                database_tasks.UpdateAmphoraDBCertExpiration(
                    requires=(constants.AMPHORA_ID, constants.SERVER_PEM)))

            create_amphora_flow.add(compute_tasks.CertComputeCreate(
                requires=(constants.AMPHORA_ID, constants.SERVER_PEM,
                          constants.BUILD_TYPE_PRIORITY, constants.FLAVOR),
                provides=constants.COMPUTE_ID))
        else:
            create_amphora_flow.add(compute_tasks.ComputeCreate(
                requires=(constants.AMPHORA_ID, constants.BUILD_TYPE_PRIORITY,
                          constants.FLAVOR),
                provides=constants.COMPUTE_ID))
        create_amphora_flow.add(database_tasks.MarkAmphoraBootingInDB(
            requires=(constants.AMPHORA_ID, constants.COMPUTE_ID)))
        create_amphora_flow.add(compute_tasks.ComputeActiveWait(
            requires=(constants.COMPUTE_ID, constants.AMPHORA_ID),
            provides=constants.COMPUTE_OBJ))
        create_amphora_flow.add(database_tasks.UpdateAmphoraInfo(
            requires=(constants.AMPHORA_ID, constants.COMPUTE_OBJ),
            provides=constants.AMPHORA))
        create_amphora_flow.add(
            amphora_driver_tasks.AmphoraComputeConnectivityWait(
                requires=constants.AMPHORA))
        create_amphora_flow.add(database_tasks.ReloadAmphora(
            requires=constants.AMPHORA_ID,
            provides=constants.AMPHORA))
        create_amphora_flow.add(amphora_driver_tasks.AmphoraFinalize(
            requires=constants.AMPHORA))
        create_amphora_flow.add(database_tasks.MarkAmphoraReadyInDB(
            requires=constants.AMPHORA))

        return create_amphora_flow

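    # Note: A taskflow linear_flow runs its tasks strictly in the order they
    # are added and, when a task fails, reverts the already-completed tasks
    # in reverse order. The lifecycle_tasks.*ToErrorOnRevertTask entries
    # added near the top of each flow hook that revert pass to mark the
    # owning object ERROR in the DB, so a half-built amphora ends up
    # visibly failed instead of silently stuck.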
    def _get_post_map_lb_subflow(self, prefix, role):
        """Set amphora type after mapped to lb."""

        sf_name = prefix + '-' + constants.POST_MAP_AMP_TO_LB_SUBFLOW
        post_map_amp_to_lb = linear_flow.Flow(
            sf_name)

        post_map_amp_to_lb.add(database_tasks.ReloadAmphora(
            name=sf_name + '-' + constants.RELOAD_AMPHORA,
            requires=constants.AMPHORA_ID,
            provides=constants.AMPHORA))

        post_map_amp_to_lb.add(amphora_driver_tasks.AmphoraConfigUpdate(
            name=sf_name + '-' + constants.AMPHORA_CONFIG_UPDATE_TASK,
            requires=(constants.AMPHORA, constants.FLAVOR)))

        if role == constants.ROLE_MASTER:
            post_map_amp_to_lb.add(database_tasks.MarkAmphoraMasterInDB(
                name=sf_name + '-' + constants.MARK_AMP_MASTER_INDB,
                requires=constants.AMPHORA))
        elif role == constants.ROLE_BACKUP:
            post_map_amp_to_lb.add(database_tasks.MarkAmphoraBackupInDB(
                name=sf_name + '-' + constants.MARK_AMP_BACKUP_INDB,
                requires=constants.AMPHORA))
        elif role == constants.ROLE_STANDALONE:
            post_map_amp_to_lb.add(database_tasks.MarkAmphoraStandAloneInDB(
                name=sf_name + '-' + constants.MARK_AMP_STANDALONE_INDB,
                requires=constants.AMPHORA))

        return post_map_amp_to_lb

    def _get_create_amp_for_lb_subflow(self, prefix, role):
        """Create a new amphora for lb."""

        sf_name = prefix + '-' + constants.CREATE_AMP_FOR_LB_SUBFLOW
        create_amp_for_lb_subflow = linear_flow.Flow(sf_name)
        create_amp_for_lb_subflow.add(database_tasks.CreateAmphoraInDB(
            name=sf_name + '-' + constants.CREATE_AMPHORA_INDB,
            provides=constants.AMPHORA_ID))

        require_server_group_id_condition = (
            role in (constants.ROLE_BACKUP, constants.ROLE_MASTER) and
            CONF.nova.enable_anti_affinity)

        if self.REST_AMPHORA_DRIVER:
            create_amp_for_lb_subflow.add(cert_task.GenerateServerPEMTask(
                name=sf_name + '-' + constants.GENERATE_SERVER_PEM,
                provides=constants.SERVER_PEM))

            create_amp_for_lb_subflow.add(
                database_tasks.UpdateAmphoraDBCertExpiration(
                    name=sf_name + '-' + constants.UPDATE_CERT_EXPIRATION,
                    requires=(constants.AMPHORA_ID, constants.SERVER_PEM)))

            if require_server_group_id_condition:
                create_amp_for_lb_subflow.add(compute_tasks.CertComputeCreate(
                    name=sf_name + '-' + constants.CERT_COMPUTE_CREATE,
                    requires=(
                        constants.AMPHORA_ID,
                        constants.SERVER_PEM,
                        constants.BUILD_TYPE_PRIORITY,
                        constants.SERVER_GROUP_ID,
                        constants.FLAVOR
                    ),
                    provides=constants.COMPUTE_ID))
            else:
                create_amp_for_lb_subflow.add(compute_tasks.CertComputeCreate(
                    name=sf_name + '-' + constants.CERT_COMPUTE_CREATE,
                    requires=(
                        constants.AMPHORA_ID,
                        constants.SERVER_PEM,
                        constants.BUILD_TYPE_PRIORITY,
                        constants.FLAVOR
                    ),
                    provides=constants.COMPUTE_ID))
        else:
            if require_server_group_id_condition:
                create_amp_for_lb_subflow.add(compute_tasks.ComputeCreate(
                    name=sf_name + '-' + constants.COMPUTE_CREATE,
                    requires=(
                        constants.AMPHORA_ID,
                        constants.BUILD_TYPE_PRIORITY,
                        constants.SERVER_GROUP_ID,
                        constants.FLAVOR
                    ),
                    provides=constants.COMPUTE_ID))
            else:
                create_amp_for_lb_subflow.add(compute_tasks.ComputeCreate(
                    name=sf_name + '-' + constants.COMPUTE_CREATE,
                    requires=(
                        constants.AMPHORA_ID,
                        constants.BUILD_TYPE_PRIORITY,
                        constants.FLAVOR
                    ),
                    provides=constants.COMPUTE_ID))

        create_amp_for_lb_subflow.add(database_tasks.UpdateAmphoraComputeId(
            name=sf_name + '-' + constants.UPDATE_AMPHORA_COMPUTEID,
            requires=(constants.AMPHORA_ID, constants.COMPUTE_ID)))
        create_amp_for_lb_subflow.add(database_tasks.MarkAmphoraBootingInDB(
            name=sf_name + '-' + constants.MARK_AMPHORA_BOOTING_INDB,
            requires=(constants.AMPHORA_ID, constants.COMPUTE_ID)))
        create_amp_for_lb_subflow.add(compute_tasks.ComputeActiveWait(
            name=sf_name + '-' + constants.COMPUTE_WAIT,
            requires=(constants.COMPUTE_ID, constants.AMPHORA_ID),
            provides=constants.COMPUTE_OBJ))
        create_amp_for_lb_subflow.add(database_tasks.UpdateAmphoraInfo(
            name=sf_name + '-' + constants.UPDATE_AMPHORA_INFO,
            requires=(constants.AMPHORA_ID, constants.COMPUTE_OBJ),
            provides=constants.AMPHORA))
        create_amp_for_lb_subflow.add(
            amphora_driver_tasks.AmphoraComputeConnectivityWait(
                name=sf_name + '-' + constants.AMP_COMPUTE_CONNECTIVITY_WAIT,
                requires=constants.AMPHORA))
        create_amp_for_lb_subflow.add(amphora_driver_tasks.AmphoraFinalize(
            name=sf_name + '-' + constants.AMPHORA_FINALIZE,
            requires=constants.AMPHORA))
        create_amp_for_lb_subflow.add(
            database_tasks.MarkAmphoraAllocatedInDB(
                name=sf_name + '-' + constants.MARK_AMPHORA_ALLOCATED_INDB,
                requires=(constants.AMPHORA, constants.LOADBALANCER_ID)))
        create_amp_for_lb_subflow.add(database_tasks.ReloadAmphora(
            name=sf_name + '-' + constants.RELOAD_AMPHORA,
            requires=constants.AMPHORA_ID,
            provides=constants.AMPHORA))

        if role == constants.ROLE_MASTER:
            create_amp_for_lb_subflow.add(database_tasks.MarkAmphoraMasterInDB(
                name=sf_name + '-' + constants.MARK_AMP_MASTER_INDB,
                requires=constants.AMPHORA))
        elif role == constants.ROLE_BACKUP:
            create_amp_for_lb_subflow.add(database_tasks.MarkAmphoraBackupInDB(
                name=sf_name + '-' + constants.MARK_AMP_BACKUP_INDB,
                requires=constants.AMPHORA))
        elif role == constants.ROLE_STANDALONE:
            create_amp_for_lb_subflow.add(
                database_tasks.MarkAmphoraStandAloneInDB(
                    name=sf_name + '-' + constants.MARK_AMP_STANDALONE_INDB,
                    requires=constants.AMPHORA))

        return create_amp_for_lb_subflow

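    # Note: In a taskflow graph flow, a decider receives a `history` mapping
    # of the upstream task name to its result. MapLoadbalancerToAmphora
    # (linked below) provides a spare amphora id, or None when the spares
    # pool is empty, so the two deciders simply branch on that one value.
    # Shape for illustration only:
    #
    #     history == {'<prefix>-MapLoadbalancerToAmphora': amp_id_or_None}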
    def _allocate_amp_to_lb_decider(self, history):
        """decides if the lb shall be mapped to a spare amphora

        :return: True if a spare amphora exists in DB
        """

        return list(history.values())[0] is not None

    def _create_new_amp_for_lb_decider(self, history):
        """decides if a new amphora must be created for the lb

        :return: True if there is no spare amphora
        """

        return list(history.values())[0] is None

    def get_amphora_for_lb_subflow(
            self, prefix, role=constants.ROLE_STANDALONE):
        """Tries to allocate a spare amphora to a load balancer.

        If no spare amphora exists, a new amphora is created.
        """

        sf_name = prefix + '-' + constants.GET_AMPHORA_FOR_LB_SUBFLOW

        # We need a graph flow here for a conditional flow
        amp_for_lb_flow = graph_flow.Flow(sf_name)

        # Setup the task that maps an amphora to a load balancer
        allocate_and_associate_amp = database_tasks.MapLoadbalancerToAmphora(
            name=sf_name + '-' + constants.MAP_LOADBALANCER_TO_AMPHORA,
            requires=(constants.LOADBALANCER_ID, constants.FLAVOR),
            provides=constants.AMPHORA_ID)

        # Define a subflow for if we successfully map an amphora
        map_lb_to_amp = self._get_post_map_lb_subflow(prefix, role)
        # Define a subflow for if we can't map an amphora
        create_amp = self._get_create_amp_for_lb_subflow(prefix, role)

        # Add them to the graph flow
        amp_for_lb_flow.add(allocate_and_associate_amp,
                            map_lb_to_amp, create_amp)

        # Setup the decider for the path if we can map an amphora
        amp_for_lb_flow.link(allocate_and_associate_amp, map_lb_to_amp,
                             decider=self._allocate_amp_to_lb_decider,
                             decider_depth='flow')
        # Setup the decider for the path if we can't map an amphora
        amp_for_lb_flow.link(allocate_and_associate_amp, create_amp,
                             decider=self._create_new_amp_for_lb_decider,
                             decider_depth='flow')

        # Plug the network
        # todo(xgerman): Rework failover flow
        if prefix != constants.FAILOVER_AMPHORA_FLOW:
            sf_name = prefix + '-' + constants.AMP_PLUG_NET_SUBFLOW
            amp_for_lb_net_flow = linear_flow.Flow(sf_name)
            amp_for_lb_net_flow.add(amp_for_lb_flow)
            amp_for_lb_net_flow.add(*self._get_amp_net_subflow(sf_name))
            return amp_for_lb_net_flow

        return amp_for_lb_flow

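    # Note: decider_depth='flow' above makes a False decider skip the entire
    # downstream subflow rather than just the directly linked task, so
    # exactly one of the two branches (map a spare vs. build a new amphora)
    # executes per run.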
    def _get_amp_net_subflow(self, sf_name):
        flows = []
        flows.append(network_tasks.PlugVIPAmpphora(
            name=sf_name + '-' + constants.PLUG_VIP_AMPHORA,
            requires=(constants.LOADBALANCER, constants.AMPHORA,
                      constants.SUBNET),
            provides=constants.AMP_DATA))

        flows.append(network_tasks.ApplyQosAmphora(
            name=sf_name + '-' + constants.APPLY_QOS_AMP,
            requires=(constants.LOADBALANCER, constants.AMP_DATA,
                      constants.UPDATE_DICT)))
        flows.append(database_tasks.UpdateAmphoraVIPData(
            name=sf_name + '-' + constants.UPDATE_AMPHORA_VIP_DATA,
            requires=constants.AMP_DATA))
        flows.append(database_tasks.ReloadAmphora(
            name=sf_name + '-' + constants.RELOAD_AMP_AFTER_PLUG_VIP,
            requires=constants.AMPHORA_ID,
            provides=constants.AMPHORA))
        flows.append(database_tasks.ReloadLoadBalancer(
            name=sf_name + '-' + constants.RELOAD_LB_AFTER_PLUG_VIP,
            requires=constants.LOADBALANCER_ID,
            provides=constants.LOADBALANCER))
        flows.append(network_tasks.GetAmphoraNetworkConfigs(
            name=sf_name + '-' + constants.GET_AMP_NETWORK_CONFIG,
            requires=(constants.LOADBALANCER, constants.AMPHORA),
            provides=constants.AMPHORA_NETWORK_CONFIG))
        flows.append(amphora_driver_tasks.AmphoraPostVIPPlug(
            name=sf_name + '-' + constants.AMP_POST_VIP_PLUG,
            rebind={constants.AMPHORAE_NETWORK_CONFIG:
                    constants.AMPHORA_NETWORK_CONFIG},
            requires=(constants.LOADBALANCER,
                      constants.AMPHORAE_NETWORK_CONFIG)))
        return flows

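    # Note: In the delete flow below, the amphora health record is marked
    # busy before the compute instance is deleted. That ordering appears to
    # keep the health manager from treating the intentional teardown as a
    # failure and triggering an unwanted failover mid-delete.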
    def get_delete_amphora_flow(self):
        """Creates a flow to delete an amphora.

        This should be configurable in the config file
        :returns: The flow for deleting the amphora
        :raises AmphoraNotFound: The referenced Amphora was not found
        """

        delete_amphora_flow = linear_flow.Flow(constants.DELETE_AMPHORA_FLOW)
        delete_amphora_flow.add(lifecycle_tasks.AmphoraToErrorOnRevertTask(
            requires=constants.AMPHORA))
        delete_amphora_flow.add(database_tasks.
                                MarkAmphoraPendingDeleteInDB(
                                    requires=constants.AMPHORA))
        delete_amphora_flow.add(database_tasks.
                                MarkAmphoraHealthBusy(
                                    requires=constants.AMPHORA))
        delete_amphora_flow.add(compute_tasks.ComputeDelete(
            requires=constants.AMPHORA))
        delete_amphora_flow.add(database_tasks.
                                DisableAmphoraHealthMonitoring(
                                    requires=constants.AMPHORA))
        delete_amphora_flow.add(database_tasks.
                                MarkAmphoraDeletedInDB(
                                    requires=constants.AMPHORA))
        return delete_amphora_flow

    def get_failover_flow(self, role=constants.ROLE_STANDALONE,
                          load_balancer=None):
        """Creates a flow to failover a stale amphora

        :returns: The flow for amphora failover
        """

        failover_amphora_flow = linear_flow.Flow(
            constants.FAILOVER_AMPHORA_FLOW)

        failover_amphora_flow.add(lifecycle_tasks.AmphoraToErrorOnRevertTask(
            rebind={constants.AMPHORA: constants.FAILED_AMPHORA},
            requires=constants.AMPHORA))

        # Note: It seems intuitive to boot an amphora prior to deleting
        #       the old amphora, however this is a complicated issue.
        #       If the target host (due to anti-affinity) is resource
        #       constrained, this will fail where a post-delete will
        #       succeed. Since this is async with the API it would result
        #       in the LB ending in ERROR though the amps are still alive.
        #       Consider in the future making this a complicated
        #       try-on-failure-retry flow, or move upgrade failovers to be
        #       synchronous with the API. For now spares pool and act/stdby
        #       will mitigate most of this delay.

        # Delete the old amphora
        failover_amphora_flow.add(
            database_tasks.MarkAmphoraPendingDeleteInDB(
                rebind={constants.AMPHORA: constants.FAILED_AMPHORA},
                requires=constants.AMPHORA))
        failover_amphora_flow.add(
            database_tasks.MarkAmphoraHealthBusy(
                rebind={constants.AMPHORA: constants.FAILED_AMPHORA},
                requires=constants.AMPHORA))
        failover_amphora_flow.add(compute_tasks.ComputeDelete(
            rebind={constants.AMPHORA: constants.FAILED_AMPHORA},
            requires=constants.AMPHORA))
        failover_amphora_flow.add(network_tasks.WaitForPortDetach(
            rebind={constants.AMPHORA: constants.FAILED_AMPHORA},
            requires=constants.AMPHORA))
        failover_amphora_flow.add(database_tasks.MarkAmphoraDeletedInDB(
            rebind={constants.AMPHORA: constants.FAILED_AMPHORA},
            requires=constants.AMPHORA))

        # If this is an unallocated amp (spares pool), we're done
        if not load_balancer:
            failover_amphora_flow.add(
                database_tasks.DisableAmphoraHealthMonitoring(
                    rebind={constants.AMPHORA: constants.FAILED_AMPHORA},
                    requires=constants.AMPHORA))
            return failover_amphora_flow

        # Save failed amphora details for later
        failover_amphora_flow.add(
            database_tasks.GetAmphoraDetails(
                rebind={constants.AMPHORA: constants.FAILED_AMPHORA},
                requires=constants.AMPHORA,
                provides=constants.AMP_DATA))

        # Get a new amphora
        # Note: Role doesn't matter here. We will update it later.
        get_amp_subflow = self.get_amphora_for_lb_subflow(
            prefix=constants.FAILOVER_AMPHORA_FLOW)
        failover_amphora_flow.add(get_amp_subflow)

        # Update the new amphora with the failed amphora details
        failover_amphora_flow.add(database_tasks.UpdateAmpFailoverDetails(
            requires=(constants.AMPHORA, constants.AMP_DATA)))

        # Update the data stored in the flow from the database
        failover_amphora_flow.add(database_tasks.ReloadLoadBalancer(
            requires=constants.LOADBALANCER_ID,
            provides=constants.LOADBALANCER))

        failover_amphora_flow.add(database_tasks.ReloadAmphora(
            requires=constants.AMPHORA_ID,
            provides=constants.AMPHORA))

        # Prepare to reconnect the network interface(s)
        failover_amphora_flow.add(network_tasks.GetAmphoraeNetworkConfigs(
            requires=constants.LOADBALANCER,
            provides=constants.AMPHORAE_NETWORK_CONFIG))
        failover_amphora_flow.add(database_tasks.GetListenersFromLoadbalancer(
            requires=constants.LOADBALANCER, provides=constants.LISTENERS))
        failover_amphora_flow.add(database_tasks.GetAmphoraeFromLoadbalancer(
            requires=constants.LOADBALANCER, provides=constants.AMPHORAE))

        # Plug the VIP ports into the new amphora
        # These steps run here because the UDP listeners need to do some
        # kernel configuration before the listener update, to avoid
        # failures while the amphora is being rebuilt.
        failover_amphora_flow.add(network_tasks.PlugVIPPort(
            requires=(constants.AMPHORA, constants.AMPHORAE_NETWORK_CONFIG)))
        failover_amphora_flow.add(amphora_driver_tasks.AmphoraPostVIPPlug(
            requires=(constants.AMPHORA, constants.LOADBALANCER,
                      constants.AMPHORAE_NETWORK_CONFIG)))

        # Listeners update needs to be run on all amphora to update
        # their peer configurations. So parallelize this with an
        # unordered subflow.
        update_amps_subflow = unordered_flow.Flow(
            constants.UPDATE_AMPS_SUBFLOW)

        timeout_dict = {
            constants.CONN_MAX_RETRIES:
                CONF.haproxy_amphora.active_connection_max_retries,
            constants.CONN_RETRY_INTERVAL:
                CONF.haproxy_amphora.active_connection_rety_interval}

        # Setup parallel flows for each amp. We don't know the new amp
        # details at flow creation time, so setup a subflow for each
        # amp on the LB, they let the task index into a list of amps
        # to find the amphora it should work on.
        amp_index = 0
        for amp in load_balancer.amphorae:
            if amp.status == constants.DELETED:
                continue
            update_amps_subflow.add(
                amphora_driver_tasks.AmpListenersUpdate(
                    name=constants.AMP_LISTENER_UPDATE + '-' + str(amp_index),
                    requires=(constants.LISTENERS, constants.AMPHORAE),
                    inject={constants.AMPHORA_INDEX: amp_index,
                            constants.TIMEOUT_DICT: timeout_dict}))
            amp_index += 1

        failover_amphora_flow.add(update_amps_subflow)

        # Plug the member networks into the new amphora
        failover_amphora_flow.add(network_tasks.CalculateAmphoraDelta(
            requires=(constants.LOADBALANCER, constants.AMPHORA),
            provides=constants.DELTA))

        failover_amphora_flow.add(network_tasks.HandleNetworkDelta(
            requires=(constants.AMPHORA, constants.DELTA),
            provides=constants.ADDED_PORTS))

        failover_amphora_flow.add(amphora_driver_tasks.AmphoraePostNetworkPlug(
            requires=(constants.LOADBALANCER, constants.ADDED_PORTS)))

        failover_amphora_flow.add(database_tasks.ReloadLoadBalancer(
            name='octavia-failover-LB-reload-2',
            requires=constants.LOADBALANCER_ID,
            provides=constants.LOADBALANCER))

        # Handle the amphora role and VRRP if necessary
        if role == constants.ROLE_MASTER:
            failover_amphora_flow.add(database_tasks.MarkAmphoraMasterInDB(
                name=constants.MARK_AMP_MASTER_INDB,
                requires=constants.AMPHORA))
            vrrp_subflow = self.get_vrrp_subflow(role)
            failover_amphora_flow.add(vrrp_subflow)
        elif role == constants.ROLE_BACKUP:
            failover_amphora_flow.add(database_tasks.MarkAmphoraBackupInDB(
                name=constants.MARK_AMP_BACKUP_INDB,
                requires=constants.AMPHORA))
            vrrp_subflow = self.get_vrrp_subflow(role)
            failover_amphora_flow.add(vrrp_subflow)
        elif role == constants.ROLE_STANDALONE:
            failover_amphora_flow.add(
                database_tasks.MarkAmphoraStandAloneInDB(
                    name=constants.MARK_AMP_STANDALONE_INDB,
                    requires=constants.AMPHORA))

        failover_amphora_flow.add(amphora_driver_tasks.ListenersStart(
            requires=(constants.LOADBALANCER, constants.LISTENERS,
                      constants.AMPHORA)))
        failover_amphora_flow.add(
            database_tasks.DisableAmphoraHealthMonitoring(
                rebind={constants.AMPHORA: constants.FAILED_AMPHORA},
                requires=constants.AMPHORA))

        return failover_amphora_flow

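    # Note: The VRRP subflow below is ordered so the VRRP interface and
    # group exist in the DB before the keepalived configuration is pushed
    # to the amphorae; AmphoraVRRPStart runs last, once every peer has
    # received the updated configuration.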
    def get_vrrp_subflow(self, prefix):
        sf_name = prefix + '-' + constants.GET_VRRP_SUBFLOW
        vrrp_subflow = linear_flow.Flow(sf_name)
        vrrp_subflow.add(network_tasks.GetAmphoraeNetworkConfigs(
            name=sf_name + '-' + constants.GET_AMP_NETWORK_CONFIG,
            requires=constants.LOADBALANCER,
            provides=constants.AMPHORAE_NETWORK_CONFIG))
        vrrp_subflow.add(amphora_driver_tasks.AmphoraUpdateVRRPInterface(
            name=sf_name + '-' + constants.AMP_UPDATE_VRRP_INTF,
            requires=constants.LOADBALANCER,
            provides=constants.LOADBALANCER))
        vrrp_subflow.add(database_tasks.CreateVRRPGroupForLB(
            name=sf_name + '-' + constants.CREATE_VRRP_GROUP_FOR_LB,
            requires=constants.LOADBALANCER,
            provides=constants.LOADBALANCER))
        vrrp_subflow.add(amphora_driver_tasks.AmphoraVRRPUpdate(
            name=sf_name + '-' + constants.AMP_VRRP_UPDATE,
            requires=(constants.LOADBALANCER,
                      constants.AMPHORAE_NETWORK_CONFIG)))
        vrrp_subflow.add(amphora_driver_tasks.AmphoraVRRPStart(
            name=sf_name + '-' + constants.AMP_VRRP_START,
            requires=constants.LOADBALANCER))
        return vrrp_subflow

    def cert_rotate_amphora_flow(self):
        """Implement rotation for amphora's cert.

        1. Create a new certificate
        2. Upload the cert to amphora
        3. Update the newly created certificate info to amphora
        4. Update the cert_busy flag to be false after rotation

        :returns: The flow for updating an amphora
        """
        rotated_amphora_flow = linear_flow.Flow(
            constants.CERT_ROTATE_AMPHORA_FLOW)

        rotated_amphora_flow.add(lifecycle_tasks.AmphoraToErrorOnRevertTask(
            requires=constants.AMPHORA))

        # create a new certificate, the returned value is the newly created
        # certificate
        rotated_amphora_flow.add(cert_task.GenerateServerPEMTask(
            provides=constants.SERVER_PEM))

        # update it in amphora task
        rotated_amphora_flow.add(amphora_driver_tasks.AmphoraCertUpload(
            requires=(constants.AMPHORA, constants.SERVER_PEM)))

        # update the newly created certificate info to amphora
        rotated_amphora_flow.add(database_tasks.UpdateAmphoraDBCertExpiration(
            requires=(constants.AMPHORA_ID, constants.SERVER_PEM)))

        # update the cert_busy flag to be false after rotation
        rotated_amphora_flow.add(database_tasks.UpdateAmphoraCertBusyToFalse(
            requires=constants.AMPHORA))

        return rotated_amphora_flow

def update_amphora_config_flow(self):
|
||||||
|
"""Creates a flow to update the amphora agent configuration.
|
||||||
|
|
||||||
|
:returns: The flow for updating an amphora
|
||||||
|
"""
|
||||||
|
update_amphora_flow = linear_flow.Flow(
|
||||||
|
constants.UPDATE_AMPHORA_CONFIG_FLOW)
|
||||||
|
|
||||||
|
update_amphora_flow.add(lifecycle_tasks.AmphoraToErrorOnRevertTask(
|
||||||
|
requires=constants.AMPHORA))
|
||||||
|
|
||||||
|
update_amphora_flow.add(amphora_driver_tasks.AmphoraConfigUpdate(
|
||||||
|
requires=(constants.AMPHORA, constants.FLAVOR)))
|
||||||
|
|
||||||
|
return update_amphora_flow
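These factory methods only build taskflow graphs; nothing runs until the controller worker hands the flow to a taskflow engine together with a store that seeds each task's `requires` names. A minimal sketch of driving the certificate rotation flow directly -- the bare `engines.run` call and the `amp` object are illustrative assumptions, since the worker normally wraps this in its own engine helper:

from taskflow import engines

from octavia.controller.worker.v2.flows import amphora_flows


def rotate_amphora_cert(amp):
    # Build the flow; 'amphora' and 'amphora_id' satisfy the tasks'
    # requires, while SERVER_PEM is provided by GenerateServerPEMTask
    # as the engine runs.
    flow = amphora_flows.AmphoraFlows().cert_rotate_amphora_flow()
    engines.run(flow, store={'amphora': amp, 'amphora_id': amp.id})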
105
octavia/controller/worker/v2/flows/health_monitor_flows.py
Normal file
@@ -0,0 +1,105 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from taskflow.patterns import linear_flow

from octavia.common import constants
from octavia.controller.worker.v2.tasks import amphora_driver_tasks
from octavia.controller.worker.v2.tasks import database_tasks
from octavia.controller.worker.v2.tasks import lifecycle_tasks
from octavia.controller.worker.v2.tasks import model_tasks


class HealthMonitorFlows(object):

    def get_create_health_monitor_flow(self):
        """Create a flow to create a health monitor

        :returns: The flow for creating a health monitor
        """
        create_hm_flow = linear_flow.Flow(constants.CREATE_HEALTH_MONITOR_FLOW)
        create_hm_flow.add(lifecycle_tasks.HealthMonitorToErrorOnRevertTask(
            requires=[constants.HEALTH_MON,
                      constants.LISTENERS,
                      constants.LOADBALANCER]))
        create_hm_flow.add(database_tasks.MarkHealthMonitorPendingCreateInDB(
            requires=constants.HEALTH_MON))
        create_hm_flow.add(amphora_driver_tasks.ListenersUpdate(
            requires=[constants.LOADBALANCER, constants.LISTENERS]))
        create_hm_flow.add(database_tasks.MarkHealthMonitorActiveInDB(
            requires=constants.HEALTH_MON))
        create_hm_flow.add(database_tasks.MarkPoolActiveInDB(
            requires=constants.POOL))
        create_hm_flow.add(database_tasks.MarkLBAndListenersActiveInDB(
            requires=[constants.LOADBALANCER, constants.LISTENERS]))

        return create_hm_flow

    def get_delete_health_monitor_flow(self):
        """Create a flow to delete a health monitor

        :returns: The flow for deleting a health monitor
        """
        delete_hm_flow = linear_flow.Flow(constants.DELETE_HEALTH_MONITOR_FLOW)
        delete_hm_flow.add(lifecycle_tasks.HealthMonitorToErrorOnRevertTask(
            requires=[constants.HEALTH_MON,
                      constants.LISTENERS,
                      constants.LOADBALANCER]))
        delete_hm_flow.add(database_tasks.MarkHealthMonitorPendingDeleteInDB(
            requires=constants.HEALTH_MON))
        delete_hm_flow.add(model_tasks.DeleteModelObject(
            rebind={constants.OBJECT: constants.HEALTH_MON}))
        delete_hm_flow.add(amphora_driver_tasks.ListenersUpdate(
            requires=[constants.LOADBALANCER, constants.LISTENERS]))
        delete_hm_flow.add(database_tasks.DeleteHealthMonitorInDB(
            requires=constants.HEALTH_MON))
        delete_hm_flow.add(database_tasks.DecrementHealthMonitorQuota(
            requires=constants.HEALTH_MON))
        delete_hm_flow.add(
            database_tasks.UpdatePoolMembersOperatingStatusInDB(
                requires=constants.POOL,
                inject={constants.OPERATING_STATUS: constants.NO_MONITOR}))
        delete_hm_flow.add(database_tasks.MarkPoolActiveInDB(
            requires=constants.POOL))
        delete_hm_flow.add(database_tasks.MarkLBAndListenersActiveInDB(
            requires=[constants.LOADBALANCER, constants.LISTENERS]))

        return delete_hm_flow

    def get_update_health_monitor_flow(self):
        """Create a flow to update a health monitor

        :returns: The flow for updating a health monitor
        """
        update_hm_flow = linear_flow.Flow(constants.UPDATE_HEALTH_MONITOR_FLOW)
        update_hm_flow.add(lifecycle_tasks.HealthMonitorToErrorOnRevertTask(
            requires=[constants.HEALTH_MON,
                      constants.LISTENERS,
                      constants.LOADBALANCER]))
        update_hm_flow.add(database_tasks.MarkHealthMonitorPendingUpdateInDB(
            requires=constants.HEALTH_MON))
        update_hm_flow.add(amphora_driver_tasks.ListenersUpdate(
            requires=[constants.LOADBALANCER, constants.LISTENERS]))
        update_hm_flow.add(database_tasks.UpdateHealthMonInDB(
            requires=[constants.HEALTH_MON, constants.UPDATE_DICT]))
        update_hm_flow.add(database_tasks.MarkHealthMonitorActiveInDB(
            requires=constants.HEALTH_MON))
        update_hm_flow.add(database_tasks.MarkPoolActiveInDB(
            requires=constants.POOL))
        update_hm_flow.add(database_tasks.MarkLBAndListenersActiveInDB(
            requires=[constants.LOADBALANCER, constants.LISTENERS]))

        return update_hm_flow
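Because every task declares its inputs with `requires` and its outputs with `provides`, the aggregate inputs a caller must place in the engine store can be inspected before the flow ever runs. A quick sketch, assuming only that taskflow and this module are importable:

from octavia.controller.worker.v2.flows import health_monitor_flows

hm_flows = health_monitor_flows.HealthMonitorFlows()
flow = hm_flows.get_create_health_monitor_flow()
# Prints the storage symbols the caller must supply, e.g.
# ['health_mon', 'listeners', 'loadbalancer', 'pool']
print(sorted(flow.requires))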
92
octavia/controller/worker/v2/flows/l7policy_flows.py
Normal file
@@ -0,0 +1,92 @@
# Copyright 2016 Blue Box, an IBM Company
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from taskflow.patterns import linear_flow

from octavia.common import constants
from octavia.controller.worker.v2.tasks import amphora_driver_tasks
from octavia.controller.worker.v2.tasks import database_tasks
from octavia.controller.worker.v2.tasks import lifecycle_tasks
from octavia.controller.worker.v2.tasks import model_tasks


class L7PolicyFlows(object):

    def get_create_l7policy_flow(self):
        """Create a flow to create an L7 policy

        :returns: The flow for creating an L7 policy
        """
        create_l7policy_flow = linear_flow.Flow(constants.CREATE_L7POLICY_FLOW)
        create_l7policy_flow.add(lifecycle_tasks.L7PolicyToErrorOnRevertTask(
            requires=[constants.L7POLICY,
                      constants.LISTENERS,
                      constants.LOADBALANCER]))
        create_l7policy_flow.add(database_tasks.MarkL7PolicyPendingCreateInDB(
            requires=constants.L7POLICY))
        create_l7policy_flow.add(amphora_driver_tasks.ListenersUpdate(
            requires=[constants.LOADBALANCER, constants.LISTENERS]))
        create_l7policy_flow.add(database_tasks.MarkL7PolicyActiveInDB(
            requires=constants.L7POLICY))
        create_l7policy_flow.add(database_tasks.MarkLBAndListenersActiveInDB(
            requires=[constants.LOADBALANCER, constants.LISTENERS]))

        return create_l7policy_flow

    def get_delete_l7policy_flow(self):
        """Create a flow to delete an L7 policy

        :returns: The flow for deleting an L7 policy
        """
        delete_l7policy_flow = linear_flow.Flow(constants.DELETE_L7POLICY_FLOW)
        delete_l7policy_flow.add(lifecycle_tasks.L7PolicyToErrorOnRevertTask(
            requires=[constants.L7POLICY,
                      constants.LISTENERS,
                      constants.LOADBALANCER]))
        delete_l7policy_flow.add(database_tasks.MarkL7PolicyPendingDeleteInDB(
            requires=constants.L7POLICY))
        delete_l7policy_flow.add(model_tasks.DeleteModelObject(
            rebind={constants.OBJECT: constants.L7POLICY}))
        delete_l7policy_flow.add(amphora_driver_tasks.ListenersUpdate(
            requires=[constants.LOADBALANCER, constants.LISTENERS]))
        delete_l7policy_flow.add(database_tasks.DeleteL7PolicyInDB(
            requires=constants.L7POLICY))
        delete_l7policy_flow.add(database_tasks.MarkLBAndListenersActiveInDB(
            requires=[constants.LOADBALANCER, constants.LISTENERS]))

        return delete_l7policy_flow

    def get_update_l7policy_flow(self):
        """Create a flow to update an L7 policy

        :returns: The flow for updating an L7 policy
        """
        update_l7policy_flow = linear_flow.Flow(constants.UPDATE_L7POLICY_FLOW)
        update_l7policy_flow.add(lifecycle_tasks.L7PolicyToErrorOnRevertTask(
            requires=[constants.L7POLICY,
                      constants.LISTENERS,
                      constants.LOADBALANCER]))
        update_l7policy_flow.add(database_tasks.MarkL7PolicyPendingUpdateInDB(
            requires=constants.L7POLICY))
        update_l7policy_flow.add(amphora_driver_tasks.ListenersUpdate(
            requires=[constants.LOADBALANCER, constants.LISTENERS]))
        update_l7policy_flow.add(database_tasks.UpdateL7PolicyInDB(
            requires=[constants.L7POLICY, constants.UPDATE_DICT]))
        update_l7policy_flow.add(database_tasks.MarkL7PolicyActiveInDB(
            requires=constants.L7POLICY))
        update_l7policy_flow.add(database_tasks.MarkLBAndListenersActiveInDB(
            requires=[constants.LOADBALANCER, constants.LISTENERS]))

        return update_l7policy_flow
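The *ToErrorOnRevertTask is added first on purpose: in a linear_flow, a failure in any later task reverts all completed tasks in reverse order, so the lifecycle task's revert (which marks the objects ERROR) always runs last. A toy demonstration of that revert ordering, unrelated to any octavia objects:

from taskflow import engines
from taskflow import task
from taskflow.patterns import linear_flow


class Step(task.Task):
    def execute(self):
        print(self.name, 'execute')
        if self.name == 'boom':
            raise RuntimeError('failure mid-flow')

    def revert(self, *args, **kwargs):
        print(self.name, 'revert')


demo = linear_flow.Flow('revert-demo')
demo.add(Step(name='mark-error-on-revert'), Step(name='boom'))
try:
    engines.run(demo)
except RuntimeError:
    pass  # 'boom revert' prints first, 'mark-error-on-revert revert' last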
98
octavia/controller/worker/v2/flows/l7rule_flows.py
Normal file
@@ -0,0 +1,98 @@
# Copyright 2016 Blue Box, an IBM Company
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from taskflow.patterns import linear_flow

from octavia.common import constants
from octavia.controller.worker.v2.tasks import amphora_driver_tasks
from octavia.controller.worker.v2.tasks import database_tasks
from octavia.controller.worker.v2.tasks import lifecycle_tasks
from octavia.controller.worker.v2.tasks import model_tasks


class L7RuleFlows(object):

    def get_create_l7rule_flow(self):
        """Create a flow to create an L7 rule

        :returns: The flow for creating an L7 rule
        """
        create_l7rule_flow = linear_flow.Flow(constants.CREATE_L7RULE_FLOW)
        create_l7rule_flow.add(lifecycle_tasks.L7RuleToErrorOnRevertTask(
            requires=[constants.L7RULE,
                      constants.LISTENERS,
                      constants.LOADBALANCER]))
        create_l7rule_flow.add(database_tasks.MarkL7RulePendingCreateInDB(
            requires=constants.L7RULE))
        create_l7rule_flow.add(amphora_driver_tasks.ListenersUpdate(
            requires=[constants.LOADBALANCER, constants.LISTENERS]))
        create_l7rule_flow.add(database_tasks.MarkL7RuleActiveInDB(
            requires=constants.L7RULE))
        create_l7rule_flow.add(database_tasks.MarkL7PolicyActiveInDB(
            requires=constants.L7POLICY))
        create_l7rule_flow.add(database_tasks.MarkLBAndListenersActiveInDB(
            requires=[constants.LOADBALANCER, constants.LISTENERS]))

        return create_l7rule_flow

    def get_delete_l7rule_flow(self):
        """Create a flow to delete an L7 rule

        :returns: The flow for deleting an L7 rule
        """
        delete_l7rule_flow = linear_flow.Flow(constants.DELETE_L7RULE_FLOW)
        delete_l7rule_flow.add(lifecycle_tasks.L7RuleToErrorOnRevertTask(
            requires=[constants.L7RULE,
                      constants.LISTENERS,
                      constants.LOADBALANCER]))
        delete_l7rule_flow.add(database_tasks.MarkL7RulePendingDeleteInDB(
            requires=constants.L7RULE))
        delete_l7rule_flow.add(model_tasks.DeleteModelObject(
            rebind={constants.OBJECT: constants.L7RULE}))
        delete_l7rule_flow.add(amphora_driver_tasks.ListenersUpdate(
            requires=[constants.LOADBALANCER, constants.LISTENERS]))
        delete_l7rule_flow.add(database_tasks.DeleteL7RuleInDB(
            requires=constants.L7RULE))
        delete_l7rule_flow.add(database_tasks.MarkL7PolicyActiveInDB(
            requires=constants.L7POLICY))
        delete_l7rule_flow.add(database_tasks.MarkLBAndListenersActiveInDB(
            requires=[constants.LOADBALANCER, constants.LISTENERS]))

        return delete_l7rule_flow

    def get_update_l7rule_flow(self):
        """Create a flow to update an L7 rule

        :returns: The flow for updating an L7 rule
        """
        update_l7rule_flow = linear_flow.Flow(constants.UPDATE_L7RULE_FLOW)
        update_l7rule_flow.add(lifecycle_tasks.L7RuleToErrorOnRevertTask(
            requires=[constants.L7RULE,
                      constants.LISTENERS,
                      constants.LOADBALANCER]))
        update_l7rule_flow.add(database_tasks.MarkL7RulePendingUpdateInDB(
            requires=constants.L7RULE))
        update_l7rule_flow.add(amphora_driver_tasks.ListenersUpdate(
            requires=[constants.LOADBALANCER, constants.LISTENERS]))
        update_l7rule_flow.add(database_tasks.UpdateL7RuleInDB(
            requires=[constants.L7RULE, constants.UPDATE_DICT]))
        update_l7rule_flow.add(database_tasks.MarkL7RuleActiveInDB(
            requires=constants.L7RULE))
        update_l7rule_flow.add(database_tasks.MarkL7PolicyActiveInDB(
            requires=constants.L7POLICY))
        update_l7rule_flow.add(database_tasks.MarkLBAndListenersActiveInDB(
            requires=[constants.LOADBALANCER, constants.LISTENERS]))

        return update_l7rule_flow
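`rebind` is what lets a generic task like DeleteModelObject operate on whatever object a flow chooses: it remaps the task's parameter name (constants.OBJECT, i.e. 'object') onto a different key in the engine store ('l7rule' here). A self-contained sketch of the mechanism:

from taskflow import engines
from taskflow import task
from taskflow.patterns import linear_flow


class DeleteObject(task.Task):
    def execute(self, object):  # parameter name mirrors constants.OBJECT
        print('would delete:', object)


demo = linear_flow.Flow('rebind-demo')
# Feed the store key 'l7rule' into the parameter named 'object'.
demo.add(DeleteObject(rebind={'object': 'l7rule'}))
engines.run(demo, store={'l7rule': 'rule-1234'})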
126
octavia/controller/worker/v2/flows/listener_flows.py
Normal file
@@ -0,0 +1,126 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from taskflow.patterns import linear_flow

from octavia.common import constants
from octavia.controller.worker.v2.tasks import amphora_driver_tasks
from octavia.controller.worker.v2.tasks import database_tasks
from octavia.controller.worker.v2.tasks import lifecycle_tasks
from octavia.controller.worker.v2.tasks import network_tasks


class ListenerFlows(object):

    def get_create_listener_flow(self):
        """Create a flow to create a listener

        :returns: The flow for creating a listener
        """
        create_listener_flow = linear_flow.Flow(constants.CREATE_LISTENER_FLOW)
        create_listener_flow.add(lifecycle_tasks.ListenersToErrorOnRevertTask(
            requires=[constants.LOADBALANCER, constants.LISTENERS]))
        create_listener_flow.add(amphora_driver_tasks.ListenersUpdate(
            requires=[constants.LOADBALANCER, constants.LISTENERS]))
        create_listener_flow.add(network_tasks.UpdateVIP(
            requires=constants.LOADBALANCER))
        create_listener_flow.add(database_tasks.MarkLBAndListenersActiveInDB(
            requires=[constants.LOADBALANCER,
                      constants.LISTENERS]))
        return create_listener_flow

    def get_create_all_listeners_flow(self):
        """Create a flow to create all listeners

        :returns: The flow for creating all listeners
        """
        create_all_listeners_flow = linear_flow.Flow(
            constants.CREATE_LISTENERS_FLOW)
        create_all_listeners_flow.add(
            database_tasks.GetListenersFromLoadbalancer(
                requires=constants.LOADBALANCER,
                provides=constants.LISTENERS))
        create_all_listeners_flow.add(database_tasks.ReloadLoadBalancer(
            requires=constants.LOADBALANCER_ID,
            provides=constants.LOADBALANCER))
        create_all_listeners_flow.add(amphora_driver_tasks.ListenersUpdate(
            requires=[constants.LOADBALANCER, constants.LISTENERS]))
        create_all_listeners_flow.add(network_tasks.UpdateVIP(
            requires=constants.LOADBALANCER))
        return create_all_listeners_flow

    def get_delete_listener_flow(self):
        """Create a flow to delete a listener

        :returns: The flow for deleting a listener
        """
        delete_listener_flow = linear_flow.Flow(constants.DELETE_LISTENER_FLOW)
        delete_listener_flow.add(lifecycle_tasks.ListenerToErrorOnRevertTask(
            requires=constants.LISTENER))
        delete_listener_flow.add(amphora_driver_tasks.ListenerDelete(
            requires=[constants.LOADBALANCER, constants.LISTENER]))
        delete_listener_flow.add(network_tasks.UpdateVIPForDelete(
            requires=constants.LOADBALANCER))
        delete_listener_flow.add(database_tasks.DeleteListenerInDB(
            requires=constants.LISTENER))
        delete_listener_flow.add(database_tasks.DecrementListenerQuota(
            requires=constants.LISTENER))
        delete_listener_flow.add(database_tasks.MarkLBActiveInDB(
            requires=constants.LOADBALANCER))

        return delete_listener_flow

    def get_delete_listener_internal_flow(self, listener_name):
        """Create a flow to delete a listener and its L7 policies internally

        (will skip deletion on the amphora and marking the LB active)

        :returns: The flow for deleting a listener
        """
        delete_listener_flow = linear_flow.Flow(constants.DELETE_LISTENER_FLOW)
        # Should cascade delete all L7 policies
        delete_listener_flow.add(network_tasks.UpdateVIPForDelete(
            name='delete_update_vip_' + listener_name,
            requires=constants.LOADBALANCER))
        delete_listener_flow.add(database_tasks.DeleteListenerInDB(
            name='delete_listener_in_db_' + listener_name,
            requires=constants.LISTENER,
            rebind={constants.LISTENER: listener_name}))
        delete_listener_flow.add(database_tasks.DecrementListenerQuota(
            name='decrement_listener_quota_' + listener_name,
            requires=constants.LISTENER,
            rebind={constants.LISTENER: listener_name}))

        return delete_listener_flow

    def get_update_listener_flow(self):
        """Create a flow to update a listener

        :returns: The flow for updating a listener
        """
        update_listener_flow = linear_flow.Flow(constants.UPDATE_LISTENER_FLOW)
        update_listener_flow.add(lifecycle_tasks.ListenersToErrorOnRevertTask(
            requires=[constants.LOADBALANCER, constants.LISTENERS]))
        update_listener_flow.add(amphora_driver_tasks.ListenersUpdate(
            requires=[constants.LOADBALANCER, constants.LISTENERS]))
        update_listener_flow.add(database_tasks.UpdateListenerInDB(
            requires=[constants.LISTENER, constants.UPDATE_DICT]))
        update_listener_flow.add(database_tasks.MarkLBAndListenersActiveInDB(
            requires=[constants.LOADBALANCER,
                      constants.LISTENERS]))

        return update_listener_flow
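get_delete_listener_internal_flow exists so a cascade delete can stamp out one copy of this flow per listener: the caller suffixes every task name with listener_name and rebinds LISTENER to a per-listener store key, avoiding name collisions inside one unordered flow. A sketch of that composition, where `lb` stands in for an assumed loadbalancer data model:

from taskflow.patterns import unordered_flow

from octavia.controller.worker.v2.flows import listener_flows

lf = listener_flows.ListenerFlows()
cascade = unordered_flow.Flow('delete-all-listeners')
store = {}
for listener in lb.listeners:  # `lb` is an assumed loadbalancer object
    key = 'listener_' + listener.id
    store[key] = listener
    cascade.add(lf.get_delete_listener_internal_flow(key))
# `store` is passed to the engine later; see _get_delete_listeners_flow
# in load_balancer_flows.py below for the in-tree version of this loop.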
341
octavia/controller/worker/v2/flows/load_balancer_flows.py
Normal file
@@ -0,0 +1,341 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from oslo_config import cfg
from oslo_log import log as logging
from taskflow.patterns import linear_flow
from taskflow.patterns import unordered_flow

from octavia.common import constants
from octavia.common import exceptions
from octavia.controller.worker.v2.flows import amphora_flows
from octavia.controller.worker.v2.flows import listener_flows
from octavia.controller.worker.v2.flows import member_flows
from octavia.controller.worker.v2.flows import pool_flows
from octavia.controller.worker.v2.tasks import amphora_driver_tasks
from octavia.controller.worker.v2.tasks import compute_tasks
from octavia.controller.worker.v2.tasks import database_tasks
from octavia.controller.worker.v2.tasks import lifecycle_tasks
from octavia.controller.worker.v2.tasks import network_tasks

CONF = cfg.CONF
LOG = logging.getLogger(__name__)


class LoadBalancerFlows(object):

    def __init__(self):
        self.amp_flows = amphora_flows.AmphoraFlows()
        self.listener_flows = listener_flows.ListenerFlows()
        self.pool_flows = pool_flows.PoolFlows()
        self.member_flows = member_flows.MemberFlows()

    def get_create_load_balancer_flow(self, topology, listeners=None):
        """Creates a conditional graph flow that allocates a loadbalancer to
        two spare amphorae.

        :raises InvalidTopology: Invalid topology specified
        :return: The graph flow for creating a loadbalancer.
        """
        f_name = constants.CREATE_LOADBALANCER_FLOW
        lb_create_flow = linear_flow.Flow(f_name)

        lb_create_flow.add(lifecycle_tasks.LoadBalancerIDToErrorOnRevertTask(
            requires=constants.LOADBALANCER_ID))

        # allocate VIP
        lb_create_flow.add(database_tasks.ReloadLoadBalancer(
            name=constants.RELOAD_LB_BEFOR_ALLOCATE_VIP,
            requires=constants.LOADBALANCER_ID,
            provides=constants.LOADBALANCER
        ))
        lb_create_flow.add(network_tasks.AllocateVIP(
            requires=constants.LOADBALANCER,
            provides=constants.VIP))
        lb_create_flow.add(database_tasks.UpdateVIPAfterAllocation(
            requires=(constants.LOADBALANCER_ID, constants.VIP),
            provides=constants.LOADBALANCER))
        lb_create_flow.add(network_tasks.UpdateVIPSecurityGroup(
            requires=constants.LOADBALANCER))
        lb_create_flow.add(network_tasks.GetSubnetFromVIP(
            requires=constants.LOADBALANCER,
            provides=constants.SUBNET))

        if topology == constants.TOPOLOGY_ACTIVE_STANDBY:
            lb_create_flow.add(*self._create_active_standby_topology())
        elif topology == constants.TOPOLOGY_SINGLE:
            lb_create_flow.add(*self._create_single_topology())
        else:
            LOG.error("Unknown topology: %s. Unable to build load balancer.",
                      topology)
            raise exceptions.InvalidTopology(topology=topology)

        post_amp_prefix = constants.POST_LB_AMP_ASSOCIATION_SUBFLOW
        lb_create_flow.add(
            self.get_post_lb_amp_association_flow(
                post_amp_prefix, topology, mark_active=(not listeners)))

        if listeners:
            lb_create_flow.add(*self._create_listeners_flow())

        return lb_create_flow

    def _create_single_topology(self):
        return (self.amp_flows.get_amphora_for_lb_subflow(
            prefix=constants.ROLE_STANDALONE,
            role=constants.ROLE_STANDALONE), )

    def _create_active_standby_topology(
            self, lf_name=constants.CREATE_LOADBALANCER_FLOW):
        # When we boot up amphorae for an active/standby topology,
        # we should leverage the Nova anti-affinity capabilities
        # to place the amphorae on different hosts. We also need to check
        # whether the anti-affinity flag is enabled or not:
        anti_affinity = CONF.nova.enable_anti_affinity
        flows = []
        if anti_affinity:
            # we need to create a server group first
            flows.append(
                compute_tasks.NovaServerGroupCreate(
                    name=lf_name + '-' +
                    constants.CREATE_SERVER_GROUP_FLOW,
                    requires=(constants.LOADBALANCER_ID),
                    provides=constants.SERVER_GROUP_ID))

            # update server group id in lb table
            flows.append(
                database_tasks.UpdateLBServerGroupInDB(
                    name=lf_name + '-' +
                    constants.UPDATE_LB_SERVERGROUPID_FLOW,
                    requires=(constants.LOADBALANCER_ID,
                              constants.SERVER_GROUP_ID)))

        f_name = constants.CREATE_LOADBALANCER_FLOW
        amps_flow = unordered_flow.Flow(f_name)
        master_amp_sf = self.amp_flows.get_amphora_for_lb_subflow(
            prefix=constants.ROLE_MASTER, role=constants.ROLE_MASTER)

        backup_amp_sf = self.amp_flows.get_amphora_for_lb_subflow(
            prefix=constants.ROLE_BACKUP, role=constants.ROLE_BACKUP)
        amps_flow.add(master_amp_sf, backup_amp_sf)

        return flows + [amps_flow]

    def _create_listeners_flow(self):
        flows = []
        flows.append(
            database_tasks.ReloadLoadBalancer(
                name=constants.RELOAD_LB_AFTER_AMP_ASSOC_FULL_GRAPH,
                requires=constants.LOADBALANCER_ID,
                provides=constants.LOADBALANCER
            )
        )
        flows.append(
            network_tasks.CalculateDelta(
                requires=constants.LOADBALANCER, provides=constants.DELTAS
            )
        )
        flows.append(
            network_tasks.HandleNetworkDeltas(
                requires=constants.DELTAS, provides=constants.ADDED_PORTS
            )
        )
        flows.append(
            amphora_driver_tasks.AmphoraePostNetworkPlug(
                requires=(constants.LOADBALANCER, constants.ADDED_PORTS)
            )
        )
        flows.append(
            self.listener_flows.get_create_all_listeners_flow()
        )
        flows.append(
            database_tasks.MarkLBActiveInDB(
                mark_subobjects=True,
                requires=constants.LOADBALANCER
            )
        )
        return flows

    def get_post_lb_amp_association_flow(self, prefix, topology,
                                         mark_active=True):
        """Reload the loadbalancer and create networking subflows for
        created/allocated amphorae.

        :return: Post amphorae association subflow
        """
        # Note: If any task in this flow fails, the created amphorae will be
        # left incorrectly allocated to the loadbalancer. The
        # get_new_LB_networking_subflow is the most prone to failure; its
        # revert should deallocate the amphora from its loadbalancer and put
        # it in a READY state.

        sf_name = prefix + '-' + constants.POST_LB_AMP_ASSOCIATION_SUBFLOW
        post_create_LB_flow = linear_flow.Flow(sf_name)
        post_create_LB_flow.add(
            database_tasks.ReloadLoadBalancer(
                name=sf_name + '-' + constants.RELOAD_LB_AFTER_AMP_ASSOC,
                requires=constants.LOADBALANCER_ID,
                provides=constants.LOADBALANCER))

        if topology == constants.TOPOLOGY_ACTIVE_STANDBY:
            vrrp_subflow = self.amp_flows.get_vrrp_subflow(prefix)
            post_create_LB_flow.add(vrrp_subflow)

        post_create_LB_flow.add(database_tasks.UpdateLoadbalancerInDB(
            requires=[constants.LOADBALANCER, constants.UPDATE_DICT]))
        if mark_active:
            post_create_LB_flow.add(database_tasks.MarkLBActiveInDB(
                name=sf_name + '-' + constants.MARK_LB_ACTIVE_INDB,
                requires=constants.LOADBALANCER))
        return post_create_LB_flow

    def _get_delete_listeners_flow(self, lb):
        """Sets up an internal delete flow

        Because TaskFlow doesn't support loops, we store each listener
        we want to delete in the store and then rebind.

        :param lb: load balancer
        :return: (flow, store) -- flow for the deletion and store with all
                 the listeners stored properly
        """
        listeners_delete_flow = unordered_flow.Flow('listener_delete_flow')
        store = {}
        for listener in lb.listeners:
            listener_name = 'listener_' + listener.id
            store[listener_name] = listener
            listeners_delete_flow.add(
                self.listener_flows.get_delete_listener_internal_flow(
                    listener_name))
        return (listeners_delete_flow, store)

    def get_delete_load_balancer_flow(self, lb):
        """Creates a flow to delete a load balancer.

        :returns: The flow for deleting a load balancer
        """
        return self._get_delete_load_balancer_flow(lb, False)

    def _get_delete_pools_flow(self, lb):
        """Sets up an internal delete flow

        Because TaskFlow doesn't support loops, we store each pool
        we want to delete in the store and then rebind.

        :param lb: load balancer
        :return: (flow, store) -- flow for the deletion and store with all
                 the pools stored properly
        """
        pools_delete_flow = unordered_flow.Flow('pool_delete_flow')
        store = {}
        for pool in lb.pools:
            pool_name = 'pool' + pool.id
            store[pool_name] = pool
            pools_delete_flow.add(
                self.pool_flows.get_delete_pool_flow_internal(
                    pool_name))
        return (pools_delete_flow, store)

    def _get_delete_load_balancer_flow(self, lb, cascade):
        store = {}
        delete_LB_flow = linear_flow.Flow(constants.DELETE_LOADBALANCER_FLOW)
        delete_LB_flow.add(lifecycle_tasks.LoadBalancerToErrorOnRevertTask(
            requires=constants.LOADBALANCER))
        delete_LB_flow.add(compute_tasks.NovaServerGroupDelete(
            requires=constants.SERVER_GROUP_ID))
        delete_LB_flow.add(database_tasks.MarkLBAmphoraeHealthBusy(
            requires=constants.LOADBALANCER))
        if cascade:
            (listeners_delete, store) = self._get_delete_listeners_flow(lb)
            (pools_delete, pool_store) = self._get_delete_pools_flow(lb)
            store.update(pool_store)
            delete_LB_flow.add(pools_delete)
            delete_LB_flow.add(listeners_delete)
        delete_LB_flow.add(network_tasks.UnplugVIP(
            requires=constants.LOADBALANCER))
        delete_LB_flow.add(network_tasks.DeallocateVIP(
            requires=constants.LOADBALANCER))
        delete_LB_flow.add(compute_tasks.DeleteAmphoraeOnLoadBalancer(
            requires=constants.LOADBALANCER))
        delete_LB_flow.add(database_tasks.MarkLBAmphoraeDeletedInDB(
            requires=constants.LOADBALANCER))
        delete_LB_flow.add(database_tasks.DisableLBAmphoraeHealthMonitoring(
            requires=constants.LOADBALANCER))
        delete_LB_flow.add(database_tasks.MarkLBDeletedInDB(
            requires=constants.LOADBALANCER))
        delete_LB_flow.add(database_tasks.DecrementLoadBalancerQuota(
            requires=constants.LOADBALANCER))
        return (delete_LB_flow, store)

    def get_cascade_delete_load_balancer_flow(self, lb):
        """Creates a flow to cascade delete a load balancer.

        :returns: The flow for cascade deleting a load balancer
        """
        return self._get_delete_load_balancer_flow(lb, True)

    def get_new_LB_networking_subflow(self):
        """Create a sub-flow to setup networking.

        :returns: The flow to setup networking for a new amphora
        """

        new_LB_net_subflow = linear_flow.Flow(constants.
                                              LOADBALANCER_NETWORKING_SUBFLOW)
        new_LB_net_subflow.add(network_tasks.AllocateVIP(
            requires=constants.LOADBALANCER,
            provides=constants.VIP))
        new_LB_net_subflow.add(database_tasks.UpdateVIPAfterAllocation(
            requires=(constants.LOADBALANCER_ID, constants.VIP),
            provides=constants.LOADBALANCER))
        new_LB_net_subflow.add(network_tasks.PlugVIP(
            requires=constants.LOADBALANCER,
            provides=constants.AMPS_DATA))
        new_LB_net_subflow.add(network_tasks.ApplyQos(
            requires=(constants.LOADBALANCER, constants.AMPS_DATA,
                      constants.UPDATE_DICT)))
        new_LB_net_subflow.add(database_tasks.UpdateAmphoraeVIPData(
            requires=constants.AMPS_DATA))
        new_LB_net_subflow.add(database_tasks.ReloadLoadBalancer(
            name=constants.RELOAD_LB_AFTER_PLUG_VIP,
            requires=constants.LOADBALANCER_ID,
            provides=constants.LOADBALANCER))
        new_LB_net_subflow.add(network_tasks.GetAmphoraeNetworkConfigs(
            requires=constants.LOADBALANCER,
            provides=constants.AMPHORAE_NETWORK_CONFIG))
        new_LB_net_subflow.add(amphora_driver_tasks.AmphoraePostVIPPlug(
            requires=(constants.LOADBALANCER,
                      constants.AMPHORAE_NETWORK_CONFIG)))

        return new_LB_net_subflow

    def get_update_load_balancer_flow(self):
        """Creates a flow to update a load balancer.

        :returns: The flow for updating a load balancer
        """
        update_LB_flow = linear_flow.Flow(constants.UPDATE_LOADBALANCER_FLOW)
        update_LB_flow.add(lifecycle_tasks.LoadBalancerToErrorOnRevertTask(
            requires=constants.LOADBALANCER))
        update_LB_flow.add(network_tasks.ApplyQos(
            requires=(constants.LOADBALANCER, constants.UPDATE_DICT)))
        update_LB_flow.add(amphora_driver_tasks.ListenersUpdate(
            requires=[constants.LOADBALANCER, constants.LISTENERS]))
        update_LB_flow.add(database_tasks.UpdateLoadbalancerInDB(
            requires=[constants.LOADBALANCER, constants.UPDATE_DICT]))
        update_LB_flow.add(database_tasks.MarkLBActiveInDB(
            requires=constants.LOADBALANCER))

        return update_LB_flow
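Unlike the other factories, the delete flows return a (flow, store) pair, because the per-listener and per-pool store keys are only known at build time. A sketch of consuming a cascade delete, assuming `lb` is a loadbalancer data model and the bare engines.run call stands in for the worker's engine helper:

from taskflow import engines

from octavia.controller.worker.v2.flows import load_balancer_flows

lb_flows = load_balancer_flows.LoadBalancerFlows()
(flow, store) = lb_flows.get_cascade_delete_load_balancer_flow(lb)
# The build-time store holds the per-listener/per-pool objects; the
# caller still supplies the top-level inputs.
store.update({'loadbalancer': lb, 'server_group_id': lb.server_group_id})
engines.run(flow, store=store)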
209
octavia/controller/worker/v2/flows/member_flows.py
Normal file
@@ -0,0 +1,209 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from taskflow.patterns import linear_flow
from taskflow.patterns import unordered_flow

from octavia.common import constants
from octavia.controller.worker.v2.tasks import amphora_driver_tasks
from octavia.controller.worker.v2.tasks import database_tasks
from octavia.controller.worker.v2.tasks import lifecycle_tasks
from octavia.controller.worker.v2.tasks import model_tasks
from octavia.controller.worker.v2.tasks import network_tasks


class MemberFlows(object):

    def get_create_member_flow(self):
        """Create a flow to create a member

        :returns: The flow for creating a member
        """
        create_member_flow = linear_flow.Flow(constants.CREATE_MEMBER_FLOW)
        create_member_flow.add(lifecycle_tasks.MemberToErrorOnRevertTask(
            requires=[constants.MEMBER,
                      constants.LISTENERS,
                      constants.LOADBALANCER,
                      constants.POOL]))
        create_member_flow.add(database_tasks.MarkMemberPendingCreateInDB(
            requires=constants.MEMBER))
        create_member_flow.add(network_tasks.CalculateDelta(
            requires=constants.LOADBALANCER,
            provides=constants.DELTAS))
        create_member_flow.add(network_tasks.HandleNetworkDeltas(
            requires=constants.DELTAS, provides=constants.ADDED_PORTS))
        create_member_flow.add(amphora_driver_tasks.AmphoraePostNetworkPlug(
            requires=(constants.LOADBALANCER, constants.ADDED_PORTS)
        ))
        create_member_flow.add(amphora_driver_tasks.ListenersUpdate(
            requires=(constants.LOADBALANCER, constants.LISTENERS)))
        create_member_flow.add(database_tasks.MarkMemberActiveInDB(
            requires=constants.MEMBER))
        create_member_flow.add(database_tasks.MarkPoolActiveInDB(
            requires=constants.POOL))
        create_member_flow.add(database_tasks.MarkLBAndListenersActiveInDB(
            requires=(constants.LOADBALANCER,
                      constants.LISTENERS)))

        return create_member_flow

    def get_delete_member_flow(self):
        """Create a flow to delete a member

        :returns: The flow for deleting a member
        """
        delete_member_flow = linear_flow.Flow(constants.DELETE_MEMBER_FLOW)
        delete_member_flow.add(lifecycle_tasks.MemberToErrorOnRevertTask(
            requires=[constants.MEMBER,
                      constants.LISTENERS,
                      constants.LOADBALANCER,
                      constants.POOL]))
        delete_member_flow.add(database_tasks.MarkMemberPendingDeleteInDB(
            requires=constants.MEMBER))
        delete_member_flow.add(model_tasks.DeleteModelObject(
            rebind={constants.OBJECT: constants.MEMBER}))
        delete_member_flow.add(database_tasks.DeleteMemberInDB(
            requires=constants.MEMBER))
        delete_member_flow.add(amphora_driver_tasks.ListenersUpdate(
            requires=[constants.LOADBALANCER, constants.LISTENERS]))
        delete_member_flow.add(database_tasks.DecrementMemberQuota(
            requires=constants.MEMBER))
        delete_member_flow.add(database_tasks.MarkPoolActiveInDB(
            requires=constants.POOL))
        delete_member_flow.add(database_tasks.MarkLBAndListenersActiveInDB(
            requires=[constants.LOADBALANCER,
                      constants.LISTENERS]))

        return delete_member_flow

    def get_update_member_flow(self):
        """Create a flow to update a member

        :returns: The flow for updating a member
        """
        update_member_flow = linear_flow.Flow(constants.UPDATE_MEMBER_FLOW)
        update_member_flow.add(lifecycle_tasks.MemberToErrorOnRevertTask(
            requires=[constants.MEMBER,
                      constants.LISTENERS,
                      constants.LOADBALANCER,
                      constants.POOL]))
        update_member_flow.add(database_tasks.MarkMemberPendingUpdateInDB(
            requires=constants.MEMBER))
        update_member_flow.add(amphora_driver_tasks.ListenersUpdate(
            requires=[constants.LOADBALANCER, constants.LISTENERS]))
        update_member_flow.add(database_tasks.UpdateMemberInDB(
            requires=[constants.MEMBER, constants.UPDATE_DICT]))
        update_member_flow.add(database_tasks.MarkMemberActiveInDB(
            requires=constants.MEMBER))
        update_member_flow.add(database_tasks.MarkPoolActiveInDB(
            requires=constants.POOL))
        update_member_flow.add(database_tasks.MarkLBAndListenersActiveInDB(
            requires=[constants.LOADBALANCER,
                      constants.LISTENERS]))

        return update_member_flow

    def get_batch_update_members_flow(self, old_members, new_members,
                                      updated_members):
        """Create a flow to batch update members

        :returns: The flow for batch updating members
        """
        batch_update_members_flow = linear_flow.Flow(
            constants.BATCH_UPDATE_MEMBERS_FLOW)
        unordered_members_flow = unordered_flow.Flow(
            constants.UNORDERED_MEMBER_UPDATES_FLOW)
        unordered_members_active_flow = unordered_flow.Flow(
            constants.UNORDERED_MEMBER_ACTIVE_FLOW)

        # Delete old members
        unordered_members_flow.add(
            lifecycle_tasks.MembersToErrorOnRevertTask(
                inject={constants.MEMBERS: old_members},
                name='{flow}-deleted'.format(
                    flow=constants.MEMBER_TO_ERROR_ON_REVERT_FLOW)))
        for m in old_members:
            unordered_members_flow.add(
                model_tasks.DeleteModelObject(
                    inject={constants.OBJECT: m},
                    name='{flow}-{id}'.format(
                        id=m.id, flow=constants.DELETE_MODEL_OBJECT_FLOW)))
            unordered_members_flow.add(database_tasks.DeleteMemberInDB(
                inject={constants.MEMBER: m},
                name='{flow}-{id}'.format(
                    id=m.id, flow=constants.DELETE_MEMBER_INDB)))
            unordered_members_flow.add(database_tasks.DecrementMemberQuota(
                inject={constants.MEMBER: m},
                name='{flow}-{id}'.format(
                    id=m.id, flow=constants.DECREMENT_MEMBER_QUOTA_FLOW)))

        # Create new members
        unordered_members_flow.add(
            lifecycle_tasks.MembersToErrorOnRevertTask(
                inject={constants.MEMBERS: new_members},
                name='{flow}-created'.format(
                    flow=constants.MEMBER_TO_ERROR_ON_REVERT_FLOW)))
        for m in new_members:
            unordered_members_active_flow.add(
                database_tasks.MarkMemberActiveInDB(
                    inject={constants.MEMBER: m},
                    name='{flow}-{id}'.format(
                        id=m.id, flow=constants.MARK_MEMBER_ACTIVE_INDB)))

        # Update existing members
        unordered_members_flow.add(
            lifecycle_tasks.MembersToErrorOnRevertTask(
                # updated_members is a list of (obj, dict), only pass `obj`
                inject={constants.MEMBERS: [m[0] for m in updated_members]},
                name='{flow}-updated'.format(
                    flow=constants.MEMBER_TO_ERROR_ON_REVERT_FLOW)))
        for m, um in updated_members:
            um.pop('id', None)
            unordered_members_active_flow.add(
                database_tasks.MarkMemberActiveInDB(
                    inject={constants.MEMBER: m},
                    name='{flow}-{id}'.format(
                        id=m.id, flow=constants.MARK_MEMBER_ACTIVE_INDB)))

        batch_update_members_flow.add(unordered_members_flow)

        # Done, do real updates
        batch_update_members_flow.add(network_tasks.CalculateDelta(
            requires=constants.LOADBALANCER,
            provides=constants.DELTAS))
        batch_update_members_flow.add(network_tasks.HandleNetworkDeltas(
            requires=constants.DELTAS, provides=constants.ADDED_PORTS))
        batch_update_members_flow.add(
            amphora_driver_tasks.AmphoraePostNetworkPlug(
                requires=(constants.LOADBALANCER, constants.ADDED_PORTS)))

        # Update the Listener (this makes the changes active on the Amp)
        batch_update_members_flow.add(amphora_driver_tasks.ListenersUpdate(
            requires=(constants.LOADBALANCER, constants.LISTENERS)))

        # Mark all the members ACTIVE here, then pool then LB/Listeners
        batch_update_members_flow.add(unordered_members_active_flow)
        batch_update_members_flow.add(database_tasks.MarkPoolActiveInDB(
            requires=constants.POOL))
        batch_update_members_flow.add(
            database_tasks.MarkLBAndListenersActiveInDB(
                requires=(constants.LOADBALANCER,
                          constants.LISTENERS)))

        return batch_update_members_flow
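The batch flow leans on `inject` rather than `requires`: inject pins a value to one task instance at build time, which is how the same task class can appear once per member without the members ever entering the shared store. Building the flow might look like this, with the member objects and update dict as illustrative stand-ins:

from octavia.controller.worker.v2.flows import member_flows

flow = member_flows.MemberFlows().get_batch_update_members_flow(
    old_members=[member_to_remove],        # deleted and quota-decremented
    new_members=[member_to_add],           # marked ACTIVE once plugged
    updated_members=[(member_kept, {'weight': 5})])  # (model, update dict)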
127
octavia/controller/worker/v2/flows/pool_flows.py
Normal file
@@ -0,0 +1,127 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from taskflow.patterns import linear_flow

from octavia.common import constants
from octavia.controller.worker.v2.tasks import amphora_driver_tasks
from octavia.controller.worker.v2.tasks import database_tasks
from octavia.controller.worker.v2.tasks import lifecycle_tasks
from octavia.controller.worker.v2.tasks import model_tasks


class PoolFlows(object):

    def get_create_pool_flow(self):
        """Create a flow to create a pool

        :returns: The flow for creating a pool
        """
        create_pool_flow = linear_flow.Flow(constants.CREATE_POOL_FLOW)
        create_pool_flow.add(lifecycle_tasks.PoolToErrorOnRevertTask(
            requires=[constants.POOL,
                      constants.LISTENERS,
                      constants.LOADBALANCER]))
        create_pool_flow.add(database_tasks.MarkPoolPendingCreateInDB(
            requires=constants.POOL))
        create_pool_flow.add(amphora_driver_tasks.ListenersUpdate(
            requires=[constants.LOADBALANCER, constants.LISTENERS]))
        create_pool_flow.add(database_tasks.MarkPoolActiveInDB(
            requires=constants.POOL))
        create_pool_flow.add(database_tasks.MarkLBAndListenersActiveInDB(
            requires=[constants.LOADBALANCER, constants.LISTENERS]))

        return create_pool_flow

    def get_delete_pool_flow(self):
        """Create a flow to delete a pool

        :returns: The flow for deleting a pool
        """
        delete_pool_flow = linear_flow.Flow(constants.DELETE_POOL_FLOW)
        delete_pool_flow.add(lifecycle_tasks.PoolToErrorOnRevertTask(
            requires=[constants.POOL,
                      constants.LISTENERS,
                      constants.LOADBALANCER]))
        delete_pool_flow.add(database_tasks.MarkPoolPendingDeleteInDB(
            requires=constants.POOL))
        delete_pool_flow.add(database_tasks.CountPoolChildrenForQuota(
            requires=constants.POOL, provides=constants.POOL_CHILD_COUNT))
        delete_pool_flow.add(model_tasks.DeleteModelObject(
            rebind={constants.OBJECT: constants.POOL}))
        delete_pool_flow.add(amphora_driver_tasks.ListenersUpdate(
            requires=[constants.LOADBALANCER, constants.LISTENERS]))
        delete_pool_flow.add(database_tasks.DeletePoolInDB(
            requires=constants.POOL))
        delete_pool_flow.add(database_tasks.DecrementPoolQuota(
            requires=[constants.POOL, constants.POOL_CHILD_COUNT]))
        delete_pool_flow.add(database_tasks.MarkLBAndListenersActiveInDB(
            requires=[constants.LOADBALANCER, constants.LISTENERS]))

        return delete_pool_flow

    def get_delete_pool_flow_internal(self, name):
        """Create a flow to delete a pool, etc.

        :returns: The flow for deleting a pool
        """
        delete_pool_flow = linear_flow.Flow(constants.DELETE_POOL_FLOW)
        # health monitor should cascade
        # members should cascade
        delete_pool_flow.add(database_tasks.MarkPoolPendingDeleteInDB(
            name='mark_pool_pending_delete_in_db_' + name,
            requires=constants.POOL,
            rebind={constants.POOL: name}))
        delete_pool_flow.add(database_tasks.CountPoolChildrenForQuota(
            name='count_pool_children_for_quota_' + name,
            requires=constants.POOL,
            provides=constants.POOL_CHILD_COUNT,
            rebind={constants.POOL: name}))
        delete_pool_flow.add(model_tasks.DeleteModelObject(
            name='delete_model_object_' + name,
            rebind={constants.OBJECT: name}))
        delete_pool_flow.add(database_tasks.DeletePoolInDB(
            name='delete_pool_in_db_' + name,
            requires=constants.POOL,
            rebind={constants.POOL: name}))
        delete_pool_flow.add(database_tasks.DecrementPoolQuota(
            name='decrement_pool_quota_' + name,
            requires=[constants.POOL, constants.POOL_CHILD_COUNT],
            rebind={constants.POOL: name}))

        return delete_pool_flow

    def get_update_pool_flow(self):
        """Create a flow to update a pool

        :returns: The flow for updating a pool
        """
        update_pool_flow = linear_flow.Flow(constants.UPDATE_POOL_FLOW)
        update_pool_flow.add(lifecycle_tasks.PoolToErrorOnRevertTask(
            requires=[constants.POOL,
                      constants.LISTENERS,
                      constants.LOADBALANCER]))
        update_pool_flow.add(database_tasks.MarkPoolPendingUpdateInDB(
            requires=constants.POOL))
        update_pool_flow.add(amphora_driver_tasks.ListenersUpdate(
            requires=[constants.LOADBALANCER, constants.LISTENERS]))
        update_pool_flow.add(database_tasks.UpdatePoolInDB(
            requires=[constants.POOL, constants.UPDATE_DICT]))
        update_pool_flow.add(database_tasks.MarkPoolActiveInDB(
            requires=constants.POOL))
        update_pool_flow.add(database_tasks.MarkLBAndListenersActiveInDB(
            requires=[constants.LOADBALANCER, constants.LISTENERS]))

        return update_pool_flow
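The CountPoolChildrenForQuota / DecrementPoolQuota pair shows the provides/requires handoff: the first task's return value is published under POOL_CHILD_COUNT and consumed by the second. A stripped-down sketch of that pattern with stand-in tasks:

from taskflow import engines
from taskflow import task
from taskflow.patterns import linear_flow


class CountChildren(task.Task):
    default_provides = 'pool_child_count'

    def execute(self):
        return {'HM': 1, 'member': 2}  # published under 'pool_child_count'


class DecrementQuota(task.Task):
    def execute(self, pool_child_count):
        print('releasing quota for', pool_child_count)


demo = linear_flow.Flow('quota-demo')
demo.add(CountChildren(), DecrementQuota())
engines.run(demo)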
11
octavia/controller/worker/v2/tasks/__init__.py
Normal file
@@ -0,0 +1,11 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
397
octavia/controller/worker/v2/tasks/amphora_driver_tasks.py
Normal file
@@ -0,0 +1,397 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from cryptography import fernet
from oslo_config import cfg
from oslo_log import log as logging
import six
from stevedore import driver as stevedore_driver
from taskflow import task
from taskflow.types import failure

from octavia.amphorae.backends.agent import agent_jinja_cfg
from octavia.amphorae.driver_exceptions import exceptions as driver_except
from octavia.common import constants
from octavia.common import utils
from octavia.controller.worker import task_utils as task_utilities
from octavia.db import api as db_apis
from octavia.db import repositories as repo

CONF = cfg.CONF
LOG = logging.getLogger(__name__)


class BaseAmphoraTask(task.Task):
    """Base task to load drivers common to the tasks."""

    def __init__(self, **kwargs):
        super(BaseAmphoraTask, self).__init__(**kwargs)
        self.amphora_driver = stevedore_driver.DriverManager(
            namespace='octavia.amphora.drivers',
            name=CONF.controller_worker.amphora_driver,
            invoke_on_load=True
        ).driver
        self.amphora_repo = repo.AmphoraRepository()
        self.listener_repo = repo.ListenerRepository()
        self.loadbalancer_repo = repo.LoadBalancerRepository()
        self.task_utils = task_utilities.TaskUtils()


class AmpListenersUpdate(BaseAmphoraTask):
    """Task to update the listeners on one amphora."""

    def execute(self, listeners, amphora_index, amphorae, timeout_dict=()):
        # Note, we don't want this to cause a revert as it may be used
        # in a failover flow with both amps failing. Skip it and let the
        # health manager fix it.
        try:
            self.amphora_driver.update_amphora_listeners(
                listeners, amphora_index, amphorae, timeout_dict)
        except Exception as e:
            amphora_id = amphorae[amphora_index].id
            LOG.error('Failed to update listeners on amphora %s. Skipping '
                      'this amphora as it is failing to update due to: %s',
                      amphora_id, str(e))
            self.amphora_repo.update(db_apis.get_session(), amphora_id,
                                     status=constants.ERROR)


class ListenersUpdate(BaseAmphoraTask):
    """Task to update amphora with all specified listeners' configurations."""

    def execute(self, loadbalancer, listeners):
        """Execute updates per listener for an amphora."""
        for listener in listeners:
            listener.load_balancer = loadbalancer
            self.amphora_driver.update(listener, loadbalancer.vip)

    def revert(self, loadbalancer, *args, **kwargs):
        """Handle failed listeners updates."""

        LOG.warning("Reverting listeners updates.")

        for listener in loadbalancer.listeners:
            self.task_utils.mark_listener_prov_status_error(listener.id)


class ListenerStop(BaseAmphoraTask):
    """Task to stop the listener on the vip."""

    def execute(self, loadbalancer, listener):
        """Execute listener stop routines for an amphora."""
        self.amphora_driver.stop(listener, loadbalancer.vip)
        LOG.debug("Stopped the listener on the vip")

    def revert(self, listener, *args, **kwargs):
        """Handle a failed listener stop."""

        LOG.warning("Reverting listener stop.")

        self.task_utils.mark_listener_prov_status_error(listener.id)


class ListenerStart(BaseAmphoraTask):
    """Task to start the listener on the vip."""

    def execute(self, loadbalancer, listener):
        """Execute listener start routines for an amphora."""
        self.amphora_driver.start(listener, loadbalancer.vip)
        LOG.debug("Started the listener on the vip")

    def revert(self, listener, *args, **kwargs):
        """Handle a failed listener start."""

        LOG.warning("Reverting listener start.")

        self.task_utils.mark_listener_prov_status_error(listener.id)


class ListenersStart(BaseAmphoraTask):
    """Task to start all listeners on the vip."""

    def execute(self, loadbalancer, listeners, amphora=None):
        """Execute listener start routines for listeners on an amphora."""
        for listener in listeners:
            self.amphora_driver.start(listener, loadbalancer.vip, amphora)
        LOG.debug("Started the listeners on the vip")

    def revert(self, listeners, *args, **kwargs):
        """Handle failed listeners starts."""
|
||||||
|
|
||||||
|
LOG.warning("Reverting listeners starts.")
|
||||||
|
for listener in listeners:
|
||||||
|
self.task_utils.mark_listener_prov_status_error(listener.id)
|
||||||
|
|
||||||
|
|
||||||
|
class ListenerDelete(BaseAmphoraTask):
|
||||||
|
"""Task to delete the listener on the vip."""
|
||||||
|
|
||||||
|
def execute(self, loadbalancer, listener):
|
||||||
|
"""Execute listener delete routines for an amphora."""
|
||||||
|
self.amphora_driver.delete(listener, loadbalancer.vip)
|
||||||
|
LOG.debug("Deleted the listener on the vip")
|
||||||
|
|
||||||
|
def revert(self, listener, *args, **kwargs):
|
||||||
|
"""Handle a failed listener delete."""
|
||||||
|
|
||||||
|
LOG.warning("Reverting listener delete.")
|
||||||
|
|
||||||
|
self.task_utils.mark_listener_prov_status_error(listener.id)
|
||||||
|
|
||||||
|
|
||||||
|
class AmphoraGetInfo(BaseAmphoraTask):
|
||||||
|
"""Task to get information on an amphora."""
|
||||||
|
|
||||||
|
def execute(self, amphora):
|
||||||
|
"""Execute get_info routine for an amphora."""
|
||||||
|
self.amphora_driver.get_info(amphora)
|
||||||
|
|
||||||
|
|
||||||
|
class AmphoraGetDiagnostics(BaseAmphoraTask):
|
||||||
|
"""Task to get diagnostics on the amphora and the loadbalancers."""
|
||||||
|
|
||||||
|
def execute(self, amphora):
|
||||||
|
"""Execute get_diagnostic routine for an amphora."""
|
||||||
|
self.amphora_driver.get_diagnostics(amphora)
|
||||||
|
|
||||||
|
|
||||||
|
class AmphoraFinalize(BaseAmphoraTask):
|
||||||
|
"""Task to finalize the amphora before any listeners are configured."""
|
||||||
|
|
||||||
|
def execute(self, amphora):
|
||||||
|
"""Execute finalize_amphora routine."""
|
||||||
|
self.amphora_driver.finalize_amphora(amphora)
|
||||||
|
LOG.debug("Finalized the amphora.")
|
||||||
|
|
||||||
|
def revert(self, result, amphora, *args, **kwargs):
|
||||||
|
"""Handle a failed amphora finalize."""
|
||||||
|
if isinstance(result, failure.Failure):
|
||||||
|
return
|
||||||
|
LOG.warning("Reverting amphora finalize.")
|
||||||
|
self.task_utils.mark_amphora_status_error(amphora.id)
|
||||||
|
|
||||||
|
|
||||||
|
class AmphoraPostNetworkPlug(BaseAmphoraTask):
|
||||||
|
"""Task to notify the amphora post network plug."""
|
||||||
|
|
||||||
|
def execute(self, amphora, ports):
|
||||||
|
"""Execute post_network_plug routine."""
|
||||||
|
for port in ports:
|
||||||
|
self.amphora_driver.post_network_plug(amphora, port)
|
||||||
|
LOG.debug("post_network_plug called on compute instance "
|
||||||
|
"%(compute_id)s for port %(port_id)s",
|
||||||
|
{"compute_id": amphora.compute_id, "port_id": port.id})
|
||||||
|
|
||||||
|
def revert(self, result, amphora, *args, **kwargs):
|
||||||
|
"""Handle a failed post network plug."""
|
||||||
|
if isinstance(result, failure.Failure):
|
||||||
|
return
|
||||||
|
LOG.warning("Reverting post network plug.")
|
||||||
|
self.task_utils.mark_amphora_status_error(amphora.id)
|
||||||
|
|
||||||
|
|
||||||
|
class AmphoraePostNetworkPlug(BaseAmphoraTask):
|
||||||
|
"""Task to notify the amphorae post network plug."""
|
||||||
|
|
||||||
|
def execute(self, loadbalancer, added_ports):
|
||||||
|
"""Execute post_network_plug routine."""
|
||||||
|
amp_post_plug = AmphoraPostNetworkPlug()
|
||||||
|
for amphora in loadbalancer.amphorae:
|
||||||
|
if amphora.id in added_ports:
|
||||||
|
amp_post_plug.execute(amphora, added_ports[amphora.id])
|
||||||
|
|
||||||
|
def revert(self, result, loadbalancer, added_ports, *args, **kwargs):
|
||||||
|
"""Handle a failed post network plug."""
|
||||||
|
if isinstance(result, failure.Failure):
|
||||||
|
return
|
||||||
|
LOG.warning("Reverting post network plug.")
|
||||||
|
for amphora in six.moves.filter(
|
||||||
|
lambda amp: amp.status == constants.AMPHORA_ALLOCATED,
|
||||||
|
loadbalancer.amphorae):
|
||||||
|
|
||||||
|
self.task_utils.mark_amphora_status_error(amphora.id)
|
||||||
|
|
||||||
|
|
||||||
|
class AmphoraPostVIPPlug(BaseAmphoraTask):
|
||||||
|
"""Task to notify the amphora post VIP plug."""
|
||||||
|
|
||||||
|
def execute(self, amphora, loadbalancer, amphorae_network_config):
|
||||||
|
"""Execute post_vip_routine."""
|
||||||
|
self.amphora_driver.post_vip_plug(
|
||||||
|
amphora, loadbalancer, amphorae_network_config)
|
||||||
|
LOG.debug("Notified amphora of vip plug")
|
||||||
|
|
||||||
|
def revert(self, result, amphora, loadbalancer, *args, **kwargs):
|
||||||
|
"""Handle a failed amphora vip plug notification."""
|
||||||
|
if isinstance(result, failure.Failure):
|
||||||
|
return
|
||||||
|
LOG.warning("Reverting post vip plug.")
|
||||||
|
self.task_utils.mark_amphora_status_error(amphora.id)
|
||||||
|
self.task_utils.mark_loadbalancer_prov_status_error(loadbalancer.id)
|
||||||
|
|
||||||
|
|
||||||
|
class AmphoraePostVIPPlug(BaseAmphoraTask):
|
||||||
|
"""Task to notify the amphorae post VIP plug."""
|
||||||
|
|
||||||
|
def execute(self, loadbalancer, amphorae_network_config):
|
||||||
|
"""Execute post_vip_plug across the amphorae."""
|
||||||
|
amp_post_vip_plug = AmphoraPostVIPPlug()
|
||||||
|
for amphora in loadbalancer.amphorae:
|
||||||
|
amp_post_vip_plug.execute(amphora,
|
||||||
|
loadbalancer,
|
||||||
|
amphorae_network_config)
|
||||||
|
|
||||||
|
def revert(self, result, loadbalancer, *args, **kwargs):
|
||||||
|
"""Handle a failed amphora vip plug notification."""
|
||||||
|
if isinstance(result, failure.Failure):
|
||||||
|
return
|
||||||
|
LOG.warning("Reverting amphorae post vip plug.")
|
||||||
|
self.task_utils.mark_loadbalancer_prov_status_error(loadbalancer.id)
|
||||||
|
|
||||||
|
|
||||||
|
class AmphoraCertUpload(BaseAmphoraTask):
|
||||||
|
"""Upload a certificate to the amphora."""
|
||||||
|
|
||||||
|
def execute(self, amphora, server_pem):
|
||||||
|
"""Execute cert_update_amphora routine."""
|
||||||
|
LOG.debug("Upload cert in amphora REST driver")
|
||||||
|
key = utils.get_six_compatible_server_certs_key_passphrase()
|
||||||
|
fer = fernet.Fernet(key)
|
||||||
|
self.amphora_driver.upload_cert_amp(amphora, fer.decrypt(server_pem))
|
||||||
|
|
||||||
|
|
||||||
|
class AmphoraUpdateVRRPInterface(BaseAmphoraTask):
|
||||||
|
"""Task to get and update the VRRP interface device name from amphora."""
|
||||||
|
|
||||||
|
def execute(self, loadbalancer):
|
||||||
|
"""Execute post_vip_routine."""
|
||||||
|
amps = []
|
||||||
|
timeout_dict = {
|
||||||
|
constants.CONN_MAX_RETRIES:
|
||||||
|
CONF.haproxy_amphora.active_connection_max_retries,
|
||||||
|
constants.CONN_RETRY_INTERVAL:
|
||||||
|
CONF.haproxy_amphora.active_connection_rety_interval}
|
||||||
|
for amp in six.moves.filter(
|
||||||
|
lambda amp: amp.status == constants.AMPHORA_ALLOCATED,
|
||||||
|
loadbalancer.amphorae):
|
||||||
|
|
||||||
|
try:
|
||||||
|
interface = self.amphora_driver.get_vrrp_interface(
|
||||||
|
amp, timeout_dict=timeout_dict)
|
||||||
|
except Exception as e:
|
||||||
|
# This can occur when an active/standby LB has no listener
|
||||||
|
LOG.error('Failed to get amphora VRRP interface on amphora '
|
||||||
|
'%s. Skipping this amphora as it is failing due to: '
|
||||||
|
'%s', amp.id, str(e))
|
||||||
|
self.amphora_repo.update(db_apis.get_session(), amp.id,
|
||||||
|
status=constants.ERROR)
|
||||||
|
continue
|
||||||
|
|
||||||
|
self.amphora_repo.update(db_apis.get_session(), amp.id,
|
||||||
|
vrrp_interface=interface)
|
||||||
|
amps.append(self.amphora_repo.get(db_apis.get_session(),
|
||||||
|
id=amp.id))
|
||||||
|
loadbalancer.amphorae = amps
|
||||||
|
return loadbalancer
|
||||||
|
|
||||||
|
def revert(self, result, loadbalancer, *args, **kwargs):
|
||||||
|
"""Handle a failed amphora vip plug notification."""
|
||||||
|
if isinstance(result, failure.Failure):
|
||||||
|
return
|
||||||
|
LOG.warning("Reverting Get Amphora VRRP Interface.")
|
||||||
|
for amp in six.moves.filter(
|
||||||
|
lambda amp: amp.status == constants.AMPHORA_ALLOCATED,
|
||||||
|
loadbalancer.amphorae):
|
||||||
|
|
||||||
|
try:
|
||||||
|
self.amphora_repo.update(db_apis.get_session(), amp.id,
|
||||||
|
vrrp_interface=None)
|
||||||
|
except Exception as e:
|
||||||
|
LOG.error("Failed to update amphora %(amp)s "
|
||||||
|
"VRRP interface to None due to: %(except)s",
|
||||||
|
{'amp': amp.id, 'except': e})
|
||||||
|
|
||||||
|
|
||||||
|
class AmphoraVRRPUpdate(BaseAmphoraTask):
|
||||||
|
"""Task to update the VRRP configuration of the loadbalancer amphorae."""
|
||||||
|
|
||||||
|
def execute(self, loadbalancer, amphorae_network_config):
|
||||||
|
"""Execute update_vrrp_conf."""
|
||||||
|
self.amphora_driver.update_vrrp_conf(loadbalancer,
|
||||||
|
amphorae_network_config)
|
||||||
|
LOG.debug("Uploaded VRRP configuration of loadbalancer %s amphorae",
|
||||||
|
loadbalancer.id)
|
||||||
|
|
||||||
|
|
||||||
|
class AmphoraVRRPStop(BaseAmphoraTask):
|
||||||
|
"""Task to stop keepalived of all amphorae of a LB."""
|
||||||
|
|
||||||
|
def execute(self, loadbalancer):
|
||||||
|
self.amphora_driver.stop_vrrp_service(loadbalancer)
|
||||||
|
LOG.debug("Stopped VRRP of loadbalancer %s amphorae",
|
||||||
|
loadbalancer.id)
|
||||||
|
|
||||||
|
|
||||||
|
class AmphoraVRRPStart(BaseAmphoraTask):
|
||||||
|
"""Task to start keepalived of all amphorae of a LB."""
|
||||||
|
|
||||||
|
def execute(self, loadbalancer):
|
||||||
|
self.amphora_driver.start_vrrp_service(loadbalancer)
|
||||||
|
LOG.debug("Started VRRP of loadbalancer %s amphorae",
|
||||||
|
loadbalancer.id)
|
||||||
|
|
||||||
|
|
||||||
|
class AmphoraComputeConnectivityWait(BaseAmphoraTask):
|
||||||
|
"""Task to wait for the compute instance to be up."""
|
||||||
|
|
||||||
|
def execute(self, amphora):
|
||||||
|
"""Execute get_info routine for an amphora until it responds."""
|
||||||
|
try:
|
||||||
|
amp_info = self.amphora_driver.get_info(amphora)
|
||||||
|
LOG.debug('Successfuly connected to amphora %s: %s',
|
||||||
|
amphora.id, amp_info)
|
||||||
|
except driver_except.TimeOutException:
|
||||||
|
LOG.error("Amphora compute instance failed to become reachable. "
|
||||||
|
"This either means the compute driver failed to fully "
|
||||||
|
"boot the instance inside the timeout interval or the "
|
||||||
|
"instance is not reachable via the lb-mgmt-net.")
|
||||||
|
self.amphora_repo.update(db_apis.get_session(), amphora.id,
|
||||||
|
status=constants.ERROR)
|
||||||
|
raise
|
||||||
|
|
||||||
|
|
||||||
|
class AmphoraConfigUpdate(BaseAmphoraTask):
|
||||||
|
"""Task to push a new amphora agent configuration to the amphroa."""
|
||||||
|
|
||||||
|
def execute(self, amphora, flavor):
|
||||||
|
# Extract any flavor based settings
|
||||||
|
if flavor:
|
||||||
|
topology = flavor.get(constants.LOADBALANCER_TOPOLOGY,
|
||||||
|
CONF.controller_worker.loadbalancer_topology)
|
||||||
|
else:
|
||||||
|
topology = CONF.controller_worker.loadbalancer_topology
|
||||||
|
|
||||||
|
# Build the amphora agent config
|
||||||
|
agent_cfg_tmpl = agent_jinja_cfg.AgentJinjaTemplater()
|
||||||
|
agent_config = agent_cfg_tmpl.build_agent_config(amphora.id, topology)
|
||||||
|
|
||||||
|
# Push the new configuration to the amphroa
|
||||||
|
try:
|
||||||
|
self.amphora_driver.update_amphora_agent_config(amphora,
|
||||||
|
agent_config)
|
||||||
|
except driver_except.AmpDriverNotImplementedError:
|
||||||
|
LOG.error('Amphora {} does not support agent configuration '
|
||||||
|
'update. Please update the amphora image for this '
|
||||||
|
'amphora. Skipping.'.format(amphora.id))
|
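Taken together, these task classes are composed into taskflow flows by the controller worker. A minimal sketch, assuming hypothetical placeholder `lb` and `listeners` data model objects, of chaining two of the tasks above so that a failure reverts both:

# Sketch only: composing the listener tasks above into a linear flow.
# 'lb' and 'listeners' are hypothetical placeholders for data model
# objects the real worker loads from the Octavia database.
from taskflow import engines
from taskflow.patterns import linear_flow

from octavia.controller.worker.v2.tasks import amphora_driver_tasks

lb = None         # placeholder: a LoadBalancer data model
listeners = []    # placeholder: its Listener data models

flow = linear_flow.Flow('demo-listeners-update')
flow.add(amphora_driver_tasks.ListenersUpdate(),
         amphora_driver_tasks.ListenersStart())

# A failure in any task triggers revert() on every completed task in
# reverse order, which marks the affected listeners ERROR.
engines.run(flow, store={'loadbalancer': lb, 'listeners': listeners})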
51
octavia/controller/worker/v2/tasks/cert_task.py
Normal file
@@ -0,0 +1,51 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from cryptography import fernet
from oslo_config import cfg
from stevedore import driver as stevedore_driver
from taskflow import task

from octavia.common import utils

CONF = cfg.CONF


class BaseCertTask(task.Task):
    """Base task to load drivers common to the tasks."""

    def __init__(self, **kwargs):
        super(BaseCertTask, self).__init__(**kwargs)
        self.cert_generator = stevedore_driver.DriverManager(
            namespace='octavia.cert_generator',
            name=CONF.certificates.cert_generator,
            invoke_on_load=True,
        ).driver


class GenerateServerPEMTask(BaseCertTask):
    """Create the server certs for the agent communication.

    Use the amphora_id for the CN.
    """

    def execute(self, amphora_id):
        cert = self.cert_generator.generate_cert_key_pair(
            cn=amphora_id,
            validity=CONF.certificates.cert_validity_time)
        key = utils.get_six_compatible_server_certs_key_passphrase()
        fer = fernet.Fernet(key)

        return fer.encrypt(cert.certificate + cert.private_key)
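The server PEM is passed through flow storage Fernet-encrypted and only decrypted at the point of use. A self-contained sketch of that round trip; the key here is generated locally for illustration, while Octavia derives its key from configuration:

# Sketch only: the Fernet symmetric round trip used above.
from cryptography import fernet

key = fernet.Fernet.generate_key()  # illustration; not Octavia's key
fer = fernet.Fernet(key)

token = fer.encrypt(b'-----BEGIN CERTIFICATE-----...')
assert fer.decrypt(token) == b'-----BEGIN CERTIFICATE-----...'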
251
octavia/controller/worker/v2/tasks/compute_tasks.py
Normal file
@@ -0,0 +1,251 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

import time

from cryptography import fernet
from oslo_config import cfg
from oslo_log import log as logging
from stevedore import driver as stevedore_driver
from taskflow import task
from taskflow.types import failure

from octavia.amphorae.backends.agent import agent_jinja_cfg
from octavia.common import constants
from octavia.common import exceptions
from octavia.common.jinja import user_data_jinja_cfg
from octavia.common import utils
from octavia.controller.worker import amphora_rate_limit

CONF = cfg.CONF
LOG = logging.getLogger(__name__)


class BaseComputeTask(task.Task):
    """Base task to load drivers common to the tasks."""

    def __init__(self, **kwargs):
        super(BaseComputeTask, self).__init__(**kwargs)
        self.compute = stevedore_driver.DriverManager(
            namespace='octavia.compute.drivers',
            name=CONF.controller_worker.compute_driver,
            invoke_on_load=True
        ).driver
        self.rate_limit = amphora_rate_limit.AmphoraBuildRateLimit()


class ComputeCreate(BaseComputeTask):
    """Create the compute instance for a new amphora."""

    def execute(self, amphora_id, config_drive_files=None,
                build_type_priority=constants.LB_CREATE_NORMAL_PRIORITY,
                server_group_id=None, ports=None, flavor=None):
        """Create an amphora

        :returns: an amphora
        """
        ports = ports or []
        network_ids = CONF.controller_worker.amp_boot_network_list[:]
        config_drive_files = config_drive_files or {}
        user_data = None
        LOG.debug("Compute create execute for amphora with id %s", amphora_id)

        user_data_config_drive = CONF.controller_worker.user_data_config_drive

        key_name = CONF.controller_worker.amp_ssh_key_name
        # TODO(rm_work): amp_ssh_access_allowed is deprecated in Pike.
        # Remove the following two lines in the S release.
        ssh_access = CONF.controller_worker.amp_ssh_access_allowed
        key_name = None if not ssh_access else key_name

        # Apply any Octavia flavor customizations
        if flavor:
            topology = flavor.get(constants.LOADBALANCER_TOPOLOGY,
                                  CONF.controller_worker.loadbalancer_topology)
            amp_compute_flavor = flavor.get(
                constants.COMPUTE_FLAVOR, CONF.controller_worker.amp_flavor_id)
        else:
            topology = CONF.controller_worker.loadbalancer_topology
            amp_compute_flavor = CONF.controller_worker.amp_flavor_id

        try:
            if CONF.haproxy_amphora.build_rate_limit != -1:
                self.rate_limit.add_to_build_request_queue(
                    amphora_id, build_type_priority)

            agent_cfg = agent_jinja_cfg.AgentJinjaTemplater()
            config_drive_files['/etc/octavia/amphora-agent.conf'] = (
                agent_cfg.build_agent_config(amphora_id, topology))
            if user_data_config_drive:
                udtemplater = user_data_jinja_cfg.UserDataJinjaCfg()
                user_data = udtemplater.build_user_data_config(
                    config_drive_files)
                config_drive_files = None

            compute_id = self.compute.build(
                name="amphora-" + amphora_id,
                amphora_flavor=amp_compute_flavor,
                image_id=CONF.controller_worker.amp_image_id,
                image_tag=CONF.controller_worker.amp_image_tag,
                image_owner=CONF.controller_worker.amp_image_owner_id,
                key_name=key_name,
                sec_groups=CONF.controller_worker.amp_secgroup_list,
                network_ids=network_ids,
                port_ids=[port.id for port in ports],
                config_drive_files=config_drive_files,
                user_data=user_data,
                server_group_id=server_group_id)

            LOG.debug("Server created with id: %s for amphora id: %s",
                      compute_id, amphora_id)
            return compute_id

        except Exception:
            LOG.exception("Compute create for amphora id: %s failed",
                          amphora_id)
            raise

    def revert(self, result, amphora_id, *args, **kwargs):
        """This method will revert the creation of the amphora.

        So it will just delete it in this flow.
        """
        if isinstance(result, failure.Failure):
            return
        compute_id = result
        LOG.warning("Reverting compute create for amphora with id "
                    "%(amp)s and compute id: %(comp)s",
                    {'amp': amphora_id, 'comp': compute_id})
        try:
            self.compute.delete(compute_id)
        except Exception:
            LOG.exception("Reverting compute create failed")


class CertComputeCreate(ComputeCreate):
    def execute(self, amphora_id, server_pem,
                build_type_priority=constants.LB_CREATE_NORMAL_PRIORITY,
                server_group_id=None, ports=None, flavor=None):
        """Create an amphora

        :returns: an amphora
        """

        # load client certificate
        with open(CONF.controller_worker.client_ca, 'r') as client_ca:
            ca = client_ca.read()

        key = utils.get_six_compatible_server_certs_key_passphrase()
        fer = fernet.Fernet(key)
        config_drive_files = {
            '/etc/octavia/certs/server.pem': fer.decrypt(server_pem),
            '/etc/octavia/certs/client_ca.pem': ca}
        return super(CertComputeCreate, self).execute(
            amphora_id, config_drive_files=config_drive_files,
            build_type_priority=build_type_priority,
            server_group_id=server_group_id, ports=ports, flavor=flavor)


class DeleteAmphoraeOnLoadBalancer(BaseComputeTask):
    """Delete the amphorae on a load balancer.

    Iterate through amphorae, deleting them
    """

    def execute(self, loadbalancer):
        for amp in loadbalancer.amphorae:
            # The compute driver will already handle NotFound
            try:
                self.compute.delete(amp.compute_id)
            except Exception:
                LOG.exception("Compute delete for amphora id: %s failed",
                              amp.id)
                raise


class ComputeDelete(BaseComputeTask):
    def execute(self, amphora):
        LOG.debug("Compute Delete execute for amphora with id %s", amphora.id)

        try:
            self.compute.delete(amphora.compute_id)
        except Exception:
            LOG.exception("Compute delete for amphora id: %s failed",
                          amphora.id)
            raise


class ComputeActiveWait(BaseComputeTask):
    """Wait for the compute driver to mark the amphora active."""

    def execute(self, compute_id, amphora_id):
        """Wait for the compute driver to mark the amphora active

        :raises: Generic exception if the amphora is not active
        :returns: An amphora object
        """
        for i in range(CONF.controller_worker.amp_active_retries):
            amp, fault = self.compute.get_amphora(compute_id)
            if amp.status == constants.ACTIVE:
                if CONF.haproxy_amphora.build_rate_limit != -1:
                    self.rate_limit.remove_from_build_req_queue(amphora_id)
                return amp
            if amp.status == constants.ERROR:
                raise exceptions.ComputeBuildException(fault=fault)
            time.sleep(CONF.controller_worker.amp_active_wait_sec)

        raise exceptions.ComputeWaitTimeoutException(id=compute_id)


class NovaServerGroupCreate(BaseComputeTask):
    def execute(self, loadbalancer_id):
        """Create a server group by nova client api

        :param loadbalancer_id: will be used for server group's name
        :param policy: will be used for server group's policy
        :raises: Generic exception if the server group is not created
        :returns: server group's id
        """

        name = 'octavia-lb-' + loadbalancer_id
        server_group = self.compute.create_server_group(
            name, CONF.nova.anti_affinity_policy)
        LOG.debug("Server Group created with id: %s for load balancer id: "
                  "%s", server_group.id, loadbalancer_id)
        return server_group.id

    def revert(self, result, *args, **kwargs):
        """This method will revert the creation of the server group.

        :param result: here it refers to server group id
        """
        server_group_id = result
        LOG.warning("Reverting server group create with id:%s",
                    server_group_id)
        try:
            self.compute.delete_server_group(server_group_id)
        except Exception as e:
            LOG.error("Failed to delete server group. Resources may "
                      "still be in use for server group: %(sg)s due to "
                      "error: %(except)s",
                      {'sg': server_group_id, 'except': e})


class NovaServerGroupDelete(BaseComputeTask):
    def execute(self, server_group_id):
        if server_group_id is not None:
            self.compute.delete_server_group(server_group_id)
        else:
            return
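ComputeActiveWait's retry loop is a generic poll-until-terminal-state pattern. A standalone sketch, with hypothetical get_status/retries/wait_sec parameters standing in for the compute driver and configuration values:

# Sketch only: the poll-until-active pattern ComputeActiveWait uses.
import time


def wait_for_active(get_status, retries=30, wait_sec=10):
    """Poll get_status() until it returns 'ACTIVE' or retries run out."""
    for _ in range(retries):
        status = get_status()
        if status == 'ACTIVE':
            return status
        if status == 'ERROR':
            raise RuntimeError('build failed')
        time.sleep(wait_sec)
    raise TimeoutError('instance never became ACTIVE')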
2707
octavia/controller/worker/v2/tasks/database_tasks.py
Normal file
File diff suppressed because it is too large
173
octavia/controller/worker/v2/tasks/lifecycle_tasks.py
Normal file
@@ -0,0 +1,173 @@
# Copyright 2016 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from taskflow import task

from octavia.controller.worker import task_utils as task_utilities


class BaseLifecycleTask(task.Task):
    """Base task to instantiate common classes."""

    def __init__(self, **kwargs):
        self.task_utils = task_utilities.TaskUtils()
        super(BaseLifecycleTask, self).__init__(**kwargs)


class AmphoraIDToErrorOnRevertTask(BaseLifecycleTask):
    """Task to checkpoint Amphora lifecycle milestones."""

    def execute(self, amphora_id):
        pass

    def revert(self, amphora_id, *args, **kwargs):
        self.task_utils.mark_amphora_status_error(amphora_id)


class AmphoraToErrorOnRevertTask(AmphoraIDToErrorOnRevertTask):
    """Task to checkpoint Amphora lifecycle milestones."""

    def execute(self, amphora):
        pass

    def revert(self, amphora, *args, **kwargs):
        super(AmphoraToErrorOnRevertTask, self).revert(amphora.id)


class HealthMonitorToErrorOnRevertTask(BaseLifecycleTask):
    """Task to set a health monitor to ERROR on revert."""

    def execute(self, health_mon, listeners, loadbalancer):
        pass

    def revert(self, health_mon, listeners, loadbalancer, *args, **kwargs):
        self.task_utils.mark_health_mon_prov_status_error(health_mon.pool_id)
        self.task_utils.mark_pool_prov_status_active(health_mon.pool_id)
        self.task_utils.mark_loadbalancer_prov_status_active(loadbalancer.id)
        for listener in listeners:
            self.task_utils.mark_listener_prov_status_active(listener.id)


class L7PolicyToErrorOnRevertTask(BaseLifecycleTask):
    """Task to set a l7policy to ERROR on revert."""

    def execute(self, l7policy, listeners, loadbalancer):
        pass

    def revert(self, l7policy, listeners, loadbalancer, *args, **kwargs):
        self.task_utils.mark_l7policy_prov_status_error(l7policy.id)
        self.task_utils.mark_loadbalancer_prov_status_active(loadbalancer.id)
        for listener in listeners:
            self.task_utils.mark_listener_prov_status_active(listener.id)


class L7RuleToErrorOnRevertTask(BaseLifecycleTask):
    """Task to set a l7rule to ERROR on revert."""

    def execute(self, l7rule, listeners, loadbalancer):
        pass

    def revert(self, l7rule, listeners, loadbalancer, *args, **kwargs):
        self.task_utils.mark_l7rule_prov_status_error(l7rule.id)
        self.task_utils.mark_l7policy_prov_status_active(l7rule.l7policy_id)
        self.task_utils.mark_loadbalancer_prov_status_active(loadbalancer.id)
        for listener in listeners:
            self.task_utils.mark_listener_prov_status_active(listener.id)


class ListenerToErrorOnRevertTask(BaseLifecycleTask):
    """Task to set a listener to ERROR on revert."""

    def execute(self, listener):
        pass

    def revert(self, listener, *args, **kwargs):
        self.task_utils.mark_listener_prov_status_error(listener.id)
        self.task_utils.mark_loadbalancer_prov_status_active(
            listener.load_balancer.id)


class ListenersToErrorOnRevertTask(BaseLifecycleTask):
    """Task to set listeners to ERROR on revert."""

    def execute(self, listeners, loadbalancer):
        pass

    def revert(self, listeners, loadbalancer, *args, **kwargs):
        self.task_utils.mark_loadbalancer_prov_status_active(
            loadbalancer.id)
        for listener in listeners:
            self.task_utils.mark_listener_prov_status_error(listener.id)


class LoadBalancerIDToErrorOnRevertTask(BaseLifecycleTask):
    """Task to set the load balancer to ERROR on revert."""

    def execute(self, loadbalancer_id):
        pass

    def revert(self, loadbalancer_id, *args, **kwargs):
        self.task_utils.mark_loadbalancer_prov_status_error(loadbalancer_id)


class LoadBalancerToErrorOnRevertTask(LoadBalancerIDToErrorOnRevertTask):
    """Task to set the load balancer to ERROR on revert."""

    def execute(self, loadbalancer):
        pass

    def revert(self, loadbalancer, *args, **kwargs):
        super(LoadBalancerToErrorOnRevertTask, self).revert(loadbalancer.id)


class MemberToErrorOnRevertTask(BaseLifecycleTask):
    """Task to set a member to ERROR on revert."""

    def execute(self, member, listeners, loadbalancer, pool):
        pass

    def revert(self, member, listeners, loadbalancer, pool, *args, **kwargs):
        self.task_utils.mark_member_prov_status_error(member.id)
        for listener in listeners:
            self.task_utils.mark_listener_prov_status_active(listener.id)
        self.task_utils.mark_pool_prov_status_active(pool.id)
        self.task_utils.mark_loadbalancer_prov_status_active(loadbalancer.id)


class MembersToErrorOnRevertTask(BaseLifecycleTask):
    """Task to set members to ERROR on revert."""

    def execute(self, members, listeners, loadbalancer, pool):
        pass

    def revert(self, members, listeners, loadbalancer, pool, *args, **kwargs):
        for m in members:
            self.task_utils.mark_member_prov_status_error(m.id)
        for listener in listeners:
            self.task_utils.mark_listener_prov_status_active(listener.id)
        self.task_utils.mark_pool_prov_status_active(pool.id)
        self.task_utils.mark_loadbalancer_prov_status_active(loadbalancer.id)


class PoolToErrorOnRevertTask(BaseLifecycleTask):
    """Task to set a pool to ERROR on revert."""

    def execute(self, pool, listeners, loadbalancer):
        pass

    def revert(self, pool, listeners, loadbalancer, *args, **kwargs):
        self.task_utils.mark_pool_prov_status_error(pool.id)
        self.task_utils.mark_loadbalancer_prov_status_active(loadbalancer.id)
        for listener in listeners:
            self.task_utils.mark_listener_prov_status_active(listener.id)
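All of these lifecycle tasks share one pattern: execute() is a deliberate no-op and only revert() does work, so adding the task to a flow acts as a checkpoint that marks resources ERROR if anything later fails. A minimal sketch of the pattern with a hypothetical resource:

# Sketch only: the revert-only "checkpoint" pattern used above.
from taskflow import task


class MarkWidgetErrorOnRevert(task.Task):
    """Hypothetical task: flags a widget ERROR if the flow reverts."""

    def execute(self, widget_id):
        pass  # nothing to do on the forward path

    def revert(self, widget_id, *args, **kwargs):
        # Runs only when a later task in the flow fails.
        print('widget %s -> ERROR' % widget_id)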
41
octavia/controller/worker/v2/tasks/model_tasks.py
Normal file
@@ -0,0 +1,41 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from taskflow import task


class DeleteModelObject(task.Task):
    """Task to delete an object in a model."""

    def execute(self, object):

        object.delete()


class UpdateAttributes(task.Task):
    """Task to update an object for changes."""

    def execute(self, object, update_dict):
        """Update an object and its associated resources.

        Note: This relies on the data_model update() methods to handle complex
        objects with nested objects (LoadBalancer.vip,
        Pool.session_persistence, etc.)

        :param object: The object to be updated.
        :param update_dict: The updates dictionary.
        :returns: None
        """
        object.update(update_dict)
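A sketch of the update() contract UpdateAttributes depends on, using a hypothetical data model class:

# Sketch only: a hypothetical data model whose update() applies a flat
# update_dict, mirroring what UpdateAttributes relies on.
class Pool(object):
    def __init__(self, name, lb_algorithm):
        self.name = name
        self.lb_algorithm = lb_algorithm

    def update(self, update_dict):
        for key, value in update_dict.items():
            setattr(self, key, value)


pool = Pool('web', 'ROUND_ROBIN')
pool.update({'lb_algorithm': 'LEAST_CONNECTIONS'})
assert pool.lb_algorithm == 'LEAST_CONNECTIONS'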
659
octavia/controller/worker/v2/tasks/network_tasks.py
Normal file
@@ -0,0 +1,659 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from oslo_config import cfg
from oslo_log import log as logging
import six
from taskflow import task
from taskflow.types import failure

from octavia.common import constants
from octavia.common import utils
from octavia.controller.worker import task_utils
from octavia.network import base
from octavia.network import data_models as n_data_models

LOG = logging.getLogger(__name__)
CONF = cfg.CONF


class BaseNetworkTask(task.Task):
    """Base task to load drivers common to the tasks."""

    def __init__(self, **kwargs):
        super(BaseNetworkTask, self).__init__(**kwargs)
        self._network_driver = None
        self.task_utils = task_utils.TaskUtils()

    @property
    def network_driver(self):
        if self._network_driver is None:
            self._network_driver = utils.get_network_driver()
        return self._network_driver


class CalculateAmphoraDelta(BaseNetworkTask):

    default_provides = constants.DELTA

    def execute(self, loadbalancer, amphora):
        LOG.debug("Calculating network delta for amphora id: %s", amphora.id)

        # Figure out what networks we want
        # seed with lb network(s)
        vrrp_port = self.network_driver.get_port(amphora.vrrp_port_id)
        desired_network_ids = {vrrp_port.network_id}.union(
            CONF.controller_worker.amp_boot_network_list)

        for pool in loadbalancer.pools:
            member_networks = [
                self.network_driver.get_subnet(member.subnet_id).network_id
                for member in pool.members
                if member.subnet_id
            ]
            desired_network_ids.update(member_networks)

        nics = self.network_driver.get_plugged_networks(amphora.compute_id)
        # assume we don't have two nics in the same network
        actual_network_nics = dict((nic.network_id, nic) for nic in nics)

        del_ids = set(actual_network_nics) - desired_network_ids
        delete_nics = list(
            actual_network_nics[net_id] for net_id in del_ids)

        add_ids = desired_network_ids - set(actual_network_nics)
        add_nics = list(n_data_models.Interface(
            network_id=net_id) for net_id in add_ids)
        delta = n_data_models.Delta(
            amphora_id=amphora.id, compute_id=amphora.compute_id,
            add_nics=add_nics, delete_nics=delete_nics)
        return delta
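The heart of the delta calculation is plain set arithmetic on network IDs. A standalone sketch with hypothetical network names:

# Sketch only: the set arithmetic CalculateAmphoraDelta performs,
# using plain network-id strings instead of driver objects.
desired = {'lb-mgmt-net', 'member-net-1'}
actual = {'lb-mgmt-net', 'stale-net'}

to_delete = actual - desired   # nics to unplug
to_add = desired - actual      # nics to plug
assert to_delete == {'stale-net'} and to_add == {'member-net-1'}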
class CalculateDelta(BaseNetworkTask):
    """Task to calculate the delta between

    the nics on the amphora and the ones
    we need. Returns a list for
    plumbing them.
    """

    default_provides = constants.DELTAS

    def execute(self, loadbalancer):
        """Compute which NICs need to be plugged

        for the amphora to become operational.

        :param loadbalancer: the loadbalancer to calculate deltas for all
                             amphorae
        :returns: dict of octavia.network.data_models.Delta keyed off amphora
                  id
        """

        calculate_amp = CalculateAmphoraDelta()
        deltas = {}
        for amphora in six.moves.filter(
                lambda amp: amp.status == constants.AMPHORA_ALLOCATED,
                loadbalancer.amphorae):

            delta = calculate_amp.execute(loadbalancer, amphora)
            deltas[amphora.id] = delta
        return deltas


class GetPlumbedNetworks(BaseNetworkTask):
    """Task to figure out the NICS on an amphora.

    This will likely move into the amphora driver
    :returns: Array of networks
    """

    default_provides = constants.NICS

    def execute(self, amphora):
        """Get plumbed networks for the amphora."""

        LOG.debug("Getting plumbed networks for amphora id: %s", amphora.id)

        return self.network_driver.get_plugged_networks(amphora.compute_id)


class PlugNetworks(BaseNetworkTask):
    """Task to plug the networks.

    This uses the delta to add all missing networks/nics
    """

    def execute(self, amphora, delta):
        """Update the amphora networks for the delta."""

        LOG.debug("Plug or unplug networks for amphora id: %s", amphora.id)

        if not delta:
            LOG.debug("No network deltas for amphora id: %s", amphora.id)
            return

        # add nics
        for nic in delta.add_nics:
            self.network_driver.plug_network(amphora.compute_id,
                                             nic.network_id)

    def revert(self, amphora, delta, *args, **kwargs):
        """Handle a failed network plug by removing all nics added."""

        LOG.warning("Unable to plug networks for amp id %s", amphora.id)
        if not delta:
            return

        for nic in delta.add_nics:
            try:
                self.network_driver.unplug_network(amphora.compute_id,
                                                   nic.network_id)
            except base.NetworkNotFound:
                pass


class UnPlugNetworks(BaseNetworkTask):
    """Task to unplug the networks

    Loop over all nics and unplug them
    based on delta
    """

    def execute(self, amphora, delta):
        """Unplug the networks."""

        LOG.debug("Unplug network for amphora")
        if not delta:
            LOG.debug("No network deltas for amphora id: %s", amphora.id)
            return

        for nic in delta.delete_nics:
            try:
                self.network_driver.unplug_network(amphora.compute_id,
                                                   nic.network_id)
            except base.NetworkNotFound:
                LOG.debug("Network %s not found", nic.network_id)
            except Exception:
                LOG.exception("Unable to unplug network")
                # TODO(xgerman) follow up if that makes sense


class GetMemberPorts(BaseNetworkTask):

    def execute(self, loadbalancer, amphora):
        vip_port = self.network_driver.get_port(loadbalancer.vip.port_id)
        member_ports = []
        interfaces = self.network_driver.get_plugged_networks(
            amphora.compute_id)
        for interface in interfaces:
            port = self.network_driver.get_port(interface.port_id)
            if vip_port.network_id == port.network_id:
                continue
            port.network = self.network_driver.get_network(port.network_id)
            for fixed_ip in port.fixed_ips:
                if amphora.lb_network_ip == fixed_ip.ip_address:
                    break
                fixed_ip.subnet = self.network_driver.get_subnet(
                    fixed_ip.subnet_id)
            # Only add the port to the list if the IP wasn't the mgmt IP
            else:
                member_ports.append(port)
        return member_ports
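GetMemberPorts filters out the management port with Python's for/else: the else suite runs only if the loop never hit break. A tiny sketch of the idiom with made-up addresses:

# Sketch only: the for/else idiom GetMemberPorts relies on.
mgmt_ip = '192.0.2.10'
port_ips = ['198.51.100.5', '198.51.100.6']

for ip in port_ips:
    if ip == mgmt_ip:
        break  # skips the else clause below
else:
    print('port kept: no address matched the management IP')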
class HandleNetworkDelta(BaseNetworkTask):
    """Task to plug and unplug networks

    Plug or unplug networks based on delta
    """

    def execute(self, amphora, delta):
        """Handle network plugging based off deltas."""
        added_ports = {}
        added_ports[amphora.id] = []
        for nic in delta.add_nics:
            interface = self.network_driver.plug_network(delta.compute_id,
                                                         nic.network_id)
            port = self.network_driver.get_port(interface.port_id)
            port.network = self.network_driver.get_network(port.network_id)
            for fixed_ip in port.fixed_ips:
                fixed_ip.subnet = self.network_driver.get_subnet(
                    fixed_ip.subnet_id)
            added_ports[amphora.id].append(port)
        for nic in delta.delete_nics:
            try:
                self.network_driver.unplug_network(delta.compute_id,
                                                   nic.network_id)
            except base.NetworkNotFound:
                LOG.debug("Network %s not found", nic.network_id)
            except Exception:
                LOG.exception("Unable to unplug network")
        return added_ports

    def revert(self, result, amphora, delta, *args, **kwargs):
        """Handle a network plug or unplug failures."""

        if isinstance(result, failure.Failure):
            return

        if not delta:
            return

        LOG.warning("Unable to plug networks for amp id %s",
                    delta.amphora_id)

        for nic in delta.add_nics:
            try:
                self.network_driver.unplug_network(delta.compute_id,
                                                   nic.network_id)
            except Exception:
                pass


class HandleNetworkDeltas(BaseNetworkTask):
    """Task to plug and unplug networks

    Loop through the deltas and plug or unplug
    networks based on delta
    """

    def execute(self, deltas):
        """Handle network plugging based off deltas."""
        added_ports = {}
        for amp_id, delta in six.iteritems(deltas):
            added_ports[amp_id] = []
            for nic in delta.add_nics:
                interface = self.network_driver.plug_network(delta.compute_id,
                                                             nic.network_id)
                port = self.network_driver.get_port(interface.port_id)
                port.network = self.network_driver.get_network(
                    port.network_id)
                for fixed_ip in port.fixed_ips:
                    fixed_ip.subnet = self.network_driver.get_subnet(
                        fixed_ip.subnet_id)
                added_ports[amp_id].append(port)
            for nic in delta.delete_nics:
                try:
                    self.network_driver.unplug_network(delta.compute_id,
                                                       nic.network_id)
                except base.NetworkNotFound:
                    LOG.debug("Network %s not found", nic.network_id)
                except Exception:
                    LOG.exception("Unable to unplug network")
        return added_ports

    def revert(self, result, deltas, *args, **kwargs):
        """Handle a network plug or unplug failures."""

        if isinstance(result, failure.Failure):
            return
        for amp_id, delta in six.iteritems(deltas):
            LOG.warning("Unable to plug networks for amp id %s",
                        delta.amphora_id)
            if not delta:
                return

            for nic in delta.add_nics:
                try:
                    self.network_driver.unplug_network(delta.compute_id,
                                                       nic.network_id)
                except base.NetworkNotFound:
                    pass
class PlugVIP(BaseNetworkTask):
    """Task to plumb a VIP."""

    def execute(self, loadbalancer):
        """Plumb a vip to an amphora."""

        LOG.debug("Plumbing VIP for loadbalancer id: %s", loadbalancer.id)

        amps_data = self.network_driver.plug_vip(loadbalancer,
                                                 loadbalancer.vip)
        return amps_data

    def revert(self, result, loadbalancer, *args, **kwargs):
        """Handle a failure to plumb a vip."""

        if isinstance(result, failure.Failure):
            return
        LOG.warning("Unable to plug VIP for loadbalancer id %s",
                    loadbalancer.id)

        try:
            # Make sure we have the current port IDs for cleanup
            for amp_data in result:
                for amphora in six.moves.filter(
                        # pylint: disable=cell-var-from-loop
                        lambda amp: amp.id == amp_data.id,
                        loadbalancer.amphorae):
                    amphora.vrrp_port_id = amp_data.vrrp_port_id
                    amphora.ha_port_id = amp_data.ha_port_id

            self.network_driver.unplug_vip(loadbalancer, loadbalancer.vip)
        except Exception as e:
            LOG.error("Failed to unplug VIP. Resources may still "
                      "be in use from vip: %(vip)s due to error: %(except)s",
                      {'vip': loadbalancer.vip.ip_address, 'except': e})


class UpdateVIPSecurityGroup(BaseNetworkTask):
    """Task to setup SG for LB."""

    def execute(self, loadbalancer):
        """Task to setup SG for LB."""

        LOG.debug("Setup SG for loadbalancer id: %s", loadbalancer.id)

        self.network_driver.update_vip_sg(loadbalancer, loadbalancer.vip)


class GetSubnetFromVIP(BaseNetworkTask):
    """Task to look up the subnet the VIP lives on."""

    def execute(self, loadbalancer):
        """Get the subnet for a load balancer's VIP."""

        LOG.debug("Getting subnet for LB: %s", loadbalancer.id)

        return self.network_driver.get_subnet(loadbalancer.vip.subnet_id)


class PlugVIPAmpphora(BaseNetworkTask):
    """Task to plumb a VIP."""

    def execute(self, loadbalancer, amphora, subnet):
        """Plumb a vip to an amphora."""

        LOG.debug("Plumbing VIP for amphora id: %s", amphora.id)

        amp_data = self.network_driver.plug_aap_port(
            loadbalancer, loadbalancer.vip, amphora, subnet)
        return amp_data

    def revert(self, result, loadbalancer, amphora, subnet, *args, **kwargs):
        """Handle a failure to plumb a vip."""

        if isinstance(result, failure.Failure):
            return
        LOG.warning("Unable to plug VIP for amphora id %s "
                    "load balancer id %s",
                    amphora.id, loadbalancer.id)

        try:
            amphora.vrrp_port_id = result.vrrp_port_id
            amphora.ha_port_id = result.ha_port_id

            self.network_driver.unplug_aap_port(loadbalancer.vip,
                                                amphora, subnet)
        except Exception as e:
            LOG.error('Failed to unplug AAP port. Resources may still be in '
                      'use for VIP: %s due to error: %s', loadbalancer.vip, e)


class UnplugVIP(BaseNetworkTask):
    """Task to unplug the vip."""

    def execute(self, loadbalancer):
        """Unplug the vip."""

        LOG.debug("Unplug vip on amphora")
        try:
            self.network_driver.unplug_vip(loadbalancer, loadbalancer.vip)
        except Exception:
            LOG.exception("Unable to unplug vip from load balancer %s",
                          loadbalancer.id)


class AllocateVIP(BaseNetworkTask):
    """Task to allocate a VIP."""

    def execute(self, loadbalancer):
        """Allocate a vip to the loadbalancer."""

        LOG.debug("Allocate_vip port_id %s, subnet_id %s, "
                  "ip_address %s",
                  loadbalancer.vip.port_id,
                  loadbalancer.vip.subnet_id,
                  loadbalancer.vip.ip_address)
        return self.network_driver.allocate_vip(loadbalancer)

    def revert(self, result, loadbalancer, *args, **kwargs):
        """Handle a failure to allocate vip."""

        if isinstance(result, failure.Failure):
            LOG.exception("Unable to allocate VIP")
            return
        vip = result
        LOG.warning("Deallocating vip %s", vip.ip_address)
        try:
            self.network_driver.deallocate_vip(vip)
        except Exception as e:
            LOG.error("Failed to deallocate VIP. Resources may still "
                      "be in use from vip: %(vip)s due to error: %(except)s",
                      {'vip': vip.ip_address, 'except': e})


class DeallocateVIP(BaseNetworkTask):
    """Task to deallocate a VIP."""

    def execute(self, loadbalancer):
        """Deallocate a VIP."""

        LOG.debug("Deallocating a VIP %s", loadbalancer.vip.ip_address)

        # NOTE(blogan): this is kind of ugly but sufficient for now. Drivers
        # will need access to the load balancer that the vip is/was attached
        # to. However the data model serialization for the vip does not give a
        # backref to the loadbalancer if accessed through the loadbalancer.
        vip = loadbalancer.vip
        vip.load_balancer = loadbalancer
        self.network_driver.deallocate_vip(vip)


class UpdateVIP(BaseNetworkTask):
    """Task to update a VIP."""

    def execute(self, loadbalancer):
        LOG.debug("Updating VIP of load_balancer %s.", loadbalancer.id)

        self.network_driver.update_vip(loadbalancer)


class UpdateVIPForDelete(BaseNetworkTask):
    """Task to update a VIP for listener delete flows."""

    def execute(self, loadbalancer):
        LOG.debug("Updating VIP for listener delete on load_balancer %s.",
                  loadbalancer.id)

        self.network_driver.update_vip(loadbalancer, for_delete=True)


class GetAmphoraNetworkConfigs(BaseNetworkTask):
    """Task to retrieve amphora network details."""

    def execute(self, loadbalancer, amphora=None):
        LOG.debug("Retrieving vip network details.")
        return self.network_driver.get_network_configs(loadbalancer,
                                                       amphora=amphora)


class GetAmphoraeNetworkConfigs(BaseNetworkTask):
    """Task to retrieve amphorae network details."""

    def execute(self, loadbalancer):
        LOG.debug("Retrieving vip network details.")
        return self.network_driver.get_network_configs(loadbalancer)


class FailoverPreparationForAmphora(BaseNetworkTask):
    """Task to prepare an amphora for failover."""

    def execute(self, amphora):
        LOG.debug("Prepare amphora %s for failover.", amphora.id)

        self.network_driver.failover_preparation(amphora)


class RetrievePortIDsOnAmphoraExceptLBNetwork(BaseNetworkTask):
    """Task retrieving all the port ids on an amphora, except lb network."""

    def execute(self, amphora):
        LOG.debug("Retrieve all but the lb network port id on amphora %s.",
                  amphora.id)

        interfaces = self.network_driver.get_plugged_networks(
            compute_id=amphora.compute_id)

        ports = []
        for interface_ in interfaces:
            if interface_.port_id not in ports:
                port = self.network_driver.get_port(
                    port_id=interface_.port_id)
                ips = port.fixed_ips
                lb_network = False
                for ip in ips:
                    if ip.ip_address == amphora.lb_network_ip:
                        lb_network = True
                if not lb_network:
                    ports.append(port)

        return ports


class PlugPorts(BaseNetworkTask):
    """Task to plug neutron ports into a compute instance."""

    def execute(self, amphora, ports):
        for port in ports:
            LOG.debug('Plugging port ID: %(port_id)s into compute instance: '
                      '%(compute_id)s.',
                      {'port_id': port.id, 'compute_id': amphora.compute_id})
            self.network_driver.plug_port(amphora, port)


class PlugVIPPort(BaseNetworkTask):
    """Task to plug a VIP into a compute instance."""

    def execute(self, amphora, amphorae_network_config):
        vrrp_port = amphorae_network_config.get(amphora.id).vrrp_port
        LOG.debug('Plugging VIP VRRP port ID: %(port_id)s into compute '
                  'instance: %(compute_id)s.',
                  {'port_id': vrrp_port.id, 'compute_id': amphora.compute_id})
        self.network_driver.plug_port(amphora, vrrp_port)

    def revert(self, result, amphora, amphorae_network_config,
               *args, **kwargs):
        vrrp_port = None
        try:
            vrrp_port = amphorae_network_config.get(amphora.id).vrrp_port
            self.network_driver.unplug_port(amphora, vrrp_port)
        except Exception:
            LOG.warning('Failed to unplug vrrp port: %(port)s from amphora: '
                        '%(amp)s', {'port': vrrp_port.id, 'amp': amphora.id})


class WaitForPortDetach(BaseNetworkTask):
    """Task to wait for the neutron ports to detach from an amphora."""

    def execute(self, amphora):
        LOG.debug('Waiting for ports to detach from amphora: %(amp_id)s.',
                  {'amp_id': amphora.id})
        self.network_driver.wait_for_port_detach(amphora)


class ApplyQos(BaseNetworkTask):
    """Apply Quality of Services to the VIP"""

    def _apply_qos_on_vrrp_ports(self, loadbalancer, amps_data, qos_policy_id,
                                 is_revert=False, request_qos_id=None):
        """Call network driver to apply QoS Policy on the vrrp ports."""
        if not amps_data:
            amps_data = loadbalancer.amphorae

        apply_qos = ApplyQosAmphora()
        for amp_data in amps_data:
|
||||||
|
apply_qos._apply_qos_on_vrrp_port(loadbalancer, amp_data,
|
||||||
|
qos_policy_id)
|
||||||
|
|
||||||
|
def execute(self, loadbalancer, amps_data=None, update_dict=None):
|
||||||
|
"""Apply qos policy on the vrrp ports which are related with vip."""
|
||||||
|
qos_policy_id = loadbalancer.vip.qos_policy_id
|
||||||
|
if not qos_policy_id and (
|
||||||
|
update_dict and (
|
||||||
|
'vip' not in update_dict or
|
||||||
|
'qos_policy_id' not in update_dict['vip'])):
|
||||||
|
return
|
||||||
|
self._apply_qos_on_vrrp_ports(loadbalancer, amps_data, qos_policy_id)
|
||||||
|
|
||||||
|
def revert(self, result, loadbalancer, amps_data=None, update_dict=None,
|
||||||
|
*args, **kwargs):
|
||||||
|
"""Handle a failure to apply QoS to VIP"""
|
||||||
|
request_qos_id = loadbalancer.vip.qos_policy_id
|
||||||
|
orig_lb = self.task_utils.get_current_loadbalancer_from_db(
|
||||||
|
loadbalancer.id)
|
||||||
|
orig_qos_id = orig_lb.vip.qos_policy_id
|
||||||
|
if request_qos_id != orig_qos_id:
|
||||||
|
self._apply_qos_on_vrrp_ports(loadbalancer, amps_data, orig_qos_id,
|
||||||
|
is_revert=True,
|
||||||
|
request_qos_id=request_qos_id)
|
||||||
|
|
||||||
|
|
||||||
|
class ApplyQosAmphora(BaseNetworkTask):
    """Apply Quality of Services to the VIP"""

    def _apply_qos_on_vrrp_port(self, loadbalancer, amp_data, qos_policy_id,
                                is_revert=False, request_qos_id=None):
        """Call network driver to apply QoS Policy on the vrrp ports."""
        try:
            self.network_driver.apply_qos_on_port(qos_policy_id,
                                                  amp_data.vrrp_port_id)
        except Exception:
            if not is_revert:
                raise
            LOG.warning('Failed to undo qos policy %(qos_id)s '
                        'on vrrp port: %(port)s from '
                        'amphora: %(amp)s',
                        {'qos_id': request_qos_id,
                         'port': amp_data.vrrp_port_id,
                         'amp': amp_data.id})

    def execute(self, loadbalancer, amp_data=None, update_dict=None):
        """Apply qos policy on the vrrp ports which are related with vip."""
        qos_policy_id = loadbalancer.vip.qos_policy_id
        if not qos_policy_id and (
                update_dict and (
                    'vip' not in update_dict or
                    'qos_policy_id' not in update_dict['vip'])):
            return
        self._apply_qos_on_vrrp_port(loadbalancer, amp_data, qos_policy_id)

    def revert(self, result, loadbalancer, amp_data=None, update_dict=None,
               *args, **kwargs):
        """Handle a failure to apply QoS to VIP"""
        orig_qos_id = None
        try:
            request_qos_id = loadbalancer.vip.qos_policy_id
            orig_lb = self.task_utils.get_current_loadbalancer_from_db(
                loadbalancer.id)
            orig_qos_id = orig_lb.vip.qos_policy_id
            if request_qos_id != orig_qos_id:
                self._apply_qos_on_vrrp_port(loadbalancer, amp_data,
                                             orig_qos_id, is_revert=True,
                                             request_qos_id=request_qos_id)
        except Exception as e:
            LOG.error('Failed to remove QoS policy: %s from port: %s due '
                      'to error: %s', orig_qos_id, amp_data.vrrp_port_id, e)
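The task classes above follow TaskFlow's execute/revert contract: execute performs the forward action against the network driver, and revert runs only when a later task in the same flow fails, undoing whatever execute left behind. A minimal runnable sketch of that contract, assuming only the taskflow library (the stub tasks and values below are illustrative stand-ins, not part of this commit):

# Sketch of the TaskFlow execute/revert pattern used by the tasks above.
# AllocateVIPStub/PlugVIPStub are hypothetical stand-ins, not Octavia code.
from taskflow import engines
from taskflow import task
from taskflow.patterns import linear_flow


class AllocateVIPStub(task.Task):
    def execute(self, loadbalancer):
        # The real AllocateVIP delegates to network_driver.allocate_vip().
        return {'ip_address': '203.0.113.10'}

    def revert(self, result, loadbalancer, *args, **kwargs):
        # Called only if a downstream task fails; mirrors deallocate_vip().
        print('deallocating vip for %s' % loadbalancer)


class PlugVIPStub(task.Task):
    def execute(self, vip):
        print('plugging vip %s' % vip['ip_address'])


flow = linear_flow.Flow('create-lb').add(
    AllocateVIPStub(provides='vip'),
    PlugVIPStub(),
)
engines.run(flow, store={'loadbalancer': 'lb-1'})

Raising an exception from PlugVIPStub.execute would cause the engine to call AllocateVIPStub.revert, which is exactly the cleanup path AllocateVIP and the QoS tasks implement here.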
11
octavia/tests/unit/api/drivers/amphora_driver/v1/__init__.py
Normal file
@ -0,0 +1,11 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
@ -18,7 +18,7 @@ from oslo_utils import uuidutils
 from octavia_lib.api.drivers import data_models as driver_dm
 from octavia_lib.api.drivers import exceptions
 
-from octavia.api.drivers.amphora_driver import driver
+from octavia.api.drivers.amphora_driver.v1 import driver
 from octavia.common import constants as consts
 from octavia.network import base as network_base
 from octavia.tests.unit.api.drivers import sample_data_models
11
octavia/tests/unit/api/drivers/amphora_driver/v2/__init__.py
Normal file
@ -0,0 +1,11 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
@ -0,0 +1,486 @@
# Copyright 2018 Rackspace, US Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import mock

from oslo_utils import uuidutils

from octavia_lib.api.drivers import data_models as driver_dm
from octavia_lib.api.drivers import exceptions

from octavia.api.drivers.amphora_driver.v2 import driver
from octavia.common import constants as consts
from octavia.network import base as network_base
from octavia.tests.unit.api.drivers import sample_data_models
from octavia.tests.unit import base


class TestAmphoraDriver(base.TestRpc):
    def setUp(self):
        super(TestAmphoraDriver, self).setUp()
        self.amp_driver = driver.AmphoraProviderDriver()
        self.sample_data = sample_data_models.SampleDriverDataModels()

    @mock.patch('octavia.common.utils.get_network_driver')
    def test_create_vip_port(self, mock_get_net_driver):
        mock_net_driver = mock.MagicMock()
        mock_get_net_driver.return_value = mock_net_driver
        mock_net_driver.allocate_vip.return_value = self.sample_data.db_vip

        provider_vip_dict = self.amp_driver.create_vip_port(
            self.sample_data.lb_id, self.sample_data.project_id,
            self.sample_data.provider_vip_dict)

        self.assertEqual(self.sample_data.provider_vip_dict,
                         provider_vip_dict)

    @mock.patch('octavia.common.utils.get_network_driver')
    def test_create_vip_port_failed(self, mock_get_net_driver):
        mock_net_driver = mock.MagicMock()
        mock_get_net_driver.return_value = mock_net_driver
        mock_net_driver.allocate_vip.side_effect = (
            network_base.AllocateVIPException())

        self.assertRaises(exceptions.DriverError,
                          self.amp_driver.create_vip_port,
                          self.sample_data.lb_id, self.sample_data.project_id,
                          self.sample_data.provider_vip_dict)

    # Load Balancer
    @mock.patch('oslo_messaging.RPCClient.cast')
    def test_loadbalancer_create(self, mock_cast):
        provider_lb = driver_dm.LoadBalancer(
            loadbalancer_id=self.sample_data.lb_id)
        self.amp_driver.loadbalancer_create(provider_lb)
        payload = {consts.LOAD_BALANCER_ID: self.sample_data.lb_id,
                   consts.FLAVOR: None}
        mock_cast.assert_called_with({}, 'create_load_balancer', **payload)

    @mock.patch('oslo_messaging.RPCClient.cast')
    def test_loadbalancer_delete(self, mock_cast):
        provider_lb = driver_dm.LoadBalancer(
            loadbalancer_id=self.sample_data.lb_id)
        self.amp_driver.loadbalancer_delete(provider_lb)
        payload = {consts.LOAD_BALANCER_ID: self.sample_data.lb_id,
                   'cascade': False}
        mock_cast.assert_called_with({}, 'delete_load_balancer', **payload)

    @mock.patch('oslo_messaging.RPCClient.cast')
    def test_loadbalancer_failover(self, mock_cast):
        self.amp_driver.loadbalancer_failover(self.sample_data.lb_id)
        payload = {consts.LOAD_BALANCER_ID: self.sample_data.lb_id}
        mock_cast.assert_called_with({}, 'failover_load_balancer', **payload)

    @mock.patch('oslo_messaging.RPCClient.cast')
    def test_loadbalancer_update(self, mock_cast):
        old_provider_lb = driver_dm.LoadBalancer(
            loadbalancer_id=self.sample_data.lb_id)
        provider_lb = driver_dm.LoadBalancer(
            loadbalancer_id=self.sample_data.lb_id, admin_state_up=True)
        lb_dict = {'enabled': True}
        self.amp_driver.loadbalancer_update(old_provider_lb, provider_lb)
        payload = {consts.LOAD_BALANCER_ID: self.sample_data.lb_id,
                   consts.LOAD_BALANCER_UPDATES: lb_dict}
        mock_cast.assert_called_with({}, 'update_load_balancer', **payload)

    @mock.patch('oslo_messaging.RPCClient.cast')
    def test_loadbalancer_update_name(self, mock_cast):
        old_provider_lb = driver_dm.LoadBalancer(
            loadbalancer_id=self.sample_data.lb_id)
        provider_lb = driver_dm.LoadBalancer(
            loadbalancer_id=self.sample_data.lb_id, name='Great LB')
        lb_dict = {'name': 'Great LB'}
        self.amp_driver.loadbalancer_update(old_provider_lb, provider_lb)
        payload = {consts.LOAD_BALANCER_ID: self.sample_data.lb_id,
                   consts.LOAD_BALANCER_UPDATES: lb_dict}
        mock_cast.assert_called_with({}, 'update_load_balancer', **payload)

    @mock.patch('oslo_messaging.RPCClient.cast')
    def test_loadbalancer_update_qos(self, mock_cast):
        qos_policy_id = uuidutils.generate_uuid()
        old_provider_lb = driver_dm.LoadBalancer(
            loadbalancer_id=self.sample_data.lb_id)
        provider_lb = driver_dm.LoadBalancer(
            loadbalancer_id=self.sample_data.lb_id,
            vip_qos_policy_id=qos_policy_id)
        lb_dict = {'vip': {'qos_policy_id': qos_policy_id}}
        self.amp_driver.loadbalancer_update(old_provider_lb, provider_lb)
        payload = {consts.LOAD_BALANCER_ID: self.sample_data.lb_id,
                   consts.LOAD_BALANCER_UPDATES: lb_dict}
        mock_cast.assert_called_with({}, 'update_load_balancer', **payload)

    # Listener
    @mock.patch('oslo_messaging.RPCClient.cast')
    def test_listener_create(self, mock_cast):
        provider_listener = driver_dm.Listener(
            listener_id=self.sample_data.listener1_id)
        self.amp_driver.listener_create(provider_listener)
        payload = {consts.LISTENER_ID: self.sample_data.listener1_id}
        mock_cast.assert_called_with({}, 'create_listener', **payload)

    @mock.patch('oslo_messaging.RPCClient.cast')
    def test_listener_delete(self, mock_cast):
        provider_listener = driver_dm.Listener(
            listener_id=self.sample_data.listener1_id)
        self.amp_driver.listener_delete(provider_listener)
        payload = {consts.LISTENER_ID: self.sample_data.listener1_id}
        mock_cast.assert_called_with({}, 'delete_listener', **payload)

    @mock.patch('oslo_messaging.RPCClient.cast')
    def test_listener_update(self, mock_cast):
        old_provider_listener = driver_dm.Listener(
            listener_id=self.sample_data.listener1_id)
        provider_listener = driver_dm.Listener(
            listener_id=self.sample_data.listener1_id, admin_state_up=False)
        listener_dict = {'enabled': False}
        self.amp_driver.listener_update(old_provider_listener,
                                        provider_listener)
        payload = {consts.LISTENER_ID: self.sample_data.listener1_id,
                   consts.LISTENER_UPDATES: listener_dict}
        mock_cast.assert_called_with({}, 'update_listener', **payload)

    @mock.patch('oslo_messaging.RPCClient.cast')
    def test_listener_update_name(self, mock_cast):
        old_provider_listener = driver_dm.Listener(
            listener_id=self.sample_data.listener1_id)
        provider_listener = driver_dm.Listener(
            listener_id=self.sample_data.listener1_id, name='Great Listener')
        listener_dict = {'name': 'Great Listener'}
        self.amp_driver.listener_update(old_provider_listener,
                                        provider_listener)
        payload = {consts.LISTENER_ID: self.sample_data.listener1_id,
                   consts.LISTENER_UPDATES: listener_dict}
        mock_cast.assert_called_with({}, 'update_listener', **payload)

    # Pool
    @mock.patch('oslo_messaging.RPCClient.cast')
    def test_pool_create(self, mock_cast):
        provider_pool = driver_dm.Pool(
            pool_id=self.sample_data.pool1_id)
        self.amp_driver.pool_create(provider_pool)
        payload = {consts.POOL_ID: self.sample_data.pool1_id}
        mock_cast.assert_called_with({}, 'create_pool', **payload)

    @mock.patch('oslo_messaging.RPCClient.cast')
    def test_pool_delete(self, mock_cast):
        provider_pool = driver_dm.Pool(
            pool_id=self.sample_data.pool1_id)
        self.amp_driver.pool_delete(provider_pool)
        payload = {consts.POOL_ID: self.sample_data.pool1_id}
        mock_cast.assert_called_with({}, 'delete_pool', **payload)

    @mock.patch('oslo_messaging.RPCClient.cast')
    def test_pool_update(self, mock_cast):
        old_provider_pool = driver_dm.Pool(
            pool_id=self.sample_data.pool1_id)
        provider_pool = driver_dm.Pool(
            pool_id=self.sample_data.pool1_id, admin_state_up=True)
        pool_dict = {'enabled': True}
        self.amp_driver.pool_update(old_provider_pool, provider_pool)
        payload = {consts.POOL_ID: self.sample_data.pool1_id,
                   consts.POOL_UPDATES: pool_dict}
        mock_cast.assert_called_with({}, 'update_pool', **payload)

    @mock.patch('oslo_messaging.RPCClient.cast')
    def test_pool_update_name(self, mock_cast):
        old_provider_pool = driver_dm.Pool(
            pool_id=self.sample_data.pool1_id)
        provider_pool = driver_dm.Pool(
            pool_id=self.sample_data.pool1_id, name='Great pool',
            admin_state_up=True, tls_enabled=True)
        pool_dict = {'name': 'Great pool',
                     'enabled': True,
                     'tls_enabled': True}
        self.amp_driver.pool_update(old_provider_pool, provider_pool)
        payload = {consts.POOL_ID: self.sample_data.pool1_id,
                   consts.POOL_UPDATES: pool_dict}
        mock_cast.assert_called_with({}, 'update_pool', **payload)

    # Member
    @mock.patch('oslo_messaging.RPCClient.cast')
    def test_member_create(self, mock_cast):
        provider_member = driver_dm.Member(
            member_id=self.sample_data.member1_id)
        self.amp_driver.member_create(provider_member)
        payload = {consts.MEMBER_ID: self.sample_data.member1_id}
        mock_cast.assert_called_with({}, 'create_member', **payload)

    @mock.patch('oslo_messaging.RPCClient.cast')
    def test_member_delete(self, mock_cast):
        provider_member = driver_dm.Member(
            member_id=self.sample_data.member1_id)
        self.amp_driver.member_delete(provider_member)
        payload = {consts.MEMBER_ID: self.sample_data.member1_id}
        mock_cast.assert_called_with({}, 'delete_member', **payload)

    @mock.patch('oslo_messaging.RPCClient.cast')
    def test_member_update(self, mock_cast):
        old_provider_member = driver_dm.Member(
            member_id=self.sample_data.member1_id)
        provider_member = driver_dm.Member(
            member_id=self.sample_data.member1_id, admin_state_up=True)
        member_dict = {'enabled': True}
        self.amp_driver.member_update(old_provider_member, provider_member)
        payload = {consts.MEMBER_ID: self.sample_data.member1_id,
                   consts.MEMBER_UPDATES: member_dict}
        mock_cast.assert_called_with({}, 'update_member', **payload)

    @mock.patch('oslo_messaging.RPCClient.cast')
    def test_member_update_name(self, mock_cast):
        old_provider_member = driver_dm.Member(
            member_id=self.sample_data.member1_id)
        provider_member = driver_dm.Member(
            member_id=self.sample_data.member1_id, name='Great member')
        member_dict = {'name': 'Great member'}
        self.amp_driver.member_update(old_provider_member, provider_member)
        payload = {consts.MEMBER_ID: self.sample_data.member1_id,
                   consts.MEMBER_UPDATES: member_dict}
        mock_cast.assert_called_with({}, 'update_member', **payload)

    @mock.patch('octavia.db.api.get_session')
    @mock.patch('octavia.db.repositories.PoolRepository.get')
    @mock.patch('oslo_messaging.RPCClient.cast')
    def test_member_batch_update(self, mock_cast, mock_pool_get,
                                 mock_session):
        mock_pool = mock.MagicMock()
        mock_pool.members = self.sample_data.db_pool1_members
        mock_pool_get.return_value = mock_pool

        prov_mem_update = driver_dm.Member(
            member_id=self.sample_data.member2_id,
            pool_id=self.sample_data.pool1_id, admin_state_up=False,
            address='192.0.2.17', monitor_address='192.0.2.77',
            protocol_port=80, name='updated-member2')
        prov_new_member = driver_dm.Member(
            member_id=self.sample_data.member3_id,
            pool_id=self.sample_data.pool1_id,
            address='192.0.2.18', monitor_address='192.0.2.28',
            protocol_port=80, name='member3')
        prov_members = [prov_mem_update, prov_new_member]

        update_mem_dict = {'ip_address': '192.0.2.17',
                           'name': 'updated-member2',
                           'monitor_address': '192.0.2.77',
                           'id': self.sample_data.member2_id,
                           'enabled': False,
                           'protocol_port': 80,
                           'pool_id': self.sample_data.pool1_id}

        self.amp_driver.member_batch_update(prov_members)

        payload = {'old_member_ids': [self.sample_data.member1_id],
                   'new_member_ids': [self.sample_data.member3_id],
                   'updated_members': [update_mem_dict]}
        mock_cast.assert_called_with({}, 'batch_update_members', **payload)

    @mock.patch('octavia.db.api.get_session')
    @mock.patch('octavia.db.repositories.PoolRepository.get')
    @mock.patch('oslo_messaging.RPCClient.cast')
    def test_member_batch_update_no_admin_addr(self, mock_cast,
                                               mock_pool_get, mock_session):
        mock_pool = mock.MagicMock()
        mock_pool.members = self.sample_data.db_pool1_members
        mock_pool_get.return_value = mock_pool

        prov_mem_update = driver_dm.Member(
            member_id=self.sample_data.member2_id,
            pool_id=self.sample_data.pool1_id,
            monitor_address='192.0.2.77',
            protocol_port=80, name='updated-member2')
        prov_new_member = driver_dm.Member(
            member_id=self.sample_data.member3_id,
            pool_id=self.sample_data.pool1_id,
            address='192.0.2.18', monitor_address='192.0.2.28',
            protocol_port=80, name='member3')
        prov_members = [prov_mem_update, prov_new_member]

        update_mem_dict = {'name': 'updated-member2',
                           'monitor_address': '192.0.2.77',
                           'id': self.sample_data.member2_id,
                           'protocol_port': 80,
                           'pool_id': self.sample_data.pool1_id}

        self.amp_driver.member_batch_update(prov_members)

        payload = {'old_member_ids': [self.sample_data.member1_id],
                   'new_member_ids': [self.sample_data.member3_id],
                   'updated_members': [update_mem_dict]}
        mock_cast.assert_called_with({}, 'batch_update_members', **payload)

    # Health Monitor
    @mock.patch('oslo_messaging.RPCClient.cast')
    def test_health_monitor_create(self, mock_cast):
        provider_HM = driver_dm.HealthMonitor(
            healthmonitor_id=self.sample_data.hm1_id)
        self.amp_driver.health_monitor_create(provider_HM)
        payload = {consts.HEALTH_MONITOR_ID: self.sample_data.hm1_id}
        mock_cast.assert_called_with({}, 'create_health_monitor', **payload)

    @mock.patch('oslo_messaging.RPCClient.cast')
    def test_health_monitor_delete(self, mock_cast):
        provider_HM = driver_dm.HealthMonitor(
            healthmonitor_id=self.sample_data.hm1_id)
        self.amp_driver.health_monitor_delete(provider_HM)
        payload = {consts.HEALTH_MONITOR_ID: self.sample_data.hm1_id}
        mock_cast.assert_called_with({}, 'delete_health_monitor', **payload)

    @mock.patch('oslo_messaging.RPCClient.cast')
    def test_health_monitor_update(self, mock_cast):
        old_provider_hm = driver_dm.HealthMonitor(
            healthmonitor_id=self.sample_data.hm1_id)
        provider_hm = driver_dm.HealthMonitor(
            healthmonitor_id=self.sample_data.hm1_id, admin_state_up=True,
            max_retries=1, max_retries_down=2)
        hm_dict = {'enabled': True, 'rise_threshold': 1, 'fall_threshold': 2}
        self.amp_driver.health_monitor_update(old_provider_hm, provider_hm)
        payload = {consts.HEALTH_MONITOR_ID: self.sample_data.hm1_id,
                   consts.HEALTH_MONITOR_UPDATES: hm_dict}
        mock_cast.assert_called_with({}, 'update_health_monitor', **payload)

    @mock.patch('oslo_messaging.RPCClient.cast')
    def test_health_monitor_update_name(self, mock_cast):
        old_provider_hm = driver_dm.HealthMonitor(
            healthmonitor_id=self.sample_data.hm1_id)
        provider_hm = driver_dm.HealthMonitor(
            healthmonitor_id=self.sample_data.hm1_id, name='Great HM')
        hm_dict = {'name': 'Great HM'}
        self.amp_driver.health_monitor_update(old_provider_hm, provider_hm)
        payload = {consts.HEALTH_MONITOR_ID: self.sample_data.hm1_id,
                   consts.HEALTH_MONITOR_UPDATES: hm_dict}
        mock_cast.assert_called_with({}, 'update_health_monitor', **payload)

    # L7 Policy
    @mock.patch('oslo_messaging.RPCClient.cast')
    def test_l7policy_create(self, mock_cast):
        provider_l7policy = driver_dm.L7Policy(
            l7policy_id=self.sample_data.l7policy1_id)
        self.amp_driver.l7policy_create(provider_l7policy)
        payload = {consts.L7POLICY_ID: self.sample_data.l7policy1_id}
        mock_cast.assert_called_with({}, 'create_l7policy', **payload)

    @mock.patch('oslo_messaging.RPCClient.cast')
    def test_l7policy_delete(self, mock_cast):
        provider_l7policy = driver_dm.L7Policy(
            l7policy_id=self.sample_data.l7policy1_id)
        self.amp_driver.l7policy_delete(provider_l7policy)
        payload = {consts.L7POLICY_ID: self.sample_data.l7policy1_id}
        mock_cast.assert_called_with({}, 'delete_l7policy', **payload)

    @mock.patch('oslo_messaging.RPCClient.cast')
    def test_l7policy_update(self, mock_cast):
        old_provider_l7policy = driver_dm.L7Policy(
            l7policy_id=self.sample_data.l7policy1_id)
        provider_l7policy = driver_dm.L7Policy(
            l7policy_id=self.sample_data.l7policy1_id, admin_state_up=True)
        l7policy_dict = {'enabled': True}
        self.amp_driver.l7policy_update(old_provider_l7policy,
                                        provider_l7policy)
        payload = {consts.L7POLICY_ID: self.sample_data.l7policy1_id,
                   consts.L7POLICY_UPDATES: l7policy_dict}
        mock_cast.assert_called_with({}, 'update_l7policy', **payload)

    @mock.patch('oslo_messaging.RPCClient.cast')
    def test_l7policy_update_name(self, mock_cast):
        old_provider_l7policy = driver_dm.L7Policy(
            l7policy_id=self.sample_data.l7policy1_id)
        provider_l7policy = driver_dm.L7Policy(
            l7policy_id=self.sample_data.l7policy1_id, name='Great L7Policy')
        l7policy_dict = {'name': 'Great L7Policy'}
        self.amp_driver.l7policy_update(old_provider_l7policy,
                                        provider_l7policy)
        payload = {consts.L7POLICY_ID: self.sample_data.l7policy1_id,
                   consts.L7POLICY_UPDATES: l7policy_dict}
        mock_cast.assert_called_with({}, 'update_l7policy', **payload)

    # L7 Rules
    @mock.patch('oslo_messaging.RPCClient.cast')
    def test_l7rule_create(self, mock_cast):
        provider_l7rule = driver_dm.L7Rule(
            l7rule_id=self.sample_data.l7rule1_id)
        self.amp_driver.l7rule_create(provider_l7rule)
        payload = {consts.L7RULE_ID: self.sample_data.l7rule1_id}
        mock_cast.assert_called_with({}, 'create_l7rule', **payload)

    @mock.patch('oslo_messaging.RPCClient.cast')
    def test_l7rule_delete(self, mock_cast):
        provider_l7rule = driver_dm.L7Rule(
            l7rule_id=self.sample_data.l7rule1_id)
        self.amp_driver.l7rule_delete(provider_l7rule)
        payload = {consts.L7RULE_ID: self.sample_data.l7rule1_id}
        mock_cast.assert_called_with({}, 'delete_l7rule', **payload)

    @mock.patch('oslo_messaging.RPCClient.cast')
    def test_l7rule_update(self, mock_cast):
        old_provider_l7rule = driver_dm.L7Rule(
            l7rule_id=self.sample_data.l7rule1_id)
        provider_l7rule = driver_dm.L7Rule(
            l7rule_id=self.sample_data.l7rule1_id, admin_state_up=True)
        l7rule_dict = {'enabled': True}
        self.amp_driver.l7rule_update(old_provider_l7rule, provider_l7rule)
        payload = {consts.L7RULE_ID: self.sample_data.l7rule1_id,
                   consts.L7RULE_UPDATES: l7rule_dict}
        mock_cast.assert_called_with({}, 'update_l7rule', **payload)

    @mock.patch('oslo_messaging.RPCClient.cast')
    def test_l7rule_update_invert(self, mock_cast):
        old_provider_l7rule = driver_dm.L7Rule(
            l7rule_id=self.sample_data.l7rule1_id)
        provider_l7rule = driver_dm.L7Rule(
            l7rule_id=self.sample_data.l7rule1_id, invert=True)
        l7rule_dict = {'invert': True}
        self.amp_driver.l7rule_update(old_provider_l7rule, provider_l7rule)
        payload = {consts.L7RULE_ID: self.sample_data.l7rule1_id,
                   consts.L7RULE_UPDATES: l7rule_dict}
        mock_cast.assert_called_with({}, 'update_l7rule', **payload)

    # Flavor
    def test_get_supported_flavor_metadata(self):
        test_schema = {
            "properties": {
                "test_name": {"description": "Test description"},
                "test_name2": {"description": "Another description"}}}
        ref_dict = {"test_name": "Test description",
                    "test_name2": "Another description"}

        # mock out the supported_flavor_metadata
        with mock.patch('octavia.api.drivers.amphora_driver.flavor_schema.'
                        'SUPPORTED_FLAVOR_SCHEMA', test_schema):
            result = self.amp_driver.get_supported_flavor_metadata()
        self.assertEqual(ref_dict, result)

        # Test for bad schema
        with mock.patch('octavia.api.drivers.amphora_driver.flavor_schema.'
                        'SUPPORTED_FLAVOR_SCHEMA', 'bogus'):
            self.assertRaises(exceptions.DriverError,
                              self.amp_driver.get_supported_flavor_metadata)

    @mock.patch('jsonschema.validators.requests')
    def test_validate_flavor(self, mock_validate):
        ref_dict = {consts.LOADBALANCER_TOPOLOGY: consts.TOPOLOGY_SINGLE}
        self.amp_driver.validate_flavor(ref_dict)

        # Test bad flavor metadata value is bad
        ref_dict = {consts.LOADBALANCER_TOPOLOGY: 'bogus'}
        self.assertRaises(exceptions.UnsupportedOptionError,
                          self.amp_driver.validate_flavor,
                          ref_dict)

        # Test bad flavor metadata key
        ref_dict = {'bogus': 'bogus'}
        self.assertRaises(exceptions.UnsupportedOptionError,
                          self.amp_driver.validate_flavor,
                          ref_dict)

        # Test for bad schema
        with mock.patch('octavia.api.drivers.amphora_driver.flavor_schema.'
                        'SUPPORTED_FLAVOR_SCHEMA', 'bogus'):
            self.assertRaises(exceptions.DriverError,
                              self.amp_driver.validate_flavor, 'bogus')
@ -45,7 +45,7 @@ class TestHealthManager(base.TestCase):
         super(TestHealthManager, self).setUp()
 
     @mock.patch('octavia.db.api.wait_for_connection')
-    @mock.patch('octavia.controller.worker.controller_worker.'
+    @mock.patch('octavia.controller.worker.v1.controller_worker.'
                 'ControllerWorker.failover_amphora')
     @mock.patch('octavia.db.repositories.AmphoraHealthRepository.'
                 'get_stale_amphora')
@ -86,7 +86,7 @@ class TestHealthManager(base.TestCase):
         self.assertRaises(TestException, hm.health_check)
         self.assertEqual(4, mock_session.rollback.call_count)
 
-    @mock.patch('octavia.controller.worker.controller_worker.'
+    @mock.patch('octavia.controller.worker.v1.controller_worker.'
                 'ControllerWorker.failover_amphora')
     @mock.patch('octavia.db.repositories.AmphoraHealthRepository.'
                 'get_stale_amphora', return_value=None)
@ -102,7 +102,7 @@ class TestHealthManager(base.TestCase):
         session_mock.assert_called_once_with(autocommit=False)
         self.assertFalse(failover_mock.called)
 
-    @mock.patch('octavia.controller.worker.controller_worker.'
+    @mock.patch('octavia.controller.worker.v1.controller_worker.'
                 'ControllerWorker.failover_amphora')
     @mock.patch('octavia.db.repositories.AmphoraHealthRepository.'
                 'get_stale_amphora', return_value=None)
@ -219,7 +219,7 @@ class TestCertRotation(base.TestCase):
     def setUp(self):
         super(TestCertRotation, self).setUp()
 
-    @mock.patch('octavia.controller.worker.controller_worker.'
+    @mock.patch('octavia.controller.worker.v1.controller_worker.'
                 'ControllerWorker.amphora_cert_rotation')
     @mock.patch('octavia.db.repositories.AmphoraRepository.'
                 'get_cert_expiring_amphora')
@ -239,7 +239,7 @@ class TestCertRotation(base.TestCase):
         self.assertRaises(TestException, cr.rotate)
         amp_cert_mock.assert_called_once_with(AMPHORA_ID)
 
-    @mock.patch('octavia.controller.worker.controller_worker.'
+    @mock.patch('octavia.controller.worker.v1.controller_worker.'
                 'ControllerWorker.amphora_cert_rotation')
     @mock.patch('octavia.db.repositories.AmphoraRepository.'
                 'get_cert_expiring_amphora')
@ -259,7 +259,7 @@ class TestCertRotation(base.TestCase):
         self.assertIsNone(cr.rotate())
         amp_cert_mock.assert_called_once_with(AMPHORA_ID)
 
-    @mock.patch('octavia.controller.worker.controller_worker.'
+    @mock.patch('octavia.controller.worker.v1.controller_worker.'
                 'ControllerWorker.amphora_cert_rotation')
     @mock.patch('octavia.db.repositories.AmphoraRepository.'
                 'get_cert_expiring_amphora')
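These hunks are mechanical path updates, but they matter: mock.patch patches a name at the import path where it is looked up, so once the worker moved under octavia.controller.worker.v1 every patch target had to move with it. With the old path the decorator would raise ImportError/ModuleNotFoundError at test setup rather than silently patch nothing. A minimal illustration, assuming octavia is importable:

import mock

# After the move, only the v1 path names a real module, so this target
# resolves; the pre-move 'octavia.controller.worker.controller_worker'
# target would fail to import.
with mock.patch('octavia.controller.worker.v1.controller_worker.'
                'ControllerWorker.failover_amphora') as failover_mock:
    failover_mock.return_value = None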
11
octavia/tests/unit/controller/queue/v1/__init__.py
Normal file
@ -0,0 +1,11 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
@ -17,8 +17,8 @@ from oslo_config import cfg
 from oslo_config import fixture as oslo_fixture
 import oslo_messaging as messaging
 
-from octavia.controller.queue import consumer
-from octavia.controller.queue import endpoint
+from octavia.controller.queue.v1 import consumer
+from octavia.controller.queue.v1 import endpoints
 from octavia.tests.unit import base
 
 
@ -32,7 +32,7 @@ class TestConsumer(base.TestRpc):
         self.conf = conf.conf
 
     @mock.patch.object(messaging, 'Target')
-    @mock.patch.object(endpoint, 'Endpoint')
+    @mock.patch.object(endpoints, 'Endpoints')
     @mock.patch.object(messaging, 'get_rpc_server')
     def test_consumer_run(self, mock_rpc_server, mock_endpoint, mock_target):
         mock_rpc_server_rv = mock.Mock()
@ -17,25 +17,25 @@ from oslo_config import cfg
 from oslo_config import fixture as oslo_fixture
 from oslo_utils import uuidutils
 
-from octavia.controller.queue import endpoint
-from octavia.controller.worker import controller_worker
+from octavia.controller.queue.v1 import endpoints
+from octavia.controller.worker.v1 import controller_worker
 from octavia.tests.unit import base
 
 
-class TestEndpoint(base.TestCase):
+class TestEndpoints(base.TestCase):
 
     def setUp(self):
-        super(TestEndpoint, self).setUp()
+        super(TestEndpoints, self).setUp()
 
         conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
         conf.config(octavia_plugins='hot_plug_plugin')
 
         mock_class = mock.create_autospec(controller_worker.ControllerWorker)
-        self.worker_patcher = mock.patch('octavia.controller.queue.endpoint.'
-                                         'stevedore_driver')
+        self.worker_patcher = mock.patch('octavia.controller.queue.v1.'
+                                         'endpoints.stevedore_driver')
         self.worker_patcher.start().ControllerWorker = mock_class
 
-        self.ep = endpoint.Endpoint()
+        self.ep = endpoints.Endpoints()
         self.context = {}
         self.resource_updates = {}
         self.resource_id = 1234
11
octavia/tests/unit/controller/queue/v2/__init__.py
Normal file
@ -0,0 +1,11 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
72
octavia/tests/unit/controller/queue/v2/test_consumer.py
Normal file
@ -0,0 +1,72 @@
# Copyright 2014 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import mock
from oslo_config import cfg
from oslo_config import fixture as oslo_fixture
import oslo_messaging as messaging

from octavia.common import constants
from octavia.controller.queue.v2 import consumer
from octavia.controller.queue.v2 import endpoints
from octavia.tests.unit import base


class TestConsumer(base.TestRpc):

    def setUp(self):
        super(TestConsumer, self).setUp()
        conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        conf.config(host='test-hostname')
        self.conf = conf.conf

    @mock.patch.object(messaging, 'Target')
    @mock.patch.object(endpoints, 'Endpoints')
    @mock.patch.object(messaging, 'get_rpc_server')
    def test_consumer_run(self, mock_rpc_server, mock_endpoint, mock_target):
        mock_rpc_server_rv = mock.Mock()
        mock_rpc_server.return_value = mock_rpc_server_rv
        mock_endpoint_rv = mock.Mock()
        mock_endpoint.return_value = mock_endpoint_rv
        mock_target_rv = mock.Mock()
        mock_target.return_value = mock_target_rv

        consumer.ConsumerService(1, self.conf).run()

        mock_target.assert_called_once_with(topic=constants.TOPIC_AMPHORA_V2,
                                            server='test-hostname',
                                            fanout=False)
        mock_endpoint.assert_called_once_with()

    @mock.patch.object(messaging, 'get_rpc_server')
    def test_consumer_terminate(self, mock_rpc_server):
        mock_rpc_server_rv = mock.Mock()
        mock_rpc_server.return_value = mock_rpc_server_rv

        cons = consumer.ConsumerService(1, self.conf)
        cons.run()
        cons.terminate()
        mock_rpc_server_rv.stop.assert_called_once_with()
        self.assertFalse(mock_rpc_server_rv.wait.called)

    @mock.patch.object(messaging, 'get_rpc_server')
    def test_consumer_graceful_terminate(self, mock_rpc_server):
        mock_rpc_server_rv = mock.Mock()
        mock_rpc_server.return_value = mock_rpc_server_rv

        cons = consumer.ConsumerService(1, self.conf)
        cons.run()
        cons.terminate(graceful=True)
        mock_rpc_server_rv.stop.assert_called_once_with()
        mock_rpc_server_rv.wait.assert_called_once_with()
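ConsumerService wraps an oslo.messaging RPC server: run() builds a Target from the configured topic and host and starts the server, while terminate() stops it and waits for in-flight requests only when graceful=True, which is what the three tests above assert. A minimal sketch of that wiring (the transport configuration, topic string, and DemoEndpoints class are illustrative assumptions, not this commit's code):

import oslo_messaging as messaging
from oslo_config import cfg


class DemoEndpoints(object):
    def create_load_balancer(self, context, loadbalancer_id, flavor=None):
        print('would create %s' % loadbalancer_id)


# Assumes a transport URL (e.g. rabbit://...) is present in cfg.CONF.
transport = messaging.get_rpc_transport(cfg.CONF)
target = messaging.Target(topic='octavia_provisioning_v2',
                          server='test-hostname', fanout=False)
server = messaging.get_rpc_server(transport, target, [DemoEndpoints()],
                                  executor='threading')
server.start()
# ... casts to the topic are dispatched to DemoEndpoints methods ...
server.stop()
server.wait()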
182
octavia/tests/unit/controller/queue/v2/test_endpoints.py
Normal file
@ -0,0 +1,182 @@
# Copyright 2014 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import mock
from oslo_config import cfg
from oslo_config import fixture as oslo_fixture
from oslo_utils import uuidutils

from octavia.controller.queue.v2 import endpoints
from octavia.controller.worker.v2 import controller_worker
from octavia.tests.unit import base


class TestEndpoints(base.TestCase):

    def setUp(self):
        super(TestEndpoints, self).setUp()

        conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        conf.config(octavia_plugins='hot_plug_plugin')

        mock_class = mock.create_autospec(controller_worker.ControllerWorker)
        self.worker_patcher = mock.patch('octavia.controller.queue.v2.'
                                         'endpoints.stevedore_driver')
        self.worker_patcher.start().ControllerWorker = mock_class

        self.ep = endpoints.Endpoints()
        self.context = {}
        self.resource_updates = {}
        self.resource_id = 1234
        self.server_group_id = 3456
        self.flavor_id = uuidutils.generate_uuid()

    def test_create_load_balancer(self):
        self.ep.create_load_balancer(self.context, self.resource_id,
                                     flavor=self.flavor_id)
        self.ep.worker.create_load_balancer.assert_called_once_with(
            self.resource_id, self.flavor_id)

    def test_create_load_balancer_no_flavor(self):
        self.ep.create_load_balancer(self.context, self.resource_id)
        self.ep.worker.create_load_balancer.assert_called_once_with(
            self.resource_id, None)

    def test_update_load_balancer(self):
        self.ep.update_load_balancer(self.context, self.resource_id,
                                     self.resource_updates)
        self.ep.worker.update_load_balancer.assert_called_once_with(
            self.resource_id, self.resource_updates)

    def test_delete_load_balancer(self):
        self.ep.delete_load_balancer(self.context, self.resource_id)
        self.ep.worker.delete_load_balancer.assert_called_once_with(
            self.resource_id, False)

    def test_failover_load_balancer(self):
        self.ep.failover_load_balancer(self.context, self.resource_id)
        self.ep.worker.failover_loadbalancer.assert_called_once_with(
            self.resource_id)

    def test_failover_amphora(self):
        self.ep.failover_amphora(self.context, self.resource_id)
        self.ep.worker.failover_amphora.assert_called_once_with(
            self.resource_id)

    def test_create_listener(self):
        self.ep.create_listener(self.context, self.resource_id)
        self.ep.worker.create_listener.assert_called_once_with(
            self.resource_id)

    def test_update_listener(self):
        self.ep.update_listener(self.context, self.resource_id,
                                self.resource_updates)
        self.ep.worker.update_listener.assert_called_once_with(
            self.resource_id, self.resource_updates)

    def test_delete_listener(self):
        self.ep.delete_listener(self.context, self.resource_id)
        self.ep.worker.delete_listener.assert_called_once_with(
            self.resource_id)

    def test_create_pool(self):
        self.ep.create_pool(self.context, self.resource_id)
        self.ep.worker.create_pool.assert_called_once_with(
            self.resource_id)

    def test_update_pool(self):
        self.ep.update_pool(self.context, self.resource_id,
                            self.resource_updates)
        self.ep.worker.update_pool.assert_called_once_with(
            self.resource_id, self.resource_updates)

    def test_delete_pool(self):
        self.ep.delete_pool(self.context, self.resource_id)
        self.ep.worker.delete_pool.assert_called_once_with(
            self.resource_id)

    def test_create_health_monitor(self):
        self.ep.create_health_monitor(self.context, self.resource_id)
        self.ep.worker.create_health_monitor.assert_called_once_with(
            self.resource_id)

    def test_update_health_monitor(self):
        self.ep.update_health_monitor(self.context, self.resource_id,
                                      self.resource_updates)
        self.ep.worker.update_health_monitor.assert_called_once_with(
            self.resource_id, self.resource_updates)

    def test_delete_health_monitor(self):
        self.ep.delete_health_monitor(self.context, self.resource_id)
        self.ep.worker.delete_health_monitor.assert_called_once_with(
            self.resource_id)

    def test_create_member(self):
        self.ep.create_member(self.context, self.resource_id)
        self.ep.worker.create_member.assert_called_once_with(
            self.resource_id)

    def test_update_member(self):
        self.ep.update_member(self.context, self.resource_id,
                              self.resource_updates)
        self.ep.worker.update_member.assert_called_once_with(
            self.resource_id, self.resource_updates)

    def test_batch_update_members(self):
        self.ep.batch_update_members(
            self.context, [9], [11], [self.resource_updates])
        self.ep.worker.batch_update_members.assert_called_once_with(
            [9], [11], [self.resource_updates])

    def test_delete_member(self):
        self.ep.delete_member(self.context, self.resource_id)
        self.ep.worker.delete_member.assert_called_once_with(
            self.resource_id)

    def test_create_l7policy(self):
        self.ep.create_l7policy(self.context, self.resource_id)
        self.ep.worker.create_l7policy.assert_called_once_with(
            self.resource_id)

    def test_update_l7policy(self):
        self.ep.update_l7policy(self.context, self.resource_id,
                                self.resource_updates)
        self.ep.worker.update_l7policy.assert_called_once_with(
            self.resource_id, self.resource_updates)

    def test_delete_l7policy(self):
        self.ep.delete_l7policy(self.context, self.resource_id)
        self.ep.worker.delete_l7policy.assert_called_once_with(
            self.resource_id)

    def test_create_l7rule(self):
        self.ep.create_l7rule(self.context, self.resource_id)
        self.ep.worker.create_l7rule.assert_called_once_with(
            self.resource_id)

    def test_update_l7rule(self):
        self.ep.update_l7rule(self.context, self.resource_id,
                              self.resource_updates)
        self.ep.worker.update_l7rule.assert_called_once_with(
            self.resource_id, self.resource_updates)

    def test_delete_l7rule(self):
        self.ep.delete_l7rule(self.context, self.resource_id)
        self.ep.worker.delete_l7rule.assert_called_once_with(
            self.resource_id)

    def test_update_amphora_agent_config(self):
        self.ep.update_amphora_agent_config(self.context, self.resource_id)
        self.ep.worker.update_amphora_agent_config.assert_called_once_with(
            self.resource_id)
11
octavia/tests/unit/controller/worker/v1/__init__.py
Normal file
@ -0,0 +1,11 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
11
octavia/tests/unit/controller/worker/v1/flows/__init__.py
Normal file
@ -0,0 +1,11 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
@ -20,7 +20,7 @@ from taskflow.patterns import linear_flow as flow
|
|||||||
|
|
||||||
from octavia.common import constants
|
from octavia.common import constants
|
||||||
from octavia.common import data_models
|
from octavia.common import data_models
|
||||||
from octavia.controller.worker.flows import amphora_flows
|
from octavia.controller.worker.v1.flows import amphora_flows
|
||||||
import octavia.tests.unit.base as base
|
import octavia.tests.unit.base as base
|
||||||
|
|
||||||
AUTH_VERSION = '2'
|
AUTH_VERSION = '2'
|
@ -16,7 +16,7 @@
from taskflow.patterns import linear_flow as flow

from octavia.common import constants
-from octavia.controller.worker.flows import health_monitor_flows
+from octavia.controller.worker.v1.flows import health_monitor_flows
import octavia.tests.unit.base as base
@ -16,7 +16,7 @@
from taskflow.patterns import linear_flow as flow

from octavia.common import constants
-from octavia.controller.worker.flows import l7policy_flows
+from octavia.controller.worker.v1.flows import l7policy_flows
import octavia.tests.unit.base as base
@ -16,7 +16,7 @@
from taskflow.patterns import linear_flow as flow

from octavia.common import constants
-from octavia.controller.worker.flows import l7rule_flows
+from octavia.controller.worker.v1.flows import l7rule_flows
import octavia.tests.unit.base as base
@ -16,7 +16,7 @@ import mock
from taskflow.patterns import linear_flow as flow

from octavia.common import constants
-from octavia.controller.worker.flows import listener_flows
+from octavia.controller.worker.v1.flows import listener_flows
import octavia.tests.unit.base as base
@ -20,7 +20,7 @@ from taskflow.patterns import linear_flow as flow

from octavia.common import constants
from octavia.common import exceptions
-from octavia.controller.worker.flows import load_balancer_flows
+from octavia.controller.worker.v1.flows import load_balancer_flows
import octavia.tests.unit.base as base
@ -17,7 +17,7 @@ import mock
from taskflow.patterns import linear_flow as flow

from octavia.common import constants
-from octavia.controller.worker.flows import member_flows
+from octavia.controller.worker.v1.flows import member_flows
import octavia.tests.unit.base as base
@ -16,7 +16,7 @@
from taskflow.patterns import linear_flow as flow

from octavia.common import constants
-from octavia.controller.worker.flows import pool_flows
+from octavia.controller.worker.v1.flows import pool_flows
import octavia.tests.unit.base as base
11
octavia/tests/unit/controller/worker/v1/tasks/__init__.py
Normal file
@ -0,0 +1,11 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
@ -24,7 +24,7 @@ from octavia.amphorae.driver_exceptions import exceptions as driver_except
from octavia.common import constants
from octavia.common import data_models
from octavia.common import utils
-from octavia.controller.worker.tasks import amphora_driver_tasks
+from octavia.controller.worker.v1.tasks import amphora_driver_tasks
from octavia.db import repositories as repo
import octavia.tests.unit.base as base
@ -60,7 +60,7 @@ _session_mock = mock.MagicMock()
@mock.patch('octavia.db.repositories.ListenerRepository.get',
            return_value=_listener_mock)
@mock.patch('octavia.db.api.get_session', return_value=_session_mock)
-@mock.patch('octavia.controller.worker.tasks.amphora_driver_tasks.LOG')
+@mock.patch('octavia.controller.worker.v1.tasks.amphora_driver_tasks.LOG')
@mock.patch('oslo_utils.uuidutils.generate_uuid', return_value=AMP_ID)
@mock.patch('stevedore.driver.DriverManager.driver')
class TestAmphoraDriverTasks(base.TestCase):
@ -20,7 +20,7 @@ from oslo_config import cfg

from octavia.certificates.common import local
from octavia.common import utils
-from octavia.controller.worker.tasks import cert_task
+from octavia.controller.worker.v1.tasks import cert_task
import octavia.tests.unit.base as base

CONF = cfg.CONF
@ -22,7 +22,7 @@ from oslo_utils import uuidutils
from octavia.common import constants
from octavia.common import exceptions
from octavia.common import utils
-from octavia.controller.worker.tasks import compute_tasks
+from octavia.controller.worker.v1.tasks import compute_tasks
from octavia.tests.common import utils as test_utils
import octavia.tests.unit.base as base
@ -25,7 +25,7 @@ from taskflow.types import failure
from octavia.common import constants
from octavia.common import data_models
from octavia.common import utils
-from octavia.controller.worker.tasks import database_tasks
+from octavia.controller.worker.v1.tasks import database_tasks
from octavia.db import repositories as repo
import octavia.tests.unit.base as base
@ -97,7 +97,7 @@ _compute_mock.compute_flavor = COMPUTE_FLAVOR
@mock.patch('octavia.db.repositories.ListenerRepository.update')
@mock.patch('octavia.db.repositories.LoadBalancerRepository.update')
@mock.patch('octavia.db.api.get_session', return_value='TEST')
-@mock.patch('octavia.controller.worker.tasks.database_tasks.LOG')
+@mock.patch('octavia.controller.worker.v1.tasks.database_tasks.LOG')
@mock.patch('oslo_utils.uuidutils.generate_uuid', return_value=AMP_ID)
class TestDatabaseTasks(base.TestCase):
@ -19,7 +19,7 @@ from taskflow.types import failure

from octavia.common import data_models
from octavia.common import exceptions
-from octavia.controller.worker.tasks import database_tasks
+from octavia.controller.worker.v1.tasks import database_tasks
import octavia.tests.unit.base as base
@ -15,7 +15,7 @@
import mock
from oslo_utils import uuidutils

-from octavia.controller.worker.tasks import lifecycle_tasks
+from octavia.controller.worker.v1.tasks import lifecycle_tasks
import octavia.tests.unit.base as base
@ -15,7 +15,7 @@

import mock

-from octavia.controller.worker.tasks import model_tasks
+from octavia.controller.worker.v1.tasks import model_tasks
import octavia.tests.unit.base as base
@ -21,7 +21,7 @@ from taskflow.types import failure

from octavia.common import constants
from octavia.common import data_models as o_data_models
-from octavia.controller.worker.tasks import network_tasks
+from octavia.controller.worker.v1.tasks import network_tasks
from octavia.network import base as net_base
from octavia.network import data_models
from octavia.tests.common import constants as t_constants
@ -21,7 +21,7 @@ from oslo_utils import uuidutils
from octavia.common import base_taskflow
from octavia.common import constants
from octavia.common import data_models
-from octavia.controller.worker import controller_worker
+from octavia.controller.worker.v1 import controller_worker
import octavia.tests.unit.base as base
@ -115,7 +115,7 @@ class TestControllerWorker(base.TestCase):

        super(TestControllerWorker, self).setUp()

-    @mock.patch('octavia.controller.worker.flows.'
+    @mock.patch('octavia.controller.worker.v1.flows.'
                'amphora_flows.AmphoraFlows.get_create_amphora_flow',
                return_value='TEST')
    def test_create_amphora(self,
@ -150,7 +150,7 @@ class TestControllerWorker(base.TestCase):

        self.assertEqual(AMP_ID, amp)

-    @mock.patch('octavia.controller.worker.flows.'
+    @mock.patch('octavia.controller.worker.v1.flows.'
                'amphora_flows.AmphoraFlows.get_delete_amphora_flow',
                return_value='TEST')
    def test_delete_amphora(self,
@ -182,7 +182,7 @@ class TestControllerWorker(base.TestCase):

        _flow_mock.run.assert_called_once_with()

-    @mock.patch('octavia.controller.worker.flows.'
+    @mock.patch('octavia.controller.worker.v1.flows.'
                'health_monitor_flows.HealthMonitorFlows.'
                'get_create_health_monitor_flow',
                return_value=_flow_mock)
@ -220,7 +220,7 @@ class TestControllerWorker(base.TestCase):
        _flow_mock.run.assert_called_once_with()
        self.assertEqual(2, mock_health_mon_repo_get.call_count)

-    @mock.patch('octavia.controller.worker.flows.'
+    @mock.patch('octavia.controller.worker.v1.flows.'
                'health_monitor_flows.HealthMonitorFlows.'
                'get_delete_health_monitor_flow',
                return_value=_flow_mock)
@ -256,7 +256,7 @@ class TestControllerWorker(base.TestCase):

        _flow_mock.run.assert_called_once_with()

-    @mock.patch('octavia.controller.worker.flows.'
+    @mock.patch('octavia.controller.worker.v1.flows.'
                'health_monitor_flows.HealthMonitorFlows.'
                'get_update_health_monitor_flow',
                return_value=_flow_mock)
@ -296,7 +296,7 @@ class TestControllerWorker(base.TestCase):

        _flow_mock.run.assert_called_once_with()

-    @mock.patch('octavia.controller.worker.flows.'
+    @mock.patch('octavia.controller.worker.v1.flows.'
                'listener_flows.ListenerFlows.get_create_listener_flow',
                return_value=_flow_mock)
    def test_create_listener(self,
@ -329,7 +329,7 @@ class TestControllerWorker(base.TestCase):
        _flow_mock.run.assert_called_once_with()
        self.assertEqual(2, mock_listener_repo_get.call_count)

-    @mock.patch('octavia.controller.worker.flows.'
+    @mock.patch('octavia.controller.worker.v1.flows.'
                'listener_flows.ListenerFlows.get_delete_listener_flow',
                return_value=_flow_mock)
    def test_delete_listener(self,
@ -358,7 +358,7 @@ class TestControllerWorker(base.TestCase):

        _flow_mock.run.assert_called_once_with()

-    @mock.patch('octavia.controller.worker.flows.'
+    @mock.patch('octavia.controller.worker.v1.flows.'
                'listener_flows.ListenerFlows.get_update_listener_flow',
                return_value=_flow_mock)
    def test_update_listener(self,
@ -393,7 +393,7 @@ class TestControllerWorker(base.TestCase):

        _flow_mock.run.assert_called_once_with()

-    @mock.patch('octavia.controller.worker.flows.load_balancer_flows.'
+    @mock.patch('octavia.controller.worker.v1.flows.load_balancer_flows.'
                'LoadBalancerFlows.get_create_load_balancer_flow',
                return_value=_flow_mock)
    def test_create_load_balancer_single(
@ -439,7 +439,7 @@ class TestControllerWorker(base.TestCase):
        mock_eng.run.assert_any_call()
        self.assertEqual(4, mock_lb_repo_get.call_count)

-    @mock.patch('octavia.controller.worker.flows.load_balancer_flows.'
+    @mock.patch('octavia.controller.worker.v1.flows.load_balancer_flows.'
                'LoadBalancerFlows.get_create_load_balancer_flow',
                return_value=_flow_mock)
    def test_create_load_balancer_active_standby(
@ -484,7 +484,7 @@ class TestControllerWorker(base.TestCase):
            mock_get_create_load_balancer_flow.return_value, store=store)
        mock_eng.run.assert_any_call()

-    @mock.patch('octavia.controller.worker.flows.load_balancer_flows.'
+    @mock.patch('octavia.controller.worker.v1.flows.load_balancer_flows.'
                'LoadBalancerFlows.get_create_load_balancer_flow')
    def test_create_load_balancer_full_graph_single(
            self,
@ -530,11 +530,11 @@ class TestControllerWorker(base.TestCase):
            mock_get_create_load_balancer_flow.return_value, store=store)
        mock_eng.run.assert_any_call()

-    @mock.patch('octavia.controller.worker.flows.load_balancer_flows.'
+    @mock.patch('octavia.controller.worker.v1.flows.load_balancer_flows.'
                'LoadBalancerFlows.get_create_load_balancer_flow')
-    @mock.patch('octavia.controller.worker.flows.load_balancer_flows.'
+    @mock.patch('octavia.controller.worker.v1.flows.load_balancer_flows.'
                'LoadBalancerFlows._create_single_topology')
-    @mock.patch('octavia.controller.worker.flows.load_balancer_flows.'
+    @mock.patch('octavia.controller.worker.v1.flows.load_balancer_flows.'
                'LoadBalancerFlows._create_active_standby_topology')
    def test_create_load_balancer_full_graph_active_standby(
            self,
@ -581,7 +581,7 @@ class TestControllerWorker(base.TestCase):
            mock_get_create_load_balancer_flow.return_value, store=store)
        mock_eng.run.assert_any_call()

-    @mock.patch('octavia.controller.worker.flows.load_balancer_flows.'
+    @mock.patch('octavia.controller.worker.v1.flows.load_balancer_flows.'
                'LoadBalancerFlows.get_delete_load_balancer_flow',
                return_value=(_flow_mock, {'test': 'test'}))
    def test_delete_load_balancer_without_cascade(self,
@ -619,7 +619,7 @@ class TestControllerWorker(base.TestCase):
        )
        _flow_mock.run.assert_called_once_with()

-    @mock.patch('octavia.controller.worker.flows.load_balancer_flows.'
+    @mock.patch('octavia.controller.worker.v1.flows.load_balancer_flows.'
                'LoadBalancerFlows.get_cascade_delete_load_balancer_flow',
                return_value=(_flow_mock, {'test': 'test'}))
    def test_delete_load_balancer_with_cascade(self,
@ -657,7 +657,7 @@ class TestControllerWorker(base.TestCase):
        )
        _flow_mock.run.assert_called_once_with()

-    @mock.patch('octavia.controller.worker.flows.load_balancer_flows.'
+    @mock.patch('octavia.controller.worker.v1.flows.load_balancer_flows.'
                'LoadBalancerFlows.get_update_load_balancer_flow',
                return_value=_flow_mock)
    @mock.patch('octavia.db.repositories.ListenerRepository.get_all',
@ -698,7 +698,7 @@ class TestControllerWorker(base.TestCase):

        _flow_mock.run.assert_called_once_with()

-    @mock.patch('octavia.controller.worker.flows.'
+    @mock.patch('octavia.controller.worker.v1.flows.'
                'member_flows.MemberFlows.get_create_member_flow',
                return_value=_flow_mock)
    def test_create_member(self,
@ -734,7 +734,7 @@ class TestControllerWorker(base.TestCase):
        _flow_mock.run.assert_called_once_with()
        self.assertEqual(2, mock_member_repo_get.call_count)

-    @mock.patch('octavia.controller.worker.flows.'
+    @mock.patch('octavia.controller.worker.v1.flows.'
                'member_flows.MemberFlows.get_delete_member_flow',
                return_value=_flow_mock)
    def test_delete_member(self,
@ -768,7 +768,7 @@ class TestControllerWorker(base.TestCase):

        _flow_mock.run.assert_called_once_with()

-    @mock.patch('octavia.controller.worker.flows.'
+    @mock.patch('octavia.controller.worker.v1.flows.'
                'member_flows.MemberFlows.get_update_member_flow',
                return_value=_flow_mock)
    def test_update_member(self,
@ -805,7 +805,7 @@ class TestControllerWorker(base.TestCase):

        _flow_mock.run.assert_called_once_with()

-    @mock.patch('octavia.controller.worker.flows.'
+    @mock.patch('octavia.controller.worker.v1.flows.'
                'member_flows.MemberFlows.get_batch_update_members_flow',
                return_value=_flow_mock)
    def test_batch_update_members(self,
@ -837,7 +837,7 @@ class TestControllerWorker(base.TestCase):

        _flow_mock.run.assert_called_once_with()

-    @mock.patch('octavia.controller.worker.flows.'
+    @mock.patch('octavia.controller.worker.v1.flows.'
                'pool_flows.PoolFlows.get_create_pool_flow',
                return_value=_flow_mock)
    def test_create_pool(self,
@ -871,7 +871,7 @@ class TestControllerWorker(base.TestCase):
        _flow_mock.run.assert_called_once_with()
        self.assertEqual(2, mock_pool_repo_get.call_count)

-    @mock.patch('octavia.controller.worker.flows.'
+    @mock.patch('octavia.controller.worker.v1.flows.'
                'pool_flows.PoolFlows.get_delete_pool_flow',
                return_value=_flow_mock)
    def test_delete_pool(self,
@ -903,7 +903,7 @@ class TestControllerWorker(base.TestCase):

        _flow_mock.run.assert_called_once_with()

-    @mock.patch('octavia.controller.worker.flows.'
+    @mock.patch('octavia.controller.worker.v1.flows.'
                'pool_flows.PoolFlows.get_update_pool_flow',
                return_value=_flow_mock)
    def test_update_pool(self,
@ -938,7 +938,7 @@ class TestControllerWorker(base.TestCase):

        _flow_mock.run.assert_called_once_with()

-    @mock.patch('octavia.controller.worker.flows.'
+    @mock.patch('octavia.controller.worker.v1.flows.'
                'l7policy_flows.L7PolicyFlows.get_create_l7policy_flow',
                return_value=_flow_mock)
    def test_create_l7policy(self,
@ -972,7 +972,7 @@ class TestControllerWorker(base.TestCase):
        _flow_mock.run.assert_called_once_with()
        self.assertEqual(2, mock_l7policy_repo_get.call_count)

-    @mock.patch('octavia.controller.worker.flows.'
+    @mock.patch('octavia.controller.worker.v1.flows.'
                'l7policy_flows.L7PolicyFlows.get_delete_l7policy_flow',
                return_value=_flow_mock)
    def test_delete_l7policy(self,
@ -1004,7 +1004,7 @@ class TestControllerWorker(base.TestCase):

        _flow_mock.run.assert_called_once_with()

-    @mock.patch('octavia.controller.worker.flows.'
+    @mock.patch('octavia.controller.worker.v1.flows.'
                'l7policy_flows.L7PolicyFlows.get_update_l7policy_flow',
                return_value=_flow_mock)
    def test_update_l7policy(self,
@ -1039,7 +1039,7 @@ class TestControllerWorker(base.TestCase):

        _flow_mock.run.assert_called_once_with()

-    @mock.patch('octavia.controller.worker.flows.'
+    @mock.patch('octavia.controller.worker.v1.flows.'
                'l7rule_flows.L7RuleFlows.get_create_l7rule_flow',
                return_value=_flow_mock)
    def test_create_l7rule(self,
@ -1074,7 +1074,7 @@ class TestControllerWorker(base.TestCase):
        _flow_mock.run.assert_called_once_with()
        self.assertEqual(2, mock_l7rule_repo_get.call_count)

-    @mock.patch('octavia.controller.worker.flows.'
+    @mock.patch('octavia.controller.worker.v1.flows.'
                'l7rule_flows.L7RuleFlows.get_delete_l7rule_flow',
                return_value=_flow_mock)
    def test_delete_l7rule(self,
@ -1107,7 +1107,7 @@ class TestControllerWorker(base.TestCase):

        _flow_mock.run.assert_called_once_with()

-    @mock.patch('octavia.controller.worker.flows.'
+    @mock.patch('octavia.controller.worker.v1.flows.'
                'l7rule_flows.L7RuleFlows.get_update_l7rule_flow',
                return_value=_flow_mock)
    def test_update_l7rule(self,
@ -1145,7 +1145,7 @@ class TestControllerWorker(base.TestCase):

    @mock.patch('octavia.db.repositories.FlavorRepository.'
                'get_flavor_metadata_dict', return_value={})
-    @mock.patch('octavia.controller.worker.flows.'
+    @mock.patch('octavia.controller.worker.v1.flows.'
                'amphora_flows.AmphoraFlows.get_failover_flow',
                return_value=_flow_mock)
    @mock.patch('octavia.db.repositories.LoadBalancerRepository.update')
@ -1185,8 +1185,8 @@ class TestControllerWorker(base.TestCase):
        mock_update.assert_called_with(_db_session, LB_ID,
                                       provisioning_status=constants.ACTIVE)

-    @mock.patch('octavia.controller.worker.controller_worker.ControllerWorker.'
-                '_perform_amphora_failover')
+    @mock.patch('octavia.controller.worker.v1.controller_worker.'
+                'ControllerWorker._perform_amphora_failover')
    def test_failover_amp_missing_amp(self,
                                      mock_perform_amp_failover,
                                      mock_api_get_session,
@ -1208,8 +1208,8 @@ class TestControllerWorker(base.TestCase):

        mock_perform_amp_failover.assert_not_called()

-    @mock.patch('octavia.controller.worker.controller_worker.ControllerWorker.'
-                '_perform_amphora_failover')
+    @mock.patch('octavia.controller.worker.v1.controller_worker.'
+                'ControllerWorker._perform_amphora_failover')
    @mock.patch('octavia.db.repositories.LoadBalancerRepository.update')
    def test_failover_amp_flow_exception(self,
                                         mock_update,
@ -1232,8 +1232,8 @@ class TestControllerWorker(base.TestCase):
        mock_update.assert_called_with(_db_session, LB_ID,
                                       provisioning_status=constants.ERROR)

-    @mock.patch('octavia.controller.worker.controller_worker.ControllerWorker.'
-                '_perform_amphora_failover')
+    @mock.patch('octavia.controller.worker.v1.controller_worker.'
+                'ControllerWorker._perform_amphora_failover')
    @mock.patch('octavia.db.repositories.LoadBalancerRepository.update')
    def test_failover_amp_no_lb(self,
                                mock_lb_update,
@ -1287,7 +1287,7 @@ class TestControllerWorker(base.TestCase):
        mock_delete.assert_called_with(_db_session, amphora_id=AMP_ID)
        mock_taskflow_load.assert_not_called()

-    @mock.patch('octavia.controller.worker.'
+    @mock.patch('octavia.controller.worker.v1.'
                'controller_worker.ControllerWorker._perform_amphora_failover')
    @mock.patch('octavia.db.repositories.LoadBalancerRepository.update')
    def test_failover_loadbalancer(self,
@ -1336,7 +1336,7 @@ class TestControllerWorker(base.TestCase):

    @mock.patch('octavia.db.repositories.FlavorRepository.'
                'get_flavor_metadata_dict', return_value={})
-    @mock.patch('octavia.controller.worker.flows.'
+    @mock.patch('octavia.controller.worker.v1.flows.'
                'amphora_flows.AmphoraFlows.get_failover_flow',
                return_value=_flow_mock)
    @mock.patch(
@ -1383,7 +1383,7 @@ class TestControllerWorker(base.TestCase):
        mock_update.assert_called_with(_db_session, LB_ID,
                                       provisioning_status=constants.ACTIVE)

-    @mock.patch('octavia.controller.worker.flows.'
+    @mock.patch('octavia.controller.worker.v1.flows.'
                'amphora_flows.AmphoraFlows.cert_rotate_amphora_flow',
                return_value=_flow_mock)
    def test_amphora_cert_rotation(self,
@ -1412,7 +1412,7 @@ class TestControllerWorker(base.TestCase):
    @mock.patch('octavia.db.repositories.FlavorRepository.'
                'get_flavor_metadata_dict')
    @mock.patch('octavia.db.repositories.AmphoraRepository.get_lb_for_amphora')
-    @mock.patch('octavia.controller.worker.flows.'
+    @mock.patch('octavia.controller.worker.v1.flows.'
                'amphora_flows.AmphoraFlows.update_amphora_config_flow',
                return_value=_flow_mock)
    def test_update_amphora_agent_config(self,
11
octavia/tests/unit/controller/worker/v2/__init__.py
Normal file
@ -0,0 +1,11 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
11
octavia/tests/unit/controller/worker/v2/flows/__init__.py
Normal file
@ -0,0 +1,11 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
@ -0,0 +1,422 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

import mock
from oslo_config import cfg
from oslo_config import fixture as oslo_fixture
from taskflow.patterns import linear_flow as flow

from octavia.common import constants
from octavia.common import data_models
from octavia.controller.worker.v2.flows import amphora_flows
import octavia.tests.unit.base as base

AUTH_VERSION = '2'


# NOTE: We patch the get_network_driver for all the calls so we don't
# inadvertently make real calls.
@mock.patch('octavia.common.utils.get_network_driver')
class TestAmphoraFlows(base.TestCase):

    def setUp(self):
        super(TestAmphoraFlows, self).setUp()
        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        self.conf.config(
            group="controller_worker",
            amphora_driver='amphora_haproxy_rest_driver')
        self.conf.config(group="nova", enable_anti_affinity=False)
        self.AmpFlow = amphora_flows.AmphoraFlows()
        self.amp1 = data_models.Amphora(id=1)
        self.amp2 = data_models.Amphora(id=2)
        self.amp3 = data_models.Amphora(id=3, status=constants.DELETED)
        self.lb = data_models.LoadBalancer(
            id=4, amphorae=[self.amp1, self.amp2, self.amp3])

    def test_get_create_amphora_flow(self, mock_get_net_driver):

        amp_flow = self.AmpFlow.get_create_amphora_flow()

        self.assertIsInstance(amp_flow, flow.Flow)

        self.assertIn(constants.AMPHORA, amp_flow.provides)
        self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
        self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
        self.assertIn(constants.SERVER_PEM, amp_flow.provides)

        self.assertEqual(5, len(amp_flow.provides))
        self.assertEqual(2, len(amp_flow.requires))

    def test_get_create_amphora_flow_cert(self, mock_get_net_driver):
        self.AmpFlow = amphora_flows.AmphoraFlows()

        amp_flow = self.AmpFlow.get_create_amphora_flow()

        self.assertIsInstance(amp_flow, flow.Flow)

        self.assertIn(constants.AMPHORA, amp_flow.provides)
        self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
        self.assertIn(constants.COMPUTE_ID, amp_flow.provides)

        self.assertEqual(5, len(amp_flow.provides))
        self.assertEqual(2, len(amp_flow.requires))

    def test_get_create_amphora_for_lb_flow(self, mock_get_net_driver):

        amp_flow = self.AmpFlow._get_create_amp_for_lb_subflow(
            'SOMEPREFIX', constants.ROLE_STANDALONE)

        self.assertIsInstance(amp_flow, flow.Flow)

        self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires)

        self.assertIn(constants.AMPHORA, amp_flow.provides)
        self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
        self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
        self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides)
        self.assertIn(constants.SERVER_PEM, amp_flow.provides)

        self.assertEqual(5, len(amp_flow.provides))
        self.assertEqual(3, len(amp_flow.requires))

    def test_get_cert_create_amphora_for_lb_flow(self, mock_get_net_driver):

        self.AmpFlow = amphora_flows.AmphoraFlows()

        amp_flow = self.AmpFlow._get_create_amp_for_lb_subflow(
            'SOMEPREFIX', constants.ROLE_STANDALONE)

        self.assertIsInstance(amp_flow, flow.Flow)

        self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires)

        self.assertIn(constants.AMPHORA, amp_flow.provides)
        self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
        self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
        self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides)
        self.assertIn(constants.SERVER_PEM, amp_flow.provides)

        self.assertEqual(5, len(amp_flow.provides))
        self.assertEqual(3, len(amp_flow.requires))

    def test_get_cert_master_create_amphora_for_lb_flow(
            self, mock_get_net_driver):

        self.AmpFlow = amphora_flows.AmphoraFlows()

        amp_flow = self.AmpFlow._get_create_amp_for_lb_subflow(
            'SOMEPREFIX', constants.ROLE_MASTER)

        self.assertIsInstance(amp_flow, flow.Flow)

        self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires)

        self.assertIn(constants.AMPHORA, amp_flow.provides)
        self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
        self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
        self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides)
        self.assertIn(constants.SERVER_PEM, amp_flow.provides)

        self.assertEqual(5, len(amp_flow.provides))
        self.assertEqual(3, len(amp_flow.requires))

    def test_get_cert_master_rest_anti_affinity_create_amphora_for_lb_flow(
            self, mock_get_net_driver):

        self.conf.config(group="nova", enable_anti_affinity=True)

        self.AmpFlow = amphora_flows.AmphoraFlows()
        amp_flow = self.AmpFlow._get_create_amp_for_lb_subflow(
            'SOMEPREFIX', constants.ROLE_MASTER)

        self.assertIsInstance(amp_flow, flow.Flow)
        self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
        self.assertIn(constants.SERVER_GROUP_ID, amp_flow.requires)
        self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
        self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides)
        self.assertIn(constants.SERVER_PEM, amp_flow.provides)

        self.assertEqual(5, len(amp_flow.provides))
        self.assertEqual(4, len(amp_flow.requires))
        self.conf.config(group="nova", enable_anti_affinity=False)

    def test_get_cert_backup_create_amphora_for_lb_flow(
            self, mock_get_net_driver):
        self.AmpFlow = amphora_flows.AmphoraFlows()

        amp_flow = self.AmpFlow._get_create_amp_for_lb_subflow(
            'SOMEPREFIX', constants.ROLE_BACKUP)

        self.assertIsInstance(amp_flow, flow.Flow)

        self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires)

        self.assertIn(constants.AMPHORA, amp_flow.provides)
        self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
        self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
        self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides)
        self.assertIn(constants.SERVER_PEM, amp_flow.provides)

        self.assertEqual(5, len(amp_flow.provides))
        self.assertEqual(3, len(amp_flow.requires))

    def test_get_cert_bogus_create_amphora_for_lb_flow(
            self, mock_get_net_driver):
        self.AmpFlow = amphora_flows.AmphoraFlows()

        amp_flow = self.AmpFlow._get_create_amp_for_lb_subflow(
            'SOMEPREFIX', 'BOGUS_ROLE')

        self.assertIsInstance(amp_flow, flow.Flow)

        self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires)

        self.assertIn(constants.AMPHORA, amp_flow.provides)
        self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
        self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
        self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides)
        self.assertIn(constants.SERVER_PEM, amp_flow.provides)

        self.assertEqual(5, len(amp_flow.provides))
        self.assertEqual(3, len(amp_flow.requires))

    def test_get_cert_backup_rest_anti_affinity_create_amphora_for_lb_flow(
            self, mock_get_net_driver):
        self.conf.config(group="nova", enable_anti_affinity=True)

        self.AmpFlow = amphora_flows.AmphoraFlows()
        amp_flow = self.AmpFlow._get_create_amp_for_lb_subflow(
            'SOMEPREFIX', constants.ROLE_BACKUP)

        self.assertIsInstance(amp_flow, flow.Flow)
        self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
        self.assertIn(constants.SERVER_GROUP_ID, amp_flow.requires)
        self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
        self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides)
        self.assertIn(constants.SERVER_PEM, amp_flow.provides)

        self.assertEqual(5, len(amp_flow.provides))
        self.assertEqual(4, len(amp_flow.requires))
        self.conf.config(group="nova", enable_anti_affinity=False)

    def test_get_delete_amphora_flow(self, mock_get_net_driver):

        amp_flow = self.AmpFlow.get_delete_amphora_flow()

        self.assertIsInstance(amp_flow, flow.Flow)

        self.assertIn(constants.AMPHORA, amp_flow.requires)

        self.assertEqual(0, len(amp_flow.provides))
        self.assertEqual(1, len(amp_flow.requires))

    def test_allocate_amp_to_lb_decider(self, mock_get_net_driver):
        history = mock.MagicMock()
        values = mock.MagicMock(side_effect=[['TEST'], [None]])
        history.values = values
        result = self.AmpFlow._allocate_amp_to_lb_decider(history)
        self.assertTrue(result)
        result = self.AmpFlow._allocate_amp_to_lb_decider(history)
        self.assertFalse(result)

    def test_create_new_amp_for_lb_decider(self, mock_get_net_driver):
        history = mock.MagicMock()
        values = mock.MagicMock(side_effect=[[None], ['TEST']])
        history.values = values
        result = self.AmpFlow._create_new_amp_for_lb_decider(history)
        self.assertTrue(result)
        result = self.AmpFlow._create_new_amp_for_lb_decider(history)
        self.assertFalse(result)

    def test_get_failover_flow_allocated(self, mock_get_net_driver):

        amp_flow = self.AmpFlow.get_failover_flow(
            load_balancer=self.lb)

        self.assertIsInstance(amp_flow, flow.Flow)

        self.assertIn(constants.FAILED_AMPHORA, amp_flow.requires)
        self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires)

        self.assertIn(constants.AMP_DATA, amp_flow.provides)
        self.assertIn(constants.AMPHORA, amp_flow.provides)
        self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
        self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, amp_flow.provides)
        self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
        self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides)
        self.assertIn(constants.LISTENERS, amp_flow.provides)
        self.assertIn(constants.LOADBALANCER, amp_flow.provides)

        self.assertEqual(4, len(amp_flow.requires))
        self.assertEqual(12, len(amp_flow.provides))

        amp_flow = self.AmpFlow.get_failover_flow(
            role=constants.ROLE_MASTER, load_balancer=self.lb)

        self.assertIsInstance(amp_flow, flow.Flow)

        self.assertIn(constants.FAILED_AMPHORA, amp_flow.requires)
        self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires)

        self.assertIn(constants.AMP_DATA, amp_flow.provides)
        self.assertIn(constants.AMPHORA, amp_flow.provides)
        self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
        self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, amp_flow.provides)
        self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
        self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides)
        self.assertIn(constants.LISTENERS, amp_flow.provides)
        self.assertIn(constants.LOADBALANCER, amp_flow.provides)

        self.assertEqual(4, len(amp_flow.requires))
        self.assertEqual(12, len(amp_flow.provides))

        amp_flow = self.AmpFlow.get_failover_flow(
            role=constants.ROLE_BACKUP, load_balancer=self.lb)

        self.assertIsInstance(amp_flow, flow.Flow)

        self.assertIn(constants.FAILED_AMPHORA, amp_flow.requires)
        self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires)

        self.assertIn(constants.AMP_DATA, amp_flow.provides)
        self.assertIn(constants.AMPHORA, amp_flow.provides)
        self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
        self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, amp_flow.provides)
        self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
        self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides)
        self.assertIn(constants.LISTENERS, amp_flow.provides)
        self.assertIn(constants.LOADBALANCER, amp_flow.provides)

        self.assertEqual(4, len(amp_flow.requires))
        self.assertEqual(12, len(amp_flow.provides))

        amp_flow = self.AmpFlow.get_failover_flow(
            role='BOGUSROLE', load_balancer=self.lb)

        self.assertIsInstance(amp_flow, flow.Flow)

        self.assertIn(constants.FAILED_AMPHORA, amp_flow.requires)
        self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires)

        self.assertIn(constants.AMP_DATA, amp_flow.provides)
        self.assertIn(constants.AMPHORA, amp_flow.provides)
        self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
        self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, amp_flow.provides)
        self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
        self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides)
        self.assertIn(constants.LISTENERS, amp_flow.provides)
        self.assertIn(constants.LOADBALANCER, amp_flow.provides)

        self.assertEqual(4, len(amp_flow.requires))
        self.assertEqual(12, len(amp_flow.provides))

    def test_get_failover_flow_spare(self, mock_get_net_driver):

        amp_flow = self.AmpFlow.get_failover_flow()

        self.assertIsInstance(amp_flow, flow.Flow)

        self.assertIn(constants.FAILED_AMPHORA, amp_flow.requires)

        self.assertEqual(1, len(amp_flow.requires))
        self.assertEqual(0, len(amp_flow.provides))

    def test_cert_rotate_amphora_flow(self, mock_get_net_driver):
        self.AmpFlow = amphora_flows.AmphoraFlows()

        amp_rotate_flow = self.AmpFlow.cert_rotate_amphora_flow()
        self.assertIsInstance(amp_rotate_flow, flow.Flow)

        self.assertIn(constants.SERVER_PEM, amp_rotate_flow.provides)
        self.assertIn(constants.AMPHORA, amp_rotate_flow.requires)

        self.assertEqual(1, len(amp_rotate_flow.provides))
        self.assertEqual(2, len(amp_rotate_flow.requires))

    def test_get_vrrp_subflow(self, mock_get_net_driver):
        vrrp_subflow = self.AmpFlow.get_vrrp_subflow('123')

        self.assertIsInstance(vrrp_subflow, flow.Flow)

        self.assertIn(constants.LOADBALANCER, vrrp_subflow.provides)

        self.assertIn(constants.LOADBALANCER, vrrp_subflow.requires)

        self.assertEqual(2, len(vrrp_subflow.provides))
        self.assertEqual(1, len(vrrp_subflow.requires))

    def test_get_post_map_lb_subflow(self, mock_get_net_driver):

        self.AmpFlow = amphora_flows.AmphoraFlows()

        amp_flow = self.AmpFlow._get_post_map_lb_subflow(
            'SOMEPREFIX', constants.ROLE_MASTER)

        self.assertIsInstance(amp_flow, flow.Flow)

        self.assertIn(constants.FLAVOR, amp_flow.requires)
        self.assertIn(constants.AMPHORA_ID, amp_flow.requires)
        self.assertIn(constants.AMPHORA, amp_flow.provides)

        self.assertEqual(1, len(amp_flow.provides))
        self.assertEqual(2, len(amp_flow.requires))

        amp_flow = self.AmpFlow._get_post_map_lb_subflow(
            'SOMEPREFIX', constants.ROLE_BACKUP)

        self.assertIsInstance(amp_flow, flow.Flow)

        self.assertIn(constants.FLAVOR, amp_flow.requires)
        self.assertIn(constants.AMPHORA_ID, amp_flow.requires)
        self.assertIn(constants.AMPHORA, amp_flow.provides)

        self.assertEqual(1, len(amp_flow.provides))
        self.assertEqual(2, len(amp_flow.requires))

        amp_flow = self.AmpFlow._get_post_map_lb_subflow(
            'SOMEPREFIX', constants.ROLE_STANDALONE)

        self.assertIsInstance(amp_flow, flow.Flow)

        self.assertIn(constants.FLAVOR, amp_flow.requires)
        self.assertIn(constants.AMPHORA_ID, amp_flow.requires)
        self.assertIn(constants.AMPHORA, amp_flow.provides)

        self.assertEqual(1, len(amp_flow.provides))
        self.assertEqual(2, len(amp_flow.requires))

        amp_flow = self.AmpFlow._get_post_map_lb_subflow(
            'SOMEPREFIX', 'BOGUS_ROLE')

        self.assertIsInstance(amp_flow, flow.Flow)

        self.assertIn(constants.FLAVOR, amp_flow.requires)
        self.assertIn(constants.AMPHORA_ID, amp_flow.requires)
        self.assertIn(constants.AMPHORA, amp_flow.provides)

        self.assertEqual(1, len(amp_flow.provides))
        self.assertEqual(2, len(amp_flow.requires))

    def test_update_amphora_config_flow(self, mock_get_net_driver):

        amp_flow = self.AmpFlow.update_amphora_config_flow()

        self.assertIsInstance(amp_flow, flow.Flow)

        self.assertIn(constants.AMPHORA, amp_flow.requires)
        self.assertIn(constants.FLAVOR, amp_flow.requires)

        self.assertEqual(2, len(amp_flow.requires))
        self.assertEqual(0, len(amp_flow.provides))
@ -0,0 +1,72 @@
|
|||||||
|
# Copyright 2015 Hewlett-Packard Development Company, L.P.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
#
|
||||||
|
|
||||||
|
from taskflow.patterns import linear_flow as flow
|
||||||
|
|
||||||
|
from octavia.common import constants
|
||||||
|
from octavia.controller.worker.v2.flows import health_monitor_flows
|
||||||
|
import octavia.tests.unit.base as base
|
||||||
|
|
||||||
|
|
||||||
|
class TestHealthMonitorFlows(base.TestCase):
|
||||||
|
|
||||||
|
def setUp(self):
|
||||||
|
self.HealthMonitorFlow = health_monitor_flows.HealthMonitorFlows()
|
||||||
|
|
||||||
|
super(TestHealthMonitorFlows, self).setUp()
|
||||||
|
|
||||||
|
def test_get_create_health_monitor_flow(self):
|
||||||
|
|
||||||
|
health_mon_flow = (self.HealthMonitorFlow.
|
||||||
|
get_create_health_monitor_flow())
|
||||||
|
|
||||||
|
self.assertIsInstance(health_mon_flow, flow.Flow)
|
||||||
|
|
||||||
|
self.assertIn(constants.LISTENERS, health_mon_flow.requires)
|
||||||
|
self.assertIn(constants.LOADBALANCER, health_mon_flow.requires)
|
||||||
|
self.assertIn(constants.POOL, health_mon_flow.requires)
|
||||||
|
|
||||||
|
self.assertEqual(4, len(health_mon_flow.requires))
|
||||||
|
self.assertEqual(0, len(health_mon_flow.provides))
|
||||||
|
|
||||||
|
def test_get_delete_health_monitor_flow(self):
|
||||||
|
|
||||||
|
health_mon_flow = (self.HealthMonitorFlow.
|
||||||
|
get_delete_health_monitor_flow())
|
||||||
|
|
||||||
|
self.assertIsInstance(health_mon_flow, flow.Flow)
|
||||||
|
|
||||||
|
self.assertIn(constants.HEALTH_MON, health_mon_flow.requires)
|
||||||
|
self.assertIn(constants.LISTENERS, health_mon_flow.requires)
|
||||||
|
self.assertIn(constants.LOADBALANCER, health_mon_flow.requires)
|
||||||
|
self.assertIn(constants.POOL, health_mon_flow.requires)
|
||||||
|
|
||||||
|
self.assertEqual(4, len(health_mon_flow.requires))
|
||||||
|
self.assertEqual(0, len(health_mon_flow.provides))
|
||||||
|
|
||||||
|
def test_get_update_health_monitor_flow(self):
|
||||||
|
|
||||||
|
health_mon_flow = (self.HealthMonitorFlow.
|
||||||
|
get_update_health_monitor_flow())
|
||||||
|
|
||||||
|
self.assertIsInstance(health_mon_flow, flow.Flow)
|
||||||
|
|
||||||
|
self.assertIn(constants.LISTENERS, health_mon_flow.requires)
|
||||||
|
self.assertIn(constants.LOADBALANCER, health_mon_flow.requires)
|
||||||
|
self.assertIn(constants.HEALTH_MON, health_mon_flow.requires)
|
||||||
|
self.assertIn(constants.UPDATE_DICT, health_mon_flow.requires)
|
||||||
|
|
||||||
|
self.assertEqual(5, len(health_mon_flow.requires))
|
||||||
|
self.assertEqual(0, len(health_mon_flow.provides))

@@ -0,0 +1,67 @@
# Copyright 2016 Blue Box, an IBM Company
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from taskflow.patterns import linear_flow as flow

from octavia.common import constants
from octavia.controller.worker.v2.flows import l7policy_flows
import octavia.tests.unit.base as base


class TestL7PolicyFlows(base.TestCase):

    def setUp(self):
        self.L7PolicyFlow = l7policy_flows.L7PolicyFlows()

        super(TestL7PolicyFlows, self).setUp()

    def test_get_create_l7policy_flow(self):

        l7policy_flow = self.L7PolicyFlow.get_create_l7policy_flow()

        self.assertIsInstance(l7policy_flow, flow.Flow)

        self.assertIn(constants.LISTENERS, l7policy_flow.requires)
        self.assertIn(constants.LOADBALANCER, l7policy_flow.requires)

        self.assertEqual(3, len(l7policy_flow.requires))
        self.assertEqual(0, len(l7policy_flow.provides))

    def test_get_delete_l7policy_flow(self):

        l7policy_flow = self.L7PolicyFlow.get_delete_l7policy_flow()

        self.assertIsInstance(l7policy_flow, flow.Flow)

        self.assertIn(constants.LISTENERS, l7policy_flow.requires)
        self.assertIn(constants.LOADBALANCER, l7policy_flow.requires)
        self.assertIn(constants.L7POLICY, l7policy_flow.requires)

        self.assertEqual(3, len(l7policy_flow.requires))
        self.assertEqual(0, len(l7policy_flow.provides))

    def test_get_update_l7policy_flow(self):

        l7policy_flow = self.L7PolicyFlow.get_update_l7policy_flow()

        self.assertIsInstance(l7policy_flow, flow.Flow)

        self.assertIn(constants.L7POLICY, l7policy_flow.requires)
        self.assertIn(constants.LISTENERS, l7policy_flow.requires)
        self.assertIn(constants.LOADBALANCER, l7policy_flow.requires)
        self.assertIn(constants.UPDATE_DICT, l7policy_flow.requires)

        self.assertEqual(4, len(l7policy_flow.requires))
        self.assertEqual(0, len(l7policy_flow.provides))

@@ -0,0 +1,67 @@
# Copyright 2016 Blue Box, an IBM Company
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from taskflow.patterns import linear_flow as flow

from octavia.common import constants
from octavia.controller.worker.v2.flows import l7rule_flows
import octavia.tests.unit.base as base


class TestL7RuleFlows(base.TestCase):

    def setUp(self):
        self.L7RuleFlow = l7rule_flows.L7RuleFlows()

        super(TestL7RuleFlows, self).setUp()

    def test_get_create_l7rule_flow(self):

        l7rule_flow = self.L7RuleFlow.get_create_l7rule_flow()

        self.assertIsInstance(l7rule_flow, flow.Flow)

        self.assertIn(constants.LISTENERS, l7rule_flow.requires)
        self.assertIn(constants.LOADBALANCER, l7rule_flow.requires)

        self.assertEqual(4, len(l7rule_flow.requires))
        self.assertEqual(0, len(l7rule_flow.provides))

    def test_get_delete_l7rule_flow(self):

        l7rule_flow = self.L7RuleFlow.get_delete_l7rule_flow()

        self.assertIsInstance(l7rule_flow, flow.Flow)

        self.assertIn(constants.LISTENERS, l7rule_flow.requires)
        self.assertIn(constants.LOADBALANCER, l7rule_flow.requires)
        self.assertIn(constants.L7RULE, l7rule_flow.requires)

        self.assertEqual(4, len(l7rule_flow.requires))
        self.assertEqual(0, len(l7rule_flow.provides))

    def test_get_update_l7rule_flow(self):

        l7rule_flow = self.L7RuleFlow.get_update_l7rule_flow()

        self.assertIsInstance(l7rule_flow, flow.Flow)

        self.assertIn(constants.L7RULE, l7rule_flow.requires)
        self.assertIn(constants.LISTENERS, l7rule_flow.requires)
        self.assertIn(constants.LOADBALANCER, l7rule_flow.requires)
        self.assertIn(constants.UPDATE_DICT, l7rule_flow.requires)

        self.assertEqual(5, len(l7rule_flow.requires))
        self.assertEqual(0, len(l7rule_flow.provides))

@@ -0,0 +1,90 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import mock
from taskflow.patterns import linear_flow as flow

from octavia.common import constants
from octavia.controller.worker.v2.flows import listener_flows
import octavia.tests.unit.base as base


# NOTE: We patch the get_network_driver for all the calls so we don't
# inadvertently make real calls.
@mock.patch('octavia.common.utils.get_network_driver')
class TestListenerFlows(base.TestCase):

    def setUp(self):
        self.ListenerFlow = listener_flows.ListenerFlows()

        super(TestListenerFlows, self).setUp()

    def test_get_create_listener_flow(self, mock_get_net_driver):

        listener_flow = self.ListenerFlow.get_create_listener_flow()

        self.assertIsInstance(listener_flow, flow.Flow)

        self.assertIn(constants.LOADBALANCER, listener_flow.requires)
        self.assertIn(constants.LISTENERS, listener_flow.requires)

        self.assertEqual(2, len(listener_flow.requires))
        self.assertEqual(0, len(listener_flow.provides))

    def test_get_delete_listener_flow(self, mock_get_net_driver):

        listener_flow = self.ListenerFlow.get_delete_listener_flow()

        self.assertIsInstance(listener_flow, flow.Flow)

        self.assertIn(constants.LISTENER, listener_flow.requires)
        self.assertIn(constants.LOADBALANCER, listener_flow.requires)

        self.assertEqual(2, len(listener_flow.requires))
        self.assertEqual(0, len(listener_flow.provides))

    def test_get_delete_listener_internal_flow(self, mock_get_net_driver):
        listener_flow = self.ListenerFlow.get_delete_listener_internal_flow(
            'test-listener')

        self.assertIsInstance(listener_flow, flow.Flow)

        self.assertIn('test-listener', listener_flow.requires)
        self.assertIn(constants.LOADBALANCER, listener_flow.requires)

        self.assertEqual(2, len(listener_flow.requires))
        self.assertEqual(0, len(listener_flow.provides))

    def test_get_update_listener_flow(self, mock_get_net_driver):

        listener_flow = self.ListenerFlow.get_update_listener_flow()

        self.assertIsInstance(listener_flow, flow.Flow)

        self.assertIn(constants.LISTENER, listener_flow.requires)
        self.assertIn(constants.LOADBALANCER, listener_flow.requires)
        self.assertIn(constants.UPDATE_DICT, listener_flow.requires)
        self.assertIn(constants.LISTENERS, listener_flow.requires)

        self.assertEqual(4, len(listener_flow.requires))
        self.assertEqual(0, len(listener_flow.provides))

    def test_get_create_all_listeners_flow(self, mock_get_net_driver):
        listeners_flow = self.ListenerFlow.get_create_all_listeners_flow()
        self.assertIsInstance(listeners_flow, flow.Flow)
        self.assertIn(constants.LOADBALANCER, listeners_flow.requires)
        self.assertIn(constants.LOADBALANCER_ID, listeners_flow.requires)
        self.assertIn(constants.LOADBALANCER, listeners_flow.provides)
        self.assertEqual(2, len(listeners_flow.requires))
        self.assertEqual(2, len(listeners_flow.provides))
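
The class-level mock.patch used above is worth spelling out: decorating the TestCase class patches every test method, and each test receives the replacement MagicMock as an extra positional argument, so no test can accidentally reach a real network driver. A self-contained sketch of the mechanism (the function below is a stand-in, not the Octavia driver loader):

import mock
import unittest


def get_network_driver():
    raise RuntimeError('would talk to a real cloud')


# Decorating the class applies the patch to every test_* method and
# appends the mock as the last positional argument of each.
@mock.patch(__name__ + '.get_network_driver')
class PatchSketch(unittest.TestCase):

    def test_no_real_calls(self, mock_get_net_driver):
        # The module attribute is swapped out for the test's duration.
        self.assertIs(get_network_driver, mock_get_net_driver)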

@@ -0,0 +1,227 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

import mock
from oslo_config import cfg
from oslo_config import fixture as oslo_fixture
from taskflow.patterns import linear_flow as flow

from octavia.common import constants
from octavia.common import exceptions
from octavia.controller.worker.v2.flows import load_balancer_flows
import octavia.tests.unit.base as base


# NOTE: We patch the get_network_driver for all the calls so we don't
# inadvertently make real calls.
@mock.patch('octavia.common.utils.get_network_driver')
class TestLoadBalancerFlows(base.TestCase):

    def setUp(self):
        super(TestLoadBalancerFlows, self).setUp()
        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        self.conf.config(
            group="controller_worker",
            amphora_driver='amphora_haproxy_rest_driver')
        self.conf.config(group="nova", enable_anti_affinity=False)
        self.LBFlow = load_balancer_flows.LoadBalancerFlows()

    def test_get_create_load_balancer_flow(self, mock_get_net_driver):
        amp_flow = self.LBFlow.get_create_load_balancer_flow(
            constants.TOPOLOGY_SINGLE)
        self.assertIsInstance(amp_flow, flow.Flow)
        self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires)
        self.assertIn(constants.AMPHORA, amp_flow.provides)
        self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
        self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
        self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides)

    def test_get_create_active_standby_load_balancer_flow(
            self, mock_get_net_driver):
        amp_flow = self.LBFlow.get_create_load_balancer_flow(
            constants.TOPOLOGY_ACTIVE_STANDBY)
        self.assertIsInstance(amp_flow, flow.Flow)
        self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires)
        self.assertIn(constants.AMPHORA, amp_flow.provides)
        self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
        self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
        self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides)

    def test_get_create_anti_affinity_active_standby_load_balancer_flow(
            self, mock_get_net_driver):
        self.conf.config(group="nova", enable_anti_affinity=True)

        self._LBFlow = load_balancer_flows.LoadBalancerFlows()
        amp_flow = self._LBFlow.get_create_load_balancer_flow(
            constants.TOPOLOGY_ACTIVE_STANDBY)
        self.assertIsInstance(amp_flow, flow.Flow)
        self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires)
        self.assertIn(constants.SERVER_GROUP_ID, amp_flow.provides)
        self.assertIn(constants.AMPHORA, amp_flow.provides)
        self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
        self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
        self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides)
        self.conf.config(group="nova", enable_anti_affinity=False)

    def test_get_create_bogus_topology_load_balancer_flow(
            self, mock_get_net_driver):
        self.assertRaises(exceptions.InvalidTopology,
                          self.LBFlow.get_create_load_balancer_flow,
                          'BOGUS')

    def test_get_delete_load_balancer_flow(self, mock_get_net_driver):
        lb_mock = mock.Mock()
        listener_mock = mock.Mock()
        listener_mock.id = '123'
        lb_mock.listeners = [listener_mock]

        lb_flow, store = self.LBFlow.get_delete_load_balancer_flow(lb_mock)

        self.assertIsInstance(lb_flow, flow.Flow)

        self.assertIn(constants.LOADBALANCER, lb_flow.requires)
        self.assertIn(constants.SERVER_GROUP_ID, lb_flow.requires)

        self.assertEqual(0, len(lb_flow.provides))
        self.assertEqual(2, len(lb_flow.requires))

    def test_get_delete_load_balancer_flow_cascade(self, mock_get_net_driver):
        lb_mock = mock.Mock()
        listener_mock = mock.Mock()
        listener_mock.id = '123'
        lb_mock.listeners = [listener_mock]
        pool_mock = mock.Mock()
        pool_mock.id = '345'
        lb_mock.pools = [pool_mock]
        l7_mock = mock.Mock()
        l7_mock.id = '678'
        listener_mock.l7policies = [l7_mock]

        lb_flow, store = self.LBFlow.get_cascade_delete_load_balancer_flow(
            lb_mock)

        self.assertIsInstance(lb_flow, flow.Flow)
        self.assertEqual({'listener_123': listener_mock,
                          'pool345': pool_mock}, store)

        self.assertIn(constants.LOADBALANCER, lb_flow.requires)

        self.assertEqual(1, len(lb_flow.provides))
        self.assertEqual(4, len(lb_flow.requires))

    def test_get_update_load_balancer_flow(self, mock_get_net_driver):

        lb_flow = self.LBFlow.get_update_load_balancer_flow()

        self.assertIsInstance(lb_flow, flow.Flow)

        self.assertIn(constants.LOADBALANCER, lb_flow.requires)

        self.assertEqual(0, len(lb_flow.provides))
        self.assertEqual(3, len(lb_flow.requires))

    def test_get_post_lb_amp_association_flow(self, mock_get_net_driver):
        amp_flow = self.LBFlow.get_post_lb_amp_association_flow(
            '123', constants.TOPOLOGY_SINGLE)

        self.assertIsInstance(amp_flow, flow.Flow)

        self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires)
        self.assertIn(constants.UPDATE_DICT, amp_flow.requires)
        self.assertIn(constants.LOADBALANCER, amp_flow.provides)

        self.assertEqual(1, len(amp_flow.provides))
        self.assertEqual(2, len(amp_flow.requires))

        # Test Active/Standby path
        amp_flow = self.LBFlow.get_post_lb_amp_association_flow(
            '123', constants.TOPOLOGY_ACTIVE_STANDBY)

        self.assertIsInstance(amp_flow, flow.Flow)

        self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires)
        self.assertIn(constants.UPDATE_DICT, amp_flow.requires)
        self.assertIn(constants.LOADBALANCER, amp_flow.provides)

        self.assertEqual(2, len(amp_flow.provides))
        self.assertEqual(2, len(amp_flow.requires))

        # Test mark_active=False
        amp_flow = self.LBFlow.get_post_lb_amp_association_flow(
            '123', constants.TOPOLOGY_ACTIVE_STANDBY)

        self.assertIsInstance(amp_flow, flow.Flow)

        self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires)
        self.assertIn(constants.UPDATE_DICT, amp_flow.requires)
        self.assertIn(constants.LOADBALANCER, amp_flow.provides)

        self.assertEqual(2, len(amp_flow.provides))
        self.assertEqual(2, len(amp_flow.requires))

    def test_get_create_load_balancer_flows_single_listeners(
            self, mock_get_net_driver):
        create_flow = (
            self.LBFlow.get_create_load_balancer_flow(
                constants.TOPOLOGY_SINGLE, True
            )
        )
        self.assertIsInstance(create_flow, flow.Flow)
        self.assertIn(constants.LOADBALANCER_ID, create_flow.requires)
        self.assertIn(constants.UPDATE_DICT, create_flow.requires)

        self.assertIn(constants.LISTENERS, create_flow.provides)
        self.assertIn(constants.AMPHORA, create_flow.provides)
        self.assertIn(constants.AMPHORA_ID, create_flow.provides)
        self.assertIn(constants.COMPUTE_ID, create_flow.provides)
        self.assertIn(constants.COMPUTE_OBJ, create_flow.provides)
        self.assertIn(constants.LOADBALANCER, create_flow.provides)
        self.assertIn(constants.DELTAS, create_flow.provides)
        self.assertIn(constants.ADDED_PORTS, create_flow.provides)
        self.assertIn(constants.VIP, create_flow.provides)
        self.assertIn(constants.AMP_DATA, create_flow.provides)
        self.assertIn(constants.AMPHORA_NETWORK_CONFIG, create_flow.provides)

        self.assertEqual(4, len(create_flow.requires))
        self.assertEqual(13, len(create_flow.provides),
                         create_flow.provides)

    def test_get_create_load_balancer_flows_active_standby_listeners(
            self, mock_get_net_driver):
        create_flow = (
            self.LBFlow.get_create_load_balancer_flow(
                constants.TOPOLOGY_ACTIVE_STANDBY, True
            )
        )
        self.assertIsInstance(create_flow, flow.Flow)
        self.assertIn(constants.LOADBALANCER_ID, create_flow.requires)
        self.assertIn(constants.UPDATE_DICT, create_flow.requires)

        self.assertIn(constants.LISTENERS, create_flow.provides)
        self.assertIn(constants.AMPHORA, create_flow.provides)
        self.assertIn(constants.AMPHORA_ID, create_flow.provides)
        self.assertIn(constants.COMPUTE_ID, create_flow.provides)
        self.assertIn(constants.COMPUTE_OBJ, create_flow.provides)
        self.assertIn(constants.LOADBALANCER, create_flow.provides)
        self.assertIn(constants.DELTAS, create_flow.provides)
        self.assertIn(constants.ADDED_PORTS, create_flow.provides)
        self.assertIn(constants.VIP, create_flow.provides)
        self.assertIn(constants.AMP_DATA, create_flow.provides)
        self.assertIn(constants.AMPHORAE_NETWORK_CONFIG,
                      create_flow.provides)

        self.assertEqual(4, len(create_flow.requires))
        self.assertEqual(14, len(create_flow.provides),
                         create_flow.provides)
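
setUp above wires its config overrides through self.useFixture(oslo_fixture.Config(cfg.CONF)), which matters because the fixture rolls every override back when the test finishes, so tests cannot leak settings into each other. A sketch of the pattern; outside Octavia the 'nova'/'enable_anti_affinity' option is not pre-registered, so the sketch registers it locally to stay self-contained:

from oslo_config import cfg
from oslo_config import fixture as oslo_fixture
from oslotest import base


class ConfigOverrideSketch(base.BaseTestCase):

    def setUp(self):
        super(ConfigOverrideSketch, self).setUp()
        # Registered here only so the sketch runs standalone.
        cfg.CONF.register_opt(
            cfg.BoolOpt('enable_anti_affinity', default=False),
            group='nova')
        # useFixture() hooks the fixture's cleanup, so overrides made
        # through self.conf are reverted automatically after each test.
        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))

    def test_override(self):
        self.conf.config(group='nova', enable_anti_affinity=True)
        self.assertTrue(cfg.CONF.nova.enable_anti_affinity)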

@@ -0,0 +1,88 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

import mock
from taskflow.patterns import linear_flow as flow

from octavia.common import constants
from octavia.controller.worker.v2.flows import member_flows
import octavia.tests.unit.base as base


# NOTE: We patch the get_network_driver for all the calls so we don't
# inadvertently make real calls.
@mock.patch('octavia.common.utils.get_network_driver')
class TestMemberFlows(base.TestCase):

    def setUp(self):
        self.MemberFlow = member_flows.MemberFlows()

        super(TestMemberFlows, self).setUp()

    def test_get_create_member_flow(self, mock_get_net_driver):

        member_flow = self.MemberFlow.get_create_member_flow()

        self.assertIsInstance(member_flow, flow.Flow)

        self.assertIn(constants.LISTENERS, member_flow.requires)
        self.assertIn(constants.LOADBALANCER, member_flow.requires)
        self.assertIn(constants.POOL, member_flow.requires)

        self.assertEqual(4, len(member_flow.requires))
        self.assertEqual(2, len(member_flow.provides))

    def test_get_delete_member_flow(self, mock_get_net_driver):

        member_flow = self.MemberFlow.get_delete_member_flow()

        self.assertIsInstance(member_flow, flow.Flow)

        self.assertIn(constants.MEMBER, member_flow.requires)
        self.assertIn(constants.LISTENERS, member_flow.requires)
        self.assertIn(constants.LOADBALANCER, member_flow.requires)
        self.assertIn(constants.POOL, member_flow.requires)

        self.assertEqual(4, len(member_flow.requires))
        self.assertEqual(0, len(member_flow.provides))

    def test_get_update_member_flow(self, mock_get_net_driver):

        member_flow = self.MemberFlow.get_update_member_flow()

        self.assertIsInstance(member_flow, flow.Flow)

        self.assertIn(constants.MEMBER, member_flow.requires)
        self.assertIn(constants.LISTENERS, member_flow.requires)
        self.assertIn(constants.LOADBALANCER, member_flow.requires)
        self.assertIn(constants.POOL, member_flow.requires)
        self.assertIn(constants.UPDATE_DICT, member_flow.requires)

        self.assertEqual(5, len(member_flow.requires))
        self.assertEqual(0, len(member_flow.provides))

    def test_get_batch_update_members_flow(self, mock_get_net_driver):

        member_flow = self.MemberFlow.get_batch_update_members_flow(
            [], [], [])

        self.assertIsInstance(member_flow, flow.Flow)

        self.assertIn(constants.LISTENERS, member_flow.requires)
        self.assertIn(constants.LOADBALANCER, member_flow.requires)
        self.assertIn(constants.POOL, member_flow.requires)

        self.assertEqual(3, len(member_flow.requires))
        self.assertEqual(2, len(member_flow.provides))

@@ -0,0 +1,77 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from taskflow.patterns import linear_flow as flow

from octavia.common import constants
from octavia.controller.worker.v2.flows import pool_flows
import octavia.tests.unit.base as base


class TestPoolFlows(base.TestCase):

    def setUp(self):
        self.PoolFlow = pool_flows.PoolFlows()

        super(TestPoolFlows, self).setUp()

    def test_get_create_pool_flow(self):

        pool_flow = self.PoolFlow.get_create_pool_flow()

        self.assertIsInstance(pool_flow, flow.Flow)

        self.assertIn(constants.LISTENERS, pool_flow.requires)
        self.assertIn(constants.LOADBALANCER, pool_flow.requires)

        self.assertEqual(3, len(pool_flow.requires))
        self.assertEqual(0, len(pool_flow.provides))

    def test_get_delete_pool_flow(self):

        pool_flow = self.PoolFlow.get_delete_pool_flow()

        self.assertIsInstance(pool_flow, flow.Flow)

        self.assertIn(constants.LISTENERS, pool_flow.requires)
        self.assertIn(constants.LOADBALANCER, pool_flow.requires)
        self.assertIn(constants.POOL, pool_flow.requires)

        self.assertEqual(3, len(pool_flow.requires))
        self.assertEqual(1, len(pool_flow.provides))

    def test_get_delete_pool_flow_internal(self):

        pool_flow = self.PoolFlow.get_delete_pool_flow_internal('test')

        self.assertIsInstance(pool_flow, flow.Flow)
        self.assertIn('test', pool_flow.requires)

        self.assertEqual(1, len(pool_flow.requires))
        self.assertEqual(1, len(pool_flow.provides))

    def test_get_update_pool_flow(self):

        pool_flow = self.PoolFlow.get_update_pool_flow()

        self.assertIsInstance(pool_flow, flow.Flow)

        self.assertIn(constants.POOL, pool_flow.requires)
        self.assertIn(constants.LISTENERS, pool_flow.requires)
        self.assertIn(constants.LOADBALANCER, pool_flow.requires)
        self.assertIn(constants.UPDATE_DICT, pool_flow.requires)

        self.assertEqual(4, len(pool_flow.requires))
        self.assertEqual(0, len(pool_flow.provides))
11
octavia/tests/unit/controller/worker/v2/tasks/__init__.py
Normal file
@@ -0,0 +1,11 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
@@ -0,0 +1,672 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from cryptography import fernet
import mock
from oslo_config import cfg
from oslo_config import fixture as oslo_fixture
from oslo_utils import uuidutils
from taskflow.types import failure

from octavia.amphorae.driver_exceptions import exceptions as driver_except
from octavia.common import constants
from octavia.common import data_models
from octavia.common import utils
from octavia.controller.worker.v2.tasks import amphora_driver_tasks
from octavia.db import repositories as repo
import octavia.tests.unit.base as base


AMP_ID = uuidutils.generate_uuid()
COMPUTE_ID = uuidutils.generate_uuid()
LISTENER_ID = uuidutils.generate_uuid()
LB_ID = uuidutils.generate_uuid()
CONN_MAX_RETRIES = 10
CONN_RETRY_INTERVAL = 6
FAKE_CONFIG_FILE = 'fake config file'

_amphora_mock = mock.MagicMock()
_amphora_mock.id = AMP_ID
_amphora_mock.status = constants.AMPHORA_ALLOCATED
_load_balancer_mock = mock.MagicMock()
_load_balancer_mock.id = LB_ID
_listener_mock = mock.MagicMock()
_listener_mock.id = LISTENER_ID
_load_balancer_mock.listeners = [_listener_mock]
_vip_mock = mock.MagicMock()
_load_balancer_mock.vip = _vip_mock
_LB_mock = mock.MagicMock()
_amphorae_mock = [_amphora_mock]
_network_mock = mock.MagicMock()
_port_mock = mock.MagicMock()
_ports_mock = [_port_mock]
_session_mock = mock.MagicMock()


@mock.patch('octavia.db.repositories.AmphoraRepository.update')
@mock.patch('octavia.db.repositories.ListenerRepository.update')
@mock.patch('octavia.db.repositories.ListenerRepository.get',
            return_value=_listener_mock)
@mock.patch('octavia.db.api.get_session', return_value=_session_mock)
@mock.patch('octavia.controller.worker.v2.tasks.amphora_driver_tasks.LOG')
@mock.patch('oslo_utils.uuidutils.generate_uuid', return_value=AMP_ID)
@mock.patch('stevedore.driver.DriverManager.driver')
class TestAmphoraDriverTasks(base.TestCase):

    def setUp(self):

        _LB_mock.amphorae = [_amphora_mock]
        _LB_mock.id = LB_ID
        conf = oslo_fixture.Config(cfg.CONF)
        conf.config(group="haproxy_amphora",
                    active_connection_max_retries=CONN_MAX_RETRIES)
        conf.config(group="haproxy_amphora",
                    active_connection_rety_interval=CONN_RETRY_INTERVAL)
        conf.config(group="controller_worker",
                    loadbalancer_topology=constants.TOPOLOGY_SINGLE)
        super(TestAmphoraDriverTasks, self).setUp()

    def test_amp_listener_update(self,
                                 mock_driver,
                                 mock_generate_uuid,
                                 mock_log,
                                 mock_get_session,
                                 mock_listener_repo_get,
                                 mock_listener_repo_update,
                                 mock_amphora_repo_update):

        timeout_dict = {constants.REQ_CONN_TIMEOUT: 1,
                        constants.REQ_READ_TIMEOUT: 2,
                        constants.CONN_MAX_RETRIES: 3,
                        constants.CONN_RETRY_INTERVAL: 4}

        amp_list_update_obj = amphora_driver_tasks.AmpListenersUpdate()
        amp_list_update_obj.execute([_listener_mock], 0,
                                    [_amphora_mock], timeout_dict)

        mock_driver.update_amphora_listeners.assert_called_once_with(
            [_listener_mock], 0, [_amphora_mock], timeout_dict)

        mock_driver.update_amphora_listeners.side_effect = Exception('boom')

        amp_list_update_obj.execute([_listener_mock], 0,
                                    [_amphora_mock], timeout_dict)

        mock_amphora_repo_update.assert_called_once_with(
            _session_mock, AMP_ID, status=constants.ERROR)
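
    # NOTE: The stacked mock.patch class decorators above are applied
    # bottom-up, so the bottom-most patch (DriverManager.driver) is handed
    # to each test first (mock_driver) and the top-most
    # (AmphoraRepository.update) last (mock_amphora_repo_update). Every
    # test method in this class therefore lists its mock arguments in
    # that reverse order.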

    def test_listener_update(self,
                             mock_driver,
                             mock_generate_uuid,
                             mock_log,
                             mock_get_session,
                             mock_listener_repo_get,
                             mock_listener_repo_update,
                             mock_amphora_repo_update):

        listener_update_obj = amphora_driver_tasks.ListenersUpdate()
        listener_update_obj.execute(_load_balancer_mock, [_listener_mock])

        mock_driver.update.assert_called_once_with(_listener_mock, _vip_mock)

        # Test the revert
        amp = listener_update_obj.revert(_load_balancer_mock)
        repo.ListenerRepository.update.assert_called_once_with(
            _session_mock,
            id=LISTENER_ID,
            provisioning_status=constants.ERROR)
        self.assertIsNone(amp)

        # Test the revert with exception
        repo.ListenerRepository.update.reset_mock()
        mock_listener_repo_update.side_effect = Exception('fail')
        amp = listener_update_obj.revert(_load_balancer_mock)
        repo.ListenerRepository.update.assert_called_once_with(
            _session_mock,
            id=LISTENER_ID,
            provisioning_status=constants.ERROR)
        self.assertIsNone(amp)

    def test_listeners_update(self,
                              mock_driver,
                              mock_generate_uuid,
                              mock_log,
                              mock_get_session,
                              mock_listener_repo_get,
                              mock_listener_repo_update,
                              mock_amphora_repo_update):
        listeners_update_obj = amphora_driver_tasks.ListenersUpdate()
        listeners = [data_models.Listener(id='listener1'),
                     data_models.Listener(id='listener2')]
        vip = data_models.Vip(ip_address='10.0.0.1')
        lb = data_models.LoadBalancer(id='lb1', listeners=listeners, vip=vip)
        listeners_update_obj.execute(lb, listeners)
        mock_driver.update.assert_has_calls([mock.call(listeners[0], vip),
                                             mock.call(listeners[1], vip)])
        self.assertEqual(2, mock_driver.update.call_count)
        self.assertIsNotNone(listeners[0].load_balancer)
        self.assertIsNotNone(listeners[1].load_balancer)

        # Test the revert
        amp = listeners_update_obj.revert(lb)
        expected_db_calls = [mock.call(_session_mock,
                                       id=listeners[0].id,
                                       provisioning_status=constants.ERROR),
                             mock.call(_session_mock,
                                       id=listeners[1].id,
                                       provisioning_status=constants.ERROR)]
        repo.ListenerRepository.update.assert_has_calls(expected_db_calls)
        self.assertEqual(2, repo.ListenerRepository.update.call_count)
        self.assertIsNone(amp)

    def test_listener_stop(self,
                           mock_driver,
                           mock_generate_uuid,
                           mock_log,
                           mock_get_session,
                           mock_listener_repo_get,
                           mock_listener_repo_update,
                           mock_amphora_repo_update):

        listener_stop_obj = amphora_driver_tasks.ListenerStop()
        listener_stop_obj.execute(_load_balancer_mock, _listener_mock)

        mock_driver.stop.assert_called_once_with(_listener_mock, _vip_mock)

        # Test the revert
        amp = listener_stop_obj.revert(_listener_mock)
        repo.ListenerRepository.update.assert_called_once_with(
            _session_mock,
            id=LISTENER_ID,
            provisioning_status=constants.ERROR)
        self.assertIsNone(amp)

        # Test the revert with exception
        repo.ListenerRepository.update.reset_mock()
        mock_listener_repo_update.side_effect = Exception('fail')
        amp = listener_stop_obj.revert(_listener_mock)
        repo.ListenerRepository.update.assert_called_once_with(
            _session_mock,
            id=LISTENER_ID,
            provisioning_status=constants.ERROR)
        self.assertIsNone(amp)

    def test_listener_start(self,
                            mock_driver,
                            mock_generate_uuid,
                            mock_log,
                            mock_get_session,
                            mock_listener_repo_get,
                            mock_listener_repo_update,
                            mock_amphora_repo_update):

        listener_start_obj = amphora_driver_tasks.ListenerStart()
        listener_start_obj.execute(_load_balancer_mock, _listener_mock)

        mock_driver.start.assert_called_once_with(_listener_mock, _vip_mock)

        # Test the revert
        amp = listener_start_obj.revert(_listener_mock)
        repo.ListenerRepository.update.assert_called_once_with(
            _session_mock,
            id=LISTENER_ID,
            provisioning_status=constants.ERROR)
        self.assertIsNone(amp)

        # Test the revert with exception
        repo.ListenerRepository.update.reset_mock()
        mock_listener_repo_update.side_effect = Exception('fail')
        amp = listener_start_obj.revert(_listener_mock)
        repo.ListenerRepository.update.assert_called_once_with(
            _session_mock,
            id=LISTENER_ID,
            provisioning_status=constants.ERROR)
        self.assertIsNone(amp)

    def test_listener_delete(self,
                             mock_driver,
                             mock_generate_uuid,
                             mock_log,
                             mock_get_session,
                             mock_listener_repo_get,
                             mock_listener_repo_update,
                             mock_amphora_repo_update):

        listener_delete_obj = amphora_driver_tasks.ListenerDelete()
        listener_delete_obj.execute(_load_balancer_mock, _listener_mock)

        mock_driver.delete.assert_called_once_with(_listener_mock, _vip_mock)

        # Test the revert
        amp = listener_delete_obj.revert(_listener_mock)
        repo.ListenerRepository.update.assert_called_once_with(
            _session_mock,
            id=LISTENER_ID,
            provisioning_status=constants.ERROR)
        self.assertIsNone(amp)

        # Test the revert with exception
        repo.ListenerRepository.update.reset_mock()
        mock_listener_repo_update.side_effect = Exception('fail')
        amp = listener_delete_obj.revert(_listener_mock)
        repo.ListenerRepository.update.assert_called_once_with(
            _session_mock,
            id=LISTENER_ID,
            provisioning_status=constants.ERROR)
        self.assertIsNone(amp)

    def test_amphora_get_info(self,
                              mock_driver,
                              mock_generate_uuid,
                              mock_log,
                              mock_get_session,
                              mock_listener_repo_get,
                              mock_listener_repo_update,
                              mock_amphora_repo_update):

        amphora_get_info_obj = amphora_driver_tasks.AmphoraGetInfo()
        amphora_get_info_obj.execute(_amphora_mock)

        mock_driver.get_info.assert_called_once_with(
            _amphora_mock)

    def test_amphora_get_diagnostics(self,
                                     mock_driver,
                                     mock_generate_uuid,
                                     mock_log,
                                     mock_get_session,
                                     mock_listener_repo_get,
                                     mock_listener_repo_update,
                                     mock_amphora_repo_update):

        amphora_get_diagnostics_obj = (amphora_driver_tasks.
                                       AmphoraGetDiagnostics())
        amphora_get_diagnostics_obj.execute(_amphora_mock)

        mock_driver.get_diagnostics.assert_called_once_with(
            _amphora_mock)

    def test_amphora_finalize(self,
                              mock_driver,
                              mock_generate_uuid,
                              mock_log,
                              mock_get_session,
                              mock_listener_repo_get,
                              mock_listener_repo_update,
                              mock_amphora_repo_update):

        amphora_finalize_obj = amphora_driver_tasks.AmphoraFinalize()
        amphora_finalize_obj.execute(_amphora_mock)

        mock_driver.finalize_amphora.assert_called_once_with(
            _amphora_mock)

        # Test revert
        amp = amphora_finalize_obj.revert(None, _amphora_mock)
        repo.AmphoraRepository.update.assert_called_once_with(
            _session_mock,
            id=AMP_ID,
            status=constants.ERROR)
        self.assertIsNone(amp)

        # Test revert with exception
        repo.AmphoraRepository.update.reset_mock()
        mock_amphora_repo_update.side_effect = Exception('fail')
        amp = amphora_finalize_obj.revert(None, _amphora_mock)
        repo.AmphoraRepository.update.assert_called_once_with(
            _session_mock,
            id=AMP_ID,
            status=constants.ERROR)
        self.assertIsNone(amp)

    def test_amphora_post_network_plug(self,
                                       mock_driver,
                                       mock_generate_uuid,
                                       mock_log,
                                       mock_get_session,
                                       mock_listener_repo_get,
                                       mock_listener_repo_update,
                                       mock_amphora_repo_update):

        amphora_post_network_plug_obj = (amphora_driver_tasks.
                                         AmphoraPostNetworkPlug())
        amphora_post_network_plug_obj.execute(_amphora_mock, _ports_mock)

        (mock_driver.post_network_plug.
         assert_called_once_with)(_amphora_mock, _port_mock)

        # Test revert
        amp = amphora_post_network_plug_obj.revert(None, _amphora_mock)
        repo.AmphoraRepository.update.assert_called_once_with(
            _session_mock,
            id=AMP_ID,
            status=constants.ERROR)

        self.assertIsNone(amp)

        # Test revert with exception
        repo.AmphoraRepository.update.reset_mock()
        mock_amphora_repo_update.side_effect = Exception('fail')
        amp = amphora_post_network_plug_obj.revert(None, _amphora_mock)
        repo.AmphoraRepository.update.assert_called_once_with(
            _session_mock,
            id=AMP_ID,
            status=constants.ERROR)

        self.assertIsNone(amp)

    def test_amphorae_post_network_plug(self, mock_driver,
                                        mock_generate_uuid,
                                        mock_log,
                                        mock_get_session,
                                        mock_listener_repo_get,
                                        mock_listener_repo_update,
                                        mock_amphora_repo_update):
        mock_driver.get_network.return_value = _network_mock
        _amphora_mock.id = AMP_ID
        _amphora_mock.compute_id = COMPUTE_ID
        _LB_mock.amphorae = [_amphora_mock]
        amphora_post_network_plug_obj = (amphora_driver_tasks.
                                         AmphoraePostNetworkPlug())

        port_mock = mock.Mock()
        _deltas_mock = {_amphora_mock.id: [port_mock]}

        amphora_post_network_plug_obj.execute(_LB_mock, _deltas_mock)

        (mock_driver.post_network_plug.
         assert_called_once_with(_amphora_mock, port_mock))

        # Test revert
        amp = amphora_post_network_plug_obj.revert(None, _LB_mock,
                                                   _deltas_mock)
        repo.AmphoraRepository.update.assert_called_once_with(
            _session_mock,
            id=AMP_ID,
            status=constants.ERROR)

        self.assertIsNone(amp)

        # Test revert with exception
        repo.AmphoraRepository.update.reset_mock()
        mock_amphora_repo_update.side_effect = Exception('fail')
        amp = amphora_post_network_plug_obj.revert(None, _LB_mock,
                                                   _deltas_mock)
        repo.AmphoraRepository.update.assert_called_once_with(
            _session_mock,
            id=AMP_ID,
            status=constants.ERROR)

        self.assertIsNone(amp)

    @mock.patch('octavia.db.repositories.LoadBalancerRepository.update')
    def test_amphora_post_vip_plug(self,
                                   mock_loadbalancer_repo_update,
                                   mock_driver,
                                   mock_generate_uuid,
                                   mock_log,
                                   mock_get_session,
                                   mock_listener_repo_get,
                                   mock_listener_repo_update,
                                   mock_amphora_repo_update):

        amphorae_net_config_mock = mock.Mock()
        amphora_post_vip_plug_obj = amphora_driver_tasks.AmphoraPostVIPPlug()
        amphora_post_vip_plug_obj.execute(_amphora_mock,
                                          _LB_mock,
                                          amphorae_net_config_mock)

        mock_driver.post_vip_plug.assert_called_once_with(
            _amphora_mock, _LB_mock, amphorae_net_config_mock)

        # Test revert
        amp = amphora_post_vip_plug_obj.revert(None, _amphora_mock, _LB_mock)
        repo.AmphoraRepository.update.assert_called_once_with(
            _session_mock,
            id=AMP_ID,
            status=constants.ERROR)
        repo.LoadBalancerRepository.update.assert_called_once_with(
            _session_mock,
            id=LB_ID,
            provisioning_status=constants.ERROR)

        self.assertIsNone(amp)

        # Test revert with repo exceptions
        repo.AmphoraRepository.update.reset_mock()
        repo.LoadBalancerRepository.update.reset_mock()
        mock_amphora_repo_update.side_effect = Exception('fail')
        mock_loadbalancer_repo_update.side_effect = Exception('fail')
        amp = amphora_post_vip_plug_obj.revert(None, _amphora_mock, _LB_mock)
        repo.AmphoraRepository.update.assert_called_once_with(
            _session_mock,
            id=AMP_ID,
            status=constants.ERROR)
        repo.LoadBalancerRepository.update.assert_called_once_with(
            _session_mock,
            id=LB_ID,
            provisioning_status=constants.ERROR)

        self.assertIsNone(amp)

    @mock.patch('octavia.db.repositories.LoadBalancerRepository.update')
    def test_amphorae_post_vip_plug(self,
                                    mock_loadbalancer_repo_update,
                                    mock_driver,
                                    mock_generate_uuid,
                                    mock_log,
                                    mock_get_session,
                                    mock_listener_repo_get,
                                    mock_listener_repo_update,
                                    mock_amphora_repo_update):

        amphorae_net_config_mock = mock.Mock()
        amphora_post_vip_plug_obj = amphora_driver_tasks.AmphoraePostVIPPlug()
        amphora_post_vip_plug_obj.execute(_LB_mock,
                                          amphorae_net_config_mock)

        mock_driver.post_vip_plug.assert_called_once_with(
            _amphora_mock, _LB_mock, amphorae_net_config_mock)

        # Test revert
        amp = amphora_post_vip_plug_obj.revert(None, _LB_mock)
        repo.LoadBalancerRepository.update.assert_called_once_with(
            _session_mock,
            id=LB_ID,
            provisioning_status=constants.ERROR)

        self.assertIsNone(amp)

        # Test revert with exception
        repo.LoadBalancerRepository.update.reset_mock()
        mock_loadbalancer_repo_update.side_effect = Exception('fail')
        amp = amphora_post_vip_plug_obj.revert(None, _LB_mock)
        repo.LoadBalancerRepository.update.assert_called_once_with(
            _session_mock,
            id=LB_ID,
            provisioning_status=constants.ERROR)

        self.assertIsNone(amp)

    def test_amphora_cert_upload(self,
                                 mock_driver,
                                 mock_generate_uuid,
                                 mock_log,
                                 mock_get_session,
                                 mock_listener_repo_get,
                                 mock_listener_repo_update,
                                 mock_amphora_repo_update):
        key = utils.get_six_compatible_server_certs_key_passphrase()
        fer = fernet.Fernet(key)
        pem_file_mock = fer.encrypt(
            utils.get_six_compatible_value('test-pem-file'))
        amphora_cert_upload_mock = amphora_driver_tasks.AmphoraCertUpload()
        amphora_cert_upload_mock.execute(_amphora_mock, pem_file_mock)

        mock_driver.upload_cert_amp.assert_called_once_with(
            _amphora_mock, fer.decrypt(pem_file_mock))

    def test_amphora_update_vrrp_interface(self,
                                           mock_driver,
                                           mock_generate_uuid,
                                           mock_log,
                                           mock_get_session,
                                           mock_listener_repo_get,
                                           mock_listener_repo_update,
                                           mock_amphora_repo_update):
        _LB_mock.amphorae = _amphorae_mock

        timeout_dict = {constants.CONN_MAX_RETRIES: CONN_MAX_RETRIES,
                        constants.CONN_RETRY_INTERVAL: CONN_RETRY_INTERVAL}

        amphora_update_vrrp_interface_obj = (
            amphora_driver_tasks.AmphoraUpdateVRRPInterface())
        amphora_update_vrrp_interface_obj.execute(_LB_mock)
        mock_driver.get_vrrp_interface.assert_called_once_with(
            _amphora_mock, timeout_dict=timeout_dict)

        # Test revert
        mock_driver.reset_mock()

        _LB_mock.amphorae = _amphorae_mock
        amphora_update_vrrp_interface_obj.revert("BADRESULT", _LB_mock)
        mock_amphora_repo_update.assert_called_with(_session_mock,
                                                    _amphora_mock.id,
                                                    vrrp_interface=None)

        mock_driver.reset_mock()
        mock_amphora_repo_update.reset_mock()

        failure_obj = failure.Failure.from_exception(Exception("TESTEXCEPT"))
        amphora_update_vrrp_interface_obj.revert(failure_obj, _LB_mock)
        self.assertFalse(mock_amphora_repo_update.called)

        # Test revert with exception
        mock_driver.reset_mock()
        mock_amphora_repo_update.reset_mock()
        mock_amphora_repo_update.side_effect = Exception('fail')

        _LB_mock.amphorae = _amphorae_mock
        amphora_update_vrrp_interface_obj.revert("BADRESULT", _LB_mock)
        mock_amphora_repo_update.assert_called_with(_session_mock,
                                                    _amphora_mock.id,
                                                    vrrp_interface=None)
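
    # NOTE: taskflow hands revert() a taskflow.types.failure.Failure as
    # the result when this task itself raised; the test above relies on
    # the revert recognizing that case and skipping the amphora DB update
    # (hence the assertFalse on mock_amphora_repo_update.called).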

    def test_amphora_vrrp_update(self,
                                 mock_driver,
                                 mock_generate_uuid,
                                 mock_log,
                                 mock_get_session,
                                 mock_listener_repo_get,
                                 mock_listener_repo_update,
                                 mock_amphora_repo_update):
        amphorae_network_config = mock.MagicMock()
        amphora_vrrp_update_obj = (
            amphora_driver_tasks.AmphoraVRRPUpdate())
        amphora_vrrp_update_obj.execute(_LB_mock, amphorae_network_config)
        mock_driver.update_vrrp_conf.assert_called_once_with(
            _LB_mock, amphorae_network_config)

    def test_amphora_vrrp_stop(self,
                               mock_driver,
                               mock_generate_uuid,
                               mock_log,
                               mock_get_session,
                               mock_listener_repo_get,
                               mock_listener_repo_update,
                               mock_amphora_repo_update):
        amphora_vrrp_stop_obj = (
            amphora_driver_tasks.AmphoraVRRPStop())
        amphora_vrrp_stop_obj.execute(_LB_mock)
        mock_driver.stop_vrrp_service.assert_called_once_with(_LB_mock)

    def test_amphora_vrrp_start(self,
                                mock_driver,
                                mock_generate_uuid,
                                mock_log,
                                mock_get_session,
                                mock_listener_repo_get,
                                mock_listener_repo_update,
                                mock_amphora_repo_update):
        amphora_vrrp_start_obj = (
            amphora_driver_tasks.AmphoraVRRPStart())
        amphora_vrrp_start_obj.execute(_LB_mock)
        mock_driver.start_vrrp_service.assert_called_once_with(_LB_mock)

    def test_amphora_compute_connectivity_wait(self,
                                               mock_driver,
                                               mock_generate_uuid,
                                               mock_log,
                                               mock_get_session,
                                               mock_listener_repo_get,
                                               mock_listener_repo_update,
                                               mock_amphora_repo_update):
        amp_compute_conn_wait_obj = (
            amphora_driver_tasks.AmphoraComputeConnectivityWait())
        amp_compute_conn_wait_obj.execute(_amphora_mock)
        mock_driver.get_info.assert_called_once_with(_amphora_mock)

        mock_driver.get_info.side_effect = driver_except.TimeOutException()
        self.assertRaises(driver_except.TimeOutException,
                          amp_compute_conn_wait_obj.execute, _amphora_mock)
        mock_amphora_repo_update.assert_called_once_with(
            _session_mock, AMP_ID, status=constants.ERROR)

    @mock.patch('octavia.amphorae.backends.agent.agent_jinja_cfg.'
                'AgentJinjaTemplater.build_agent_config')
    def test_amphora_config_update(self,
                                   mock_build_config,
                                   mock_driver,
                                   mock_generate_uuid,
                                   mock_log,
                                   mock_get_session,
                                   mock_listener_repo_get,
                                   mock_listener_repo_update,
                                   mock_amphora_repo_update):
        mock_build_config.return_value = FAKE_CONFIG_FILE
        amp_config_update_obj = amphora_driver_tasks.AmphoraConfigUpdate()
        mock_driver.update_amphora_agent_config.side_effect = [
            None, None, driver_except.AmpDriverNotImplementedError,
            driver_except.TimeOutException]
        # With Flavor
        flavor = {constants.LOADBALANCER_TOPOLOGY:
                  constants.TOPOLOGY_ACTIVE_STANDBY}
        amp_config_update_obj.execute(_amphora_mock, flavor)
        mock_build_config.assert_called_once_with(
            _amphora_mock.id, constants.TOPOLOGY_ACTIVE_STANDBY)
        mock_driver.update_amphora_agent_config.assert_called_once_with(
            _amphora_mock, FAKE_CONFIG_FILE)
        # With no Flavor
        mock_driver.reset_mock()
        mock_build_config.reset_mock()
        amp_config_update_obj.execute(_amphora_mock, None)
        mock_build_config.assert_called_once_with(
            _amphora_mock.id, constants.TOPOLOGY_SINGLE)
        mock_driver.update_amphora_agent_config.assert_called_once_with(
            _amphora_mock, FAKE_CONFIG_FILE)
        # With amphora that does not support config update
        mock_driver.reset_mock()
        mock_build_config.reset_mock()
        amp_config_update_obj.execute(_amphora_mock, flavor)
        mock_build_config.assert_called_once_with(
            _amphora_mock.id, constants.TOPOLOGY_ACTIVE_STANDBY)
        mock_driver.update_amphora_agent_config.assert_called_once_with(
            _amphora_mock, FAKE_CONFIG_FILE)
        # With an unknown exception
        mock_driver.reset_mock()
        mock_build_config.reset_mock()
        self.assertRaises(driver_except.TimeOutException,
                          amp_config_update_obj.execute,
                          _amphora_mock, flavor)
|
@ -0,0 +1,47 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from cryptography import fernet
import mock

from oslo_config import cfg

from octavia.certificates.common import local
from octavia.common import utils
from octavia.controller.worker.v2.tasks import cert_task
import octavia.tests.unit.base as base

CONF = cfg.CONF


class TestCertTasks(base.TestCase):

    @mock.patch('stevedore.driver.DriverManager.driver')
    def test_execute(self, mock_driver):
        key = utils.get_six_compatible_server_certs_key_passphrase()
        fer = fernet.Fernet(key)
        dummy_cert = local.LocalCert(
            utils.get_six_compatible_value('test_cert'),
            utils.get_six_compatible_value('test_key'))
        mock_driver.generate_cert_key_pair.side_effect = [dummy_cert]
        c = cert_task.GenerateServerPEMTask()
        pem = c.execute('123')
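        # NOTE: GenerateServerPEMTask is expected to return the generated
        # certificate and key as a single PEM blob, encrypted with the
        # Fernet key derived from the configured passphrase, so decrypting
        # it here should recover the concatenated cert and key.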
        self.assertEqual(
            fer.decrypt(pem),
            dummy_cert.get_certificate() +
            dummy_cert.get_private_key()
        )
        mock_driver.generate_cert_key_pair.assert_called_once_with(
            cn='123', validity=CONF.certificates.cert_validity_time)
@ -0,0 +1,466 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from cryptography import fernet
import mock
from oslo_config import cfg
from oslo_config import fixture as oslo_fixture
from oslo_utils import uuidutils

from octavia.common import constants
from octavia.common import exceptions
from octavia.common import utils
from octavia.controller.worker.v2.tasks import compute_tasks
from octavia.tests.common import utils as test_utils
import octavia.tests.unit.base as base


AMP_FLAVOR_ID = '10'
AMP_IMAGE_ID = '11'
AMP_IMAGE_TAG = 'glance_tag'
AMP_SSH_KEY_NAME = None
AMP_NET = [uuidutils.generate_uuid()]
AMP_SEC_GROUPS = []
AMP_WAIT = 12
AMPHORA_ID = uuidutils.generate_uuid()
COMPUTE_ID = uuidutils.generate_uuid()
LB_NET_IP = '192.0.2.1'
PORT_ID = uuidutils.generate_uuid()
SERVER_GRPOUP_ID = uuidutils.generate_uuid()


class TestException(Exception):

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return repr(self.value)

_amphora_mock = mock.MagicMock()
_amphora_mock.id = AMPHORA_ID
_amphora_mock.compute_id = COMPUTE_ID
_load_balancer_mock = mock.MagicMock()
_load_balancer_mock.amphorae = [_amphora_mock]
_port = mock.MagicMock()
_port.id = PORT_ID


class TestComputeTasks(base.TestCase):

    def setUp(self):
        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        self.conf.config(
            group="controller_worker", amp_flavor_id=AMP_FLAVOR_ID)
        self.conf.config(
            group="controller_worker", amp_image_id=AMP_IMAGE_ID)
        self.conf.config(
            group="controller_worker", amp_image_tag=AMP_IMAGE_TAG)
        self.conf.config(
            group="controller_worker", amp_ssh_key_name=AMP_SSH_KEY_NAME)
        self.conf.config(
            group="controller_worker", amp_boot_network_list=AMP_NET)
        self.conf.config(
            group="controller_worker", amp_active_wait_sec=AMP_WAIT)
        self.conf.config(
            group="controller_worker", amp_secgroup_list=AMP_SEC_GROUPS)
        self.conf.config(group="controller_worker", amp_image_owner_id='')

        _amphora_mock.id = AMPHORA_ID
        _amphora_mock.status = constants.AMPHORA_ALLOCATED

        logging_mock = mock.MagicMock()
        compute_tasks.LOG = logging_mock

        super(TestComputeTasks, self).setUp()

    @mock.patch('jinja2.Environment.get_template')
    @mock.patch('octavia.amphorae.backends.agent.'
                'agent_jinja_cfg.AgentJinjaTemplater.'
                'build_agent_config', return_value='test_conf')
    @mock.patch('stevedore.driver.DriverManager.driver')
    def test_compute_create(self, mock_driver, mock_conf, mock_jinja):

        image_owner_id = uuidutils.generate_uuid()
        self.conf.config(
            group="controller_worker", amp_image_owner_id=image_owner_id)

        createcompute = compute_tasks.ComputeCreate()

        mock_driver.build.return_value = COMPUTE_ID
        # Test execute()
        compute_id = createcompute.execute(_amphora_mock.id, ports=[_port],
                                           server_group_id=SERVER_GRPOUP_ID)
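        # NOTE: ComputeCreate is expected to render the amphora agent
        # configuration (mocked above to return 'test_conf') and inject it
        # into the instance through the Nova config drive, which the build
        # assertion below verifies.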

        # Validate that the build method was called properly
        mock_driver.build.assert_called_once_with(
            name="amphora-" + _amphora_mock.id,
            amphora_flavor=AMP_FLAVOR_ID,
            image_id=AMP_IMAGE_ID,
            image_tag=AMP_IMAGE_TAG,
            image_owner=image_owner_id,
            key_name=AMP_SSH_KEY_NAME,
            sec_groups=AMP_SEC_GROUPS,
            network_ids=AMP_NET,
            port_ids=[PORT_ID],
            config_drive_files={'/etc/octavia/'
                                'amphora-agent.conf': 'test_conf'},
            user_data=None,
            server_group_id=SERVER_GRPOUP_ID)

        # Make sure it returns the expected compute_id
        self.assertEqual(COMPUTE_ID, compute_id)

        # Test that a build exception is raised
        createcompute = compute_tasks.ComputeCreate()

        self.assertRaises(TypeError,
                          createcompute.execute,
                          _amphora_mock, config_drive_files='test_cert')

        # Test revert()

        _amphora_mock.compute_id = COMPUTE_ID

        createcompute = compute_tasks.ComputeCreate()
        createcompute.revert(compute_id, _amphora_mock.id)

        # Validate that the delete method was called properly
        mock_driver.delete.assert_called_once_with(
            COMPUTE_ID)

        # Test that a delete exception is not raised

        createcompute.revert(COMPUTE_ID, _amphora_mock.id)

    @mock.patch('jinja2.Environment.get_template')
    @mock.patch('octavia.amphorae.backends.agent.'
                'agent_jinja_cfg.AgentJinjaTemplater.'
                'build_agent_config', return_value='test_conf')
    @mock.patch('octavia.common.jinja.'
                'user_data_jinja_cfg.UserDataJinjaCfg.'
                'build_user_data_config', return_value='test_conf')
    @mock.patch('stevedore.driver.DriverManager.driver')
    def test_compute_create_user_data(self, mock_driver,
                                      mock_ud_conf, mock_conf, mock_jinja):

        self.conf.config(
            group="controller_worker", user_data_config_drive=True)
        mock_ud_conf.return_value = 'test_ud_conf'
        createcompute = compute_tasks.ComputeCreate()

        mock_driver.build.return_value = COMPUTE_ID
        # Test execute()
        compute_id = createcompute.execute(_amphora_mock.id, ports=[_port])

        # Validate that the build method was called properly
        mock_driver.build.assert_called_once_with(
            name="amphora-" + _amphora_mock.id,
            amphora_flavor=AMP_FLAVOR_ID,
            image_id=AMP_IMAGE_ID,
            image_tag=AMP_IMAGE_TAG,
            image_owner='',
            key_name=AMP_SSH_KEY_NAME,
            sec_groups=AMP_SEC_GROUPS,
            network_ids=AMP_NET,
            port_ids=[PORT_ID],
            config_drive_files=None,
            user_data='test_ud_conf',
            server_group_id=None)

        # Make sure it returns the expected compute_id
        self.assertEqual(COMPUTE_ID, compute_id)

        # Test that a build exception is raised
        createcompute = compute_tasks.ComputeCreate()

        self.assertRaises(TypeError,
                          createcompute.execute,
                          _amphora_mock, config_drive_files='test_cert')

        # Test revert()

        _amphora_mock.compute_id = COMPUTE_ID

        createcompute = compute_tasks.ComputeCreate()
        createcompute.revert(compute_id, _amphora_mock.id)

        # Validate that the delete method was called properly
        mock_driver.delete.assert_called_once_with(
            COMPUTE_ID)

        # Test that a delete exception is not raised

        createcompute.revert(COMPUTE_ID, _amphora_mock.id)

    @mock.patch('jinja2.Environment.get_template')
    @mock.patch('octavia.amphorae.backends.agent.'
                'agent_jinja_cfg.AgentJinjaTemplater.'
                'build_agent_config', return_value='test_conf')
    @mock.patch('stevedore.driver.DriverManager.driver')
    def test_compute_create_without_ssh_access(self, mock_driver,
                                               mock_conf, mock_jinja):

        createcompute = compute_tasks.ComputeCreate()

        mock_driver.build.return_value = COMPUTE_ID
        self.conf.config(
            group="controller_worker", amp_ssh_access_allowed=False)
        self.conf.config(
            group="controller_worker", user_data_config_drive=False)

        # Test execute()
        compute_id = createcompute.execute(_amphora_mock.id, ports=[_port],
                                           server_group_id=SERVER_GRPOUP_ID)

        # Validate that the build method was called properly
        mock_driver.build.assert_called_once_with(
            name="amphora-" + _amphora_mock.id,
            amphora_flavor=AMP_FLAVOR_ID,
            image_id=AMP_IMAGE_ID,
            image_tag=AMP_IMAGE_TAG,
            image_owner='',
            key_name=None,
            sec_groups=AMP_SEC_GROUPS,
            network_ids=AMP_NET,
            port_ids=[PORT_ID],
            config_drive_files={'/etc/octavia/'
                                'amphora-agent.conf': 'test_conf'},
            user_data=None,
            server_group_id=SERVER_GRPOUP_ID)

        self.assertEqual(COMPUTE_ID, compute_id)

        # Test that a build exception is raised
        createcompute = compute_tasks.ComputeCreate()

        self.assertRaises(TypeError,
                          createcompute.execute,
                          _amphora_mock, config_drive_files='test_cert')

        # Test revert()

        _amphora_mock.compute_id = COMPUTE_ID

        createcompute = compute_tasks.ComputeCreate()
        createcompute.revert(compute_id, _amphora_mock.id)

        # Validate that the delete method was called properly
        mock_driver.delete.assert_called_once_with(
            COMPUTE_ID)

        # Test that a delete exception is not raised

        createcompute.revert(COMPUTE_ID, _amphora_mock.id)

    @mock.patch('jinja2.Environment.get_template')
    @mock.patch('octavia.amphorae.backends.agent.'
                'agent_jinja_cfg.AgentJinjaTemplater.'
                'build_agent_config', return_value='test_conf')
    @mock.patch('stevedore.driver.DriverManager.driver')
    def test_compute_create_cert(self, mock_driver, mock_conf, mock_jinja):
        createcompute = compute_tasks.CertComputeCreate()
        key = utils.get_six_compatible_server_certs_key_passphrase()
        fer = fernet.Fernet(key)

        mock_driver.build.return_value = COMPUTE_ID
        path = '/etc/octavia/certs/ca_01.pem'
        self.useFixture(test_utils.OpenFixture(path, 'test'))
        # Test execute()
        test_cert = fer.encrypt(
            utils.get_six_compatible_value('test_cert')
        )
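        # NOTE: the server PEM is handed to the task Fernet-encrypted;
        # CertComputeCreate is expected to decrypt it before writing it to
        # the config drive, which the build assertion below checks.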
        compute_id = createcompute.execute(_amphora_mock.id, test_cert,
                                           server_group_id=SERVER_GRPOUP_ID)

        # Validate that the build method was called properly
        mock_driver.build.assert_called_once_with(
            name="amphora-" + _amphora_mock.id,
            amphora_flavor=AMP_FLAVOR_ID,
            image_id=AMP_IMAGE_ID,
            image_tag=AMP_IMAGE_TAG,
            image_owner='',
            key_name=AMP_SSH_KEY_NAME,
            sec_groups=AMP_SEC_GROUPS,
            network_ids=AMP_NET,
            port_ids=[],
            user_data=None,
            config_drive_files={
                '/etc/octavia/certs/server.pem': fer.decrypt(test_cert),
                '/etc/octavia/certs/client_ca.pem': 'test',
                '/etc/octavia/amphora-agent.conf': 'test_conf'},
            server_group_id=SERVER_GRPOUP_ID)

        self.assertEqual(COMPUTE_ID, compute_id)

        # Test that a build exception is raised
        self.useFixture(test_utils.OpenFixture(path, 'test'))

        createcompute = compute_tasks.ComputeCreate()
        self.assertRaises(TypeError,
                          createcompute.execute,
                          _amphora_mock,
                          config_drive_files=test_cert)

        # Test revert()

        _amphora_mock.compute_id = COMPUTE_ID

        createcompute = compute_tasks.ComputeCreate()
        createcompute.revert(compute_id, _amphora_mock.id)

        # Validate that the delete method was called properly
        mock_driver.delete.assert_called_once_with(COMPUTE_ID)

        # Test that a delete exception is not raised

        createcompute.revert(COMPUTE_ID, _amphora_mock.id)

    @mock.patch('octavia.controller.worker.amphora_rate_limit'
                '.AmphoraBuildRateLimit.remove_from_build_req_queue')
    @mock.patch('stevedore.driver.DriverManager.driver')
    @mock.patch('time.sleep')
    def test_compute_wait(self,
                          mock_time_sleep,
                          mock_driver,
                          mock_remove_from_build_queue):

        self.conf.config(group='haproxy_amphora', build_rate_limit=5)
        _amphora_mock.compute_id = COMPUTE_ID
        _amphora_mock.status = constants.ACTIVE
        _amphora_mock.lb_network_ip = LB_NET_IP

        mock_driver.get_amphora.return_value = _amphora_mock, None

        computewait = compute_tasks.ComputeActiveWait()
        computewait.execute(COMPUTE_ID, AMPHORA_ID)

        mock_driver.get_amphora.assert_called_once_with(COMPUTE_ID)

        _amphora_mock.status = constants.DELETED
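        # NOTE: a compute instance that disappears (DELETED) while the task
        # is waiting for it to boot is expected to be reported as a wait
        # timeout rather than a build failure.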
        self.assertRaises(exceptions.ComputeWaitTimeoutException,
                          computewait.execute,
                          _amphora_mock, AMPHORA_ID)

    @mock.patch('octavia.controller.worker.amphora_rate_limit'
                '.AmphoraBuildRateLimit.remove_from_build_req_queue')
    @mock.patch('stevedore.driver.DriverManager.driver')
    @mock.patch('time.sleep')
    def test_compute_wait_error_status(self,
                                       mock_time_sleep,
                                       mock_driver,
                                       mock_remove_from_build_queue):

        self.conf.config(group='haproxy_amphora', build_rate_limit=5)
        _amphora_mock.compute_id = COMPUTE_ID
        _amphora_mock.status = constants.ACTIVE
        _amphora_mock.lb_network_ip = LB_NET_IP

        mock_driver.get_amphora.return_value = _amphora_mock, None

        computewait = compute_tasks.ComputeActiveWait()
        computewait.execute(COMPUTE_ID, AMPHORA_ID)

        mock_driver.get_amphora.assert_called_once_with(COMPUTE_ID)

        _amphora_mock.status = constants.ERROR
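        # NOTE: an instance that lands in ERROR state is expected to
        # surface as a build failure, distinct from the timeout case above.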
        self.assertRaises(exceptions.ComputeBuildException,
                          computewait.execute,
                          _amphora_mock, AMPHORA_ID)

    @mock.patch('octavia.controller.worker.amphora_rate_limit'
                '.AmphoraBuildRateLimit.remove_from_build_req_queue')
    @mock.patch('stevedore.driver.DriverManager.driver')
    @mock.patch('time.sleep')
    def test_compute_wait_skipped(self,
                                  mock_time_sleep,
                                  mock_driver,
                                  mock_remove_from_build_queue):
        _amphora_mock.compute_id = COMPUTE_ID
        _amphora_mock.status = constants.ACTIVE
        _amphora_mock.lb_network_ip = LB_NET_IP

        mock_driver.get_amphora.return_value = _amphora_mock, None

        computewait = compute_tasks.ComputeActiveWait()
        computewait.execute(COMPUTE_ID, AMPHORA_ID)

        mock_driver.get_amphora.assert_called_once_with(COMPUTE_ID)
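        # NOTE: unlike the two tests above, no build_rate_limit is
        # configured here, so the task should never touch the build
        # request queue.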
        mock_remove_from_build_queue.assert_not_called()

    @mock.patch('stevedore.driver.DriverManager.driver')
    def test_delete_amphorae_on_load_balancer(self, mock_driver):

        delete_amps = compute_tasks.DeleteAmphoraeOnLoadBalancer()
        delete_amps.execute(_load_balancer_mock)

        mock_driver.delete.assert_called_once_with(COMPUTE_ID)

    @mock.patch('stevedore.driver.DriverManager.driver')
    def test_compute_delete(self, mock_driver):

        delete_compute = compute_tasks.ComputeDelete()
        delete_compute.execute(_amphora_mock)

        mock_driver.delete.assert_called_once_with(COMPUTE_ID)

    @mock.patch('stevedore.driver.DriverManager.driver')
    def test_nova_server_group_create(self, mock_driver):
        nova_sever_group_obj = compute_tasks.NovaServerGroupCreate()

        server_group_test_id = '6789'
        fake_server_group = mock.MagicMock()
        fake_server_group.id = server_group_test_id
        fake_server_group.policy = 'anti-affinity'
        mock_driver.create_server_group.return_value = fake_server_group

        # Test execute()
        sg_id = nova_sever_group_obj.execute('123')

        # Validate that the create_server_group method was called properly
        mock_driver.create_server_group.assert_called_once_with(
            'octavia-lb-123', 'anti-affinity')

        # Make sure it returns the expected server group id
        self.assertEqual(server_group_test_id, sg_id)

        # Test revert()
        nova_sever_group_obj.revert(sg_id)

        # Validate that the delete_server_group method was called properly
        mock_driver.delete_server_group.assert_called_once_with(sg_id)

        # Test revert with exception
        mock_driver.reset_mock()
        mock_driver.delete_server_group.side_effect = Exception('DelSGExcept')
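        # NOTE: revert() is expected to swallow the delete failure so that
        # flow cleanup can continue; it should still attempt the delete
        # exactly once.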
        nova_sever_group_obj.revert(sg_id)
        mock_driver.delete_server_group.assert_called_once_with(sg_id)

    @mock.patch('stevedore.driver.DriverManager.driver')
    def test_nova_server_group_delete_with_sever_group_id(self, mock_driver):
        nova_sever_group_obj = compute_tasks.NovaServerGroupDelete()
        sg_id = '6789'
        nova_sever_group_obj.execute(sg_id)
        mock_driver.delete_server_group.assert_called_once_with(sg_id)

    @mock.patch('stevedore.driver.DriverManager.driver')
    def test_nova_server_group_delete_with_None(self, mock_driver):
        nova_sever_group_obj = compute_tasks.NovaServerGroupDelete()
        sg_id = None
        nova_sever_group_obj.execute(sg_id)
        self.assertFalse(mock_driver.delete_server_group.called, sg_id)
2727
octavia/tests/unit/controller/worker/v2/tasks/test_database_tasks.py
Normal file
File diff suppressed because it is too large
Some files were not shown because too many files have changed in this diff