Merge "Convert Lb flows to use provider dicts"

Zuul 2020-01-24 09:54:18 +00:00, committed by Gerrit Code Review
commit 7f96e56ec2
22 changed files with 814 additions and 439 deletions

View File

@ -88,14 +88,13 @@ class AmphoraProviderDriver(driver_base.ProviderDriver):
loadbalancer.flavor = None
if loadbalancer.availability_zone == driver_dm.Unset:
loadbalancer.availability_zone = None
payload = {consts.LOAD_BALANCER_ID: loadbalancer.loadbalancer_id,
payload = {consts.LOADBALANCER: loadbalancer.to_dict(),
consts.FLAVOR: loadbalancer.flavor,
consts.AVAILABILITY_ZONE: loadbalancer.availability_zone}
self.client.cast({}, 'create_load_balancer', **payload)
def loadbalancer_delete(self, loadbalancer, cascade=False):
loadbalancer_id = loadbalancer.loadbalancer_id
payload = {consts.LOAD_BALANCER_ID: loadbalancer_id,
payload = {consts.LOADBALANCER: loadbalancer.to_dict(),
'cascade': cascade}
self.client.cast({}, 'delete_load_balancer', **payload)
@ -103,20 +102,21 @@ class AmphoraProviderDriver(driver_base.ProviderDriver):
payload = {consts.LOAD_BALANCER_ID: loadbalancer_id}
self.client.cast({}, 'failover_load_balancer', **payload)
def loadbalancer_update(self, old_loadbalancer, new_loadbalancer):
def loadbalancer_update(self, original_load_balancer, new_loadbalancer):
# Adapt the provider data model to the queue schema
lb_dict = new_loadbalancer.to_dict()
if 'admin_state_up' in lb_dict:
lb_dict['enabled'] = lb_dict.pop('admin_state_up')
lb_id = lb_dict.pop('loadbalancer_id')
# Put the qos_policy_id back under the vip element the controller
# expects
vip_qos_policy_id = lb_dict.pop('vip_qos_policy_id', None)
lb_dict.pop(consts.LOADBALANCER_ID)
if vip_qos_policy_id:
vip_dict = {"qos_policy_id": vip_qos_policy_id}
lb_dict["vip"] = vip_dict
payload = {consts.LOAD_BALANCER_ID: lb_id,
payload = {consts.ORIGINAL_LOADBALANCER:
original_load_balancer.to_dict(),
consts.LOAD_BALANCER_UPDATES: lb_dict}
self.client.cast({}, 'update_load_balancer', **payload)
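
Note: the hunks above swap bare IDs for full provider-dict payloads on the RPC queue. A rough, self-contained sketch of what that serialization looks like, assuming octavia-lib provider data models; the identifier and address values below are made up for illustration:

from octavia_lib.api.drivers import data_models as driver_dm

lb = driver_dm.LoadBalancer(loadbalancer_id='hypothetical-lb-id',
                            admin_state_up=True,
                            vip_address='203.0.113.10')
# The queue payload is now a JSON-serializable dict rather than a bare ID
# ('loadbalancer' is the value of consts.LOADBALANCER), so the worker can
# inspect the object without reloading it from the database first.
payload = {'loadbalancer': lb.to_dict(), 'cascade': False}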

View File

@ -364,6 +364,7 @@ NETWORK_ID = 'network_id'
NICS = 'nics'
OBJECT = 'object'
ORIGINAL_LISTENER = 'original_listener'
ORIGINAL_LOADBALANCER = 'original_load_balancer'
ORIGINAL_MEMBER = 'original_member'
ORIGINAL_POOL = 'original_pool'
PEER_PORT = 'peer_port'
@ -389,6 +390,7 @@ SNI_CONTAINERS = 'sni_containers'
SOFT_ANTI_AFFINITY = 'soft-anti-affinity'
STATUS = 'status'
SUBNET = 'subnet'
SUBNET_ID = 'subnet_id'
TAGS = 'tags'
TIMEOUT_DICT = 'timeout_dict'
TLS_CERTIFICATE_ID = 'tls_certificate_id'
@ -398,8 +400,12 @@ TOTAL_CONNECTIONS = 'total_connections'
UPDATED_AT = 'updated_at'
UPDATE_DICT = 'update_dict'
VIP = 'vip'
VIP_ADDRESS = 'vip_address'
VIP_NETWORK = 'vip_network'
VIP_PORT_ID = 'vip_port_id'
VIP_QOS_POLICY_ID = 'vip_qos_policy_id'
VIP_SUBNET = 'vip_subnet'
VIP_SUBNET_ID = 'vip_subnet_id'
VRRP_ID = 'vrrp_id'
VRRP_IP = 'vrrp_ip'
VRRP_GROUP = 'vrrp_group'

View File

@ -36,21 +36,24 @@ class Endpoints(object):
def __init__(self):
self.worker = controller_worker.ControllerWorker()
def create_load_balancer(self, context, load_balancer_id,
def create_load_balancer(self, context, loadbalancer,
flavor=None, availability_zone=None):
LOG.info('Creating load balancer \'%s\'...', load_balancer_id)
self.worker.create_load_balancer(load_balancer_id, flavor,
LOG.info('Creating load balancer \'%s\'...',
loadbalancer[constants.LOADBALANCER_ID])
self.worker.create_load_balancer(loadbalancer, flavor,
availability_zone)
def update_load_balancer(self, context, load_balancer_id,
def update_load_balancer(self, context, original_load_balancer,
load_balancer_updates):
LOG.info('Updating load balancer \'%s\'...', load_balancer_id)
self.worker.update_load_balancer(load_balancer_id,
LOG.info('Updating load balancer \'%s\'...',
original_load_balancer.get(constants.LOADBALANCER_ID))
self.worker.update_load_balancer(original_load_balancer,
load_balancer_updates)
def delete_load_balancer(self, context, load_balancer_id, cascade=False):
LOG.info('Deleting load balancer \'%s\'...', load_balancer_id)
self.worker.delete_load_balancer(load_balancer_id, cascade)
def delete_load_balancer(self, context, loadbalancer, cascade=False):
LOG.info('Deleting load balancer \'%s\'...',
loadbalancer.get(constants.LOADBALANCER_ID))
self.worker.delete_load_balancer(loadbalancer, cascade)
def failover_load_balancer(self, context, load_balancer_id):
LOG.info('Failing over amphora in load balancer \'%s\'...',
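
Note: these endpoint signatures have to mirror the keys the provider driver puts in its payload, because oslo.messaging passes the cast's keyword arguments straight through to the endpoint method of the same name. A hedged sketch of that correspondence, where lb, client and the enclosing class are placeholders:

# Driver side (condensed from the AmphoraProviderDriver hunk above):
payload = {'loadbalancer': lb.to_dict(), 'cascade': True}
client.cast({}, 'delete_load_balancer', **payload)

# Worker side: oslo.messaging dispatches the cast to the endpoint method of
# the same name, so 'loadbalancer' arrives here as a plain dict.
def delete_load_balancer(self, context, loadbalancer, cascade=False):
    ...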

View File

@ -150,6 +150,8 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine):
pool = health_mon.pool
pool.health_monitor = health_mon
load_balancer = pool.load_balancer
provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
load_balancer).to_dict()
listeners_dicts = (
provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
@ -161,7 +163,7 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine):
constants.POOL_ID: pool.id,
constants.LISTENERS: listeners_dicts,
constants.LOADBALANCER_ID: load_balancer.id,
constants.LOADBALANCER: load_balancer})
constants.LOADBALANCER: provider_lb})
with tf_logging.DynamicLoggingListener(create_hm_tf,
log=LOG):
create_hm_tf.run()
@ -178,6 +180,8 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine):
pool = health_mon.pool
load_balancer = pool.load_balancer
provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
load_balancer).to_dict()
listeners_dicts = (
provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
@ -189,7 +193,7 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine):
constants.POOL_ID: pool.id,
constants.LISTENERS: listeners_dicts,
constants.LOADBALANCER_ID: load_balancer.id,
constants.LOADBALANCER: load_balancer})
constants.LOADBALANCER: provider_lb})
with tf_logging.DynamicLoggingListener(delete_hm_tf,
log=LOG):
delete_hm_tf.run()
@ -222,6 +226,8 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine):
pool.health_monitor = health_mon
load_balancer = pool.load_balancer
provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
load_balancer).to_dict()
update_hm_tf = self._taskflow_load(
self._health_monitor_flows.get_update_health_monitor_flow(),
@ -229,7 +235,7 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine):
constants.POOL_ID: pool.id,
constants.LISTENERS: listeners_dicts,
constants.LOADBALANCER_ID: load_balancer.id,
constants.LOADBALANCER: load_balancer,
constants.LOADBALANCER: provider_lb,
constants.UPDATE_DICT: health_monitor_updates})
with tf_logging.DynamicLoggingListener(update_hm_tf,
log=LOG):
@ -256,11 +262,13 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine):
for l in listeners:
dict_listeners.append(
provider_utils.db_listener_to_provider_listener(l).to_dict())
provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
load_balancer).to_dict()
create_listener_tf = self._taskflow_load(
self._listener_flows.get_create_listener_flow(),
store={constants.LISTENERS: dict_listeners,
constants.LOADBALANCER: load_balancer,
constants.LOADBALANCER: provider_lb,
constants.LOADBALANCER_ID: load_balancer.id})
with tf_logging.DynamicLoggingListener(create_listener_tf,
log=LOG):
@ -277,11 +285,11 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine):
# the project ID
lb = self._lb_repo.get(db_apis.get_session(),
id=listener[constants.LOADBALANCER_ID])
delete_listener_tf = self._taskflow_load(
self._listener_flows.get_delete_listener_flow(),
store={constants.LISTENER: listener,
constants.LOADBALANCER_ID: lb.id,
constants.LOADBALANCER_ID:
listener[constants.LOADBALANCER_ID],
constants.PROJECT_ID: lb.project_id})
with tf_logging.DynamicLoggingListener(delete_listener_tf,
log=LOG):
@ -311,7 +319,7 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine):
wait=tenacity.wait_incrementing(
RETRY_INITIAL_DELAY, RETRY_BACKOFF, RETRY_MAX),
stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS))
def create_load_balancer(self, load_balancer_id, flavor=None,
def create_load_balancer(self, loadbalancer, flavor=None,
availability_zone=None):
"""Creates a load balancer by allocating Amphorae.
@ -319,84 +327,88 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine):
If none are available it will attempt to build one specifically
for this load balancer.
:param load_balancer_id: ID of the load balancer to create
:param loadbalancer: The dict of load balancer to create
:returns: None
:raises NoResultFound: Unable to find the object
"""
lb = self._lb_repo.get(db_apis.get_session(), id=load_balancer_id)
lb = self._lb_repo.get(db_apis.get_session(),
id=loadbalancer[constants.LOADBALANCER_ID])
if not lb:
LOG.warning('Failed to fetch %s %s from DB. Retrying for up to '
'60 seconds.', 'load_balancer', load_balancer_id)
'60 seconds.', 'load_balancer',
loadbalancer[constants.LOADBALANCER_ID])
raise db_exceptions.NoResultFound
# TODO(johnsom) convert this to octavia_lib constant flavor
# once octavia is transitioned to use octavia_lib
store = {constants.LOADBALANCER_ID: load_balancer_id,
store = {constants.LOADBALANCER_ID:
loadbalancer[constants.LOADBALANCER_ID],
constants.BUILD_TYPE_PRIORITY:
constants.LB_CREATE_NORMAL_PRIORITY,
constants.FLAVOR: flavor,
constants.AVAILABILITY_ZONE: availability_zone}
topology = lb.topology
listeners_dicts = (
provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
lb.listeners)
)
store[constants.UPDATE_DICT] = {
constants.TOPOLOGY: topology
}
create_lb_flow = self._lb_flows.get_create_load_balancer_flow(
topology=topology, listeners=lb.listeners)
topology=topology, listeners=listeners_dicts)
create_lb_tf = self._taskflow_load(create_lb_flow, store=store)
with tf_logging.DynamicLoggingListener(create_lb_tf, log=LOG):
create_lb_tf.run()
def delete_load_balancer(self, load_balancer_id, cascade=False):
def delete_load_balancer(self, load_balancer, cascade=False):
"""Deletes a load balancer by de-allocating Amphorae.
:param load_balancer_id: ID of the load balancer to delete
:param load_balancer: Dict of the load balancer to delete
:returns: None
:raises LBNotFound: The referenced load balancer was not found
"""
lb = self._lb_repo.get(db_apis.get_session(),
id=load_balancer_id)
db_lb = self._lb_repo.get(db_apis.get_session(),
id=load_balancer[constants.LOADBALANCER_ID])
store = {}
if cascade:
(flow,
store) = self._lb_flows.get_cascade_delete_load_balancer_flow(lb)
flow = self._lb_flows.get_cascade_delete_load_balancer_flow(
load_balancer)
store.update(self._lb_flows.get_delete_pools_store(db_lb))
store.update(self._lb_flows.get_delete_listeners_store(db_lb))
else:
(flow, store) = self._lb_flows.get_delete_load_balancer_flow(lb)
store.update({constants.LOADBALANCER: lb,
constants.SERVER_GROUP_ID: lb.server_group_id})
flow = self._lb_flows.get_delete_load_balancer_flow(
load_balancer)
store.update({constants.LOADBALANCER: load_balancer,
constants.SERVER_GROUP_ID: db_lb.server_group_id,
constants.PROJECT_ID: db_lb.project_id})
delete_lb_tf = self._taskflow_load(flow, store=store)
with tf_logging.DynamicLoggingListener(delete_lb_tf,
log=LOG):
delete_lb_tf.run()
def update_load_balancer(self, load_balancer_id, load_balancer_updates):
def update_load_balancer(self, original_load_balancer,
load_balancer_updates):
"""Updates a load balancer.
:param load_balancer_id: ID of the load balancer to update
:param original_load_balancer: Dict of the load balancer to update
:param load_balancer_updates: Dict containing updated load balancer
:returns: None
:raises LBNotFound: The referenced load balancer was not found
"""
lb = None
try:
lb = self._get_db_obj_until_pending_update(
self._lb_repo, load_balancer_id)
except tenacity.RetryError as e:
LOG.warning('Load balancer did not go into %s in 60 seconds. '
'This either due to an in-progress Octavia upgrade '
'or an overloaded and failing database. Assuming '
'an upgrade is in progress and continuing.',
constants.PENDING_UPDATE)
lb = e.last_attempt.result()
update_lb_tf = self._taskflow_load(
self._lb_flows.get_update_load_balancer_flow(),
store={constants.LOADBALANCER: lb,
constants.LOADBALANCER_ID: lb.id,
store={constants.LOADBALANCER: original_load_balancer,
constants.LOADBALANCER_ID:
original_load_balancer[constants.LOADBALANCER_ID],
constants.UPDATE_DICT: load_balancer_updates})
with tf_logging.DynamicLoggingListener(update_lb_tf,
@ -413,6 +425,8 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine):
pool = self._pool_repo.get(db_apis.get_session(),
id=member[constants.POOL_ID])
load_balancer = pool.load_balancer
provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
load_balancer).to_dict()
listeners_dicts = (
provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
@ -423,7 +437,7 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine):
store={constants.MEMBER: member,
constants.LISTENERS: listeners_dicts,
constants.LOADBALANCER_ID: load_balancer.id,
constants.LOADBALANCER: load_balancer,
constants.LOADBALANCER: provider_lb,
constants.POOL_ID: pool.id})
with tf_logging.DynamicLoggingListener(create_member_tf,
log=LOG):
@ -440,6 +454,8 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine):
id=member[constants.POOL_ID])
load_balancer = pool.load_balancer
provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
load_balancer).to_dict()
listeners_dicts = (
provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
@ -449,7 +465,7 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine):
self._member_flows.get_delete_member_flow(),
store={constants.MEMBER: member,
constants.LISTENERS: listeners_dicts,
constants.LOADBALANCER: load_balancer,
constants.LOADBALANCER: provider_lb,
constants.LOADBALANCER_ID: load_balancer.id,
constants.POOL_ID: pool.id,
constants.PROJECT_ID: load_balancer.project_id
@ -488,12 +504,14 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine):
listeners_dicts = (
provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
pool.listeners))
provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
load_balancer).to_dict()
batch_update_members_tf = self._taskflow_load(
self._member_flows.get_batch_update_members_flow(
provider_old_members, new_members, updated_members),
store={constants.LISTENERS: listeners_dicts,
constants.LOADBALANCER: load_balancer,
constants.LOADBALANCER: provider_lb,
constants.LOADBALANCER_ID: load_balancer.id,
constants.POOL_ID: pool.id,
constants.PROJECT_ID: load_balancer.project_id})
@ -513,6 +531,8 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine):
pool = self._pool_repo.get(db_apis.get_session(),
id=member[constants.POOL_ID])
load_balancer = pool.load_balancer
provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
load_balancer).to_dict()
listeners_dicts = (
provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
@ -522,7 +542,7 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine):
self._member_flows.get_update_member_flow(),
store={constants.MEMBER: member,
constants.LISTENERS: listeners_dicts,
constants.LOADBALANCER: load_balancer,
constants.LOADBALANCER: provider_lb,
constants.LOADBALANCER_ID: load_balancer.id,
constants.POOL_ID: pool.id,
constants.UPDATE_DICT: member_updates})
@ -548,6 +568,8 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine):
raise db_exceptions.NoResultFound
load_balancer = db_pool.load_balancer
provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
load_balancer).to_dict()
listeners_dicts = (
provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
@ -558,7 +580,7 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine):
store={constants.POOL_ID: pool[constants.POOL_ID],
constants.LISTENERS: listeners_dicts,
constants.LOADBALANCER_ID: load_balancer.id,
constants.LOADBALANCER: load_balancer})
constants.LOADBALANCER: provider_lb})
with tf_logging.DynamicLoggingListener(create_pool_tf,
log=LOG):
create_pool_tf.run()
@ -578,11 +600,14 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine):
db_pool.listeners))
load_balancer = db_pool.load_balancer
provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
load_balancer).to_dict()
delete_pool_tf = self._taskflow_load(
self._pool_flows.get_delete_pool_flow(),
store={constants.POOL_ID: pool[constants.POOL_ID],
constants.LISTENERS: listeners_dicts,
constants.LOADBALANCER: load_balancer,
constants.LOADBALANCER: provider_lb,
constants.LOADBALANCER_ID: load_balancer.id,
constants.PROJECT_ID: db_pool.project_id})
with tf_logging.DynamicLoggingListener(delete_pool_tf,
@ -609,6 +634,8 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine):
db_pool = e.last_attempt.result()
load_balancer = db_pool.load_balancer
provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
load_balancer).to_dict()
listeners_dicts = (
provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
@ -618,7 +645,7 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine):
self._pool_flows.get_update_pool_flow(),
store={constants.POOL_ID: db_pool.id,
constants.LISTENERS: listeners_dicts,
constants.LOADBALANCER: load_balancer,
constants.LOADBALANCER: provider_lb,
constants.LOADBALANCER_ID: load_balancer.id,
constants.UPDATE_DICT: pool_updates})
with tf_logging.DynamicLoggingListener(update_pool_tf,
@ -645,6 +672,8 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine):
raise db_exceptions.NoResultFound
load_balancer = l7policy.listener.load_balancer
provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
load_balancer).to_dict()
listeners_dicts = (
provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
@ -655,7 +684,7 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine):
store={constants.L7POLICY: l7policy,
constants.LISTENERS: listeners_dicts,
constants.LOADBALANCER_ID: load_balancer.id,
constants.LOADBALANCER: load_balancer})
constants.LOADBALANCER: provider_lb})
with tf_logging.DynamicLoggingListener(create_l7policy_tf,
log=LOG):
create_l7policy_tf.run()
@ -676,12 +705,15 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine):
provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
[l7policy.listener]))
provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
load_balancer).to_dict()
delete_l7policy_tf = self._taskflow_load(
self._l7policy_flows.get_delete_l7policy_flow(),
store={constants.L7POLICY: l7policy,
constants.LISTENERS: listeners_dicts,
constants.LOADBALANCER_ID: load_balancer.id,
constants.LOADBALANCER: load_balancer})
constants.LOADBALANCER: provider_lb})
with tf_logging.DynamicLoggingListener(delete_l7policy_tf,
log=LOG):
delete_l7policy_tf.run()
@ -707,6 +739,8 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine):
l7policy = e.last_attempt.result()
load_balancer = l7policy.listener.load_balancer
provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
load_balancer).to_dict()
listeners_dicts = (
provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
@ -716,7 +750,7 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine):
self._l7policy_flows.get_update_l7policy_flow(),
store={constants.L7POLICY: l7policy,
constants.LISTENERS: listeners_dicts,
constants.LOADBALANCER: load_balancer,
constants.LOADBALANCER: provider_lb,
constants.LOADBALANCER_ID: load_balancer.id,
constants.UPDATE_DICT: l7policy_updates})
with tf_logging.DynamicLoggingListener(update_l7policy_tf,
@ -744,6 +778,8 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine):
l7policy = l7rule.l7policy
load_balancer = l7policy.listener.load_balancer
provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
load_balancer).to_dict()
listeners_dicts = (
provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
@ -755,7 +791,7 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine):
constants.L7POLICY: l7policy,
constants.LISTENERS: listeners_dicts,
constants.LOADBALANCER_ID: load_balancer.id,
constants.LOADBALANCER: load_balancer})
constants.LOADBALANCER: provider_lb})
with tf_logging.DynamicLoggingListener(create_l7rule_tf,
log=LOG):
create_l7rule_tf.run()
@ -776,13 +812,16 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine):
provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
[l7policy.listener]))
provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
load_balancer).to_dict()
delete_l7rule_tf = self._taskflow_load(
self._l7rule_flows.get_delete_l7rule_flow(),
store={constants.L7RULE: l7rule,
constants.L7POLICY: l7policy,
constants.LISTENERS: listeners_dicts,
constants.LOADBALANCER_ID: load_balancer.id,
constants.LOADBALANCER: load_balancer})
constants.LOADBALANCER: provider_lb})
with tf_logging.DynamicLoggingListener(delete_l7rule_tf,
log=LOG):
delete_l7rule_tf.run()
@ -809,6 +848,8 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine):
l7policy = l7rule.l7policy
load_balancer = l7policy.listener.load_balancer
provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
load_balancer).to_dict()
listeners_dicts = (
provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
@ -819,7 +860,7 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine):
store={constants.L7RULE: l7rule,
constants.L7POLICY: l7policy,
constants.LISTENERS: listeners_dicts,
constants.LOADBALANCER: load_balancer,
constants.LOADBALANCER: provider_lb,
constants.LOADBALANCER_ID: load_balancer.id,
constants.UPDATE_DICT: l7rule_updates})
with tf_logging.DynamicLoggingListener(update_l7rule_tf,
@ -872,6 +913,8 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine):
# as well
lb = self._amphora_repo.get_lb_for_amphora(
db_apis.get_session(), amp.id)
provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
lb).to_dict() if lb else lb
if CONF.nova.enable_anti_affinity and lb:
stored_params[constants.SERVER_GROUP_ID] = lb.server_group_id
if lb and lb.flavor_id:
@ -889,7 +932,7 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine):
failover_amphora_tf = self._taskflow_load(
self._amphora_flows.get_failover_flow(
role=amp.role, load_balancer=lb),
role=amp.role, load_balancer=provider_lb),
store=stored_params)
with tf_logging.DynamicLoggingListener(failover_amphora_tf, log=LOG):
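
Note: every flow launch in this worker now repeats the same idiom: keep the SQLAlchemy object for database-side values (IDs, project, topology) and feed a provider-dict rendering of the load balancer into the taskflow store. Condensed from the hunks above, with pool and constants as in the surrounding code:

from octavia.api.drivers import utils as provider_utils

load_balancer = pool.load_balancer        # SQLAlchemy model, still used for IDs
provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
    load_balancer).to_dict()              # plain dict handed to the flow's tasks

store = {constants.LOADBALANCER_ID: load_balancer.id,
         constants.LOADBALANCER: provider_lb}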

View File

@ -25,12 +25,17 @@ from octavia.controller.worker.v2.tasks import compute_tasks
from octavia.controller.worker.v2.tasks import database_tasks
from octavia.controller.worker.v2.tasks import lifecycle_tasks
from octavia.controller.worker.v2.tasks import network_tasks
from octavia.db import api as db_apis
from octavia.db import repositories as repo
CONF = cfg.CONF
class AmphoraFlows(object):
def __init__(self):
self.lb_repo = repo.LoadBalancerRepository()
def get_create_amphora_flow(self):
"""Creates a flow to create an amphora.
@ -502,7 +507,9 @@ class AmphoraFlows(object):
# amp on the LB, they let the task index into a list of amps
# to find the amphora it should work on.
amp_index = 0
for amp in load_balancer.amphorae:
db_lb = self.lb_repo.get(db_apis.get_session(),
id=load_balancer[constants.LOADBALANCER_ID])
for amp in db_lb.amphorae:
if amp.status == constants.DELETED:
continue
update_amps_subflow.add(

View File

@ -30,6 +30,8 @@ from octavia.controller.worker.v2.tasks import compute_tasks
from octavia.controller.worker.v2.tasks import database_tasks
from octavia.controller.worker.v2.tasks import lifecycle_tasks
from octavia.controller.worker.v2.tasks import network_tasks
from octavia.db import api as db_apis
from octavia.db import repositories as repo
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
@ -42,6 +44,7 @@ class LoadBalancerFlows(object):
self.listener_flows = listener_flows.ListenerFlows()
self.pool_flows = pool_flows.PoolFlows()
self.member_flows = member_flows.MemberFlows()
self.lb_repo = repo.LoadBalancerRepository()
def get_create_load_balancer_flow(self, topology, listeners=None):
"""Creates a conditional graph flow that allocates a loadbalancer to
@ -214,18 +217,25 @@ class LoadBalancerFlows(object):
the listeners stored properly
"""
listeners_delete_flow = unordered_flow.Flow('listener_delete_flow')
db_lb = self.lb_repo.get(db_apis.get_session(),
id=lb[constants.LOADBALANCER_ID])
for listener in db_lb.listeners:
listener_name = 'listener_' + listener.id
listeners_delete_flow.add(
self.listener_flows.get_delete_listener_internal_flow(
listener_name))
return listeners_delete_flow
def get_delete_listeners_store(self, lb):
store = {}
for listener in lb.listeners:
listener_name = 'listener_' + listener.id
prov_listener = provider_utils.db_listener_to_provider_listener(
listener)
store[listener_name] = prov_listener.to_dict()
listeners_delete_flow.add(
self.listener_flows.get_delete_listener_internal_flow(
listener_name))
store.update({constants.LOADBALANCER_ID: lb.id,
constants.PROJECT_ID: lb.project_id})
return (listeners_delete_flow, store)
store.update({constants.LOADBALANCER_ID: lb.id,
constants.PROJECT_ID: lb.project_id})
return store
def get_delete_load_balancer_flow(self, lb):
"""Creates a flow to delete a load balancer.
@ -234,6 +244,13 @@ class LoadBalancerFlows(object):
"""
return self._get_delete_load_balancer_flow(lb, False)
def get_delete_pools_store(self, lb):
store = {}
for pool in lb.pools:
pool_name = 'pool' + pool.id
store[pool_name] = pool.id
return store
def _get_delete_pools_flow(self, lb):
"""Sets up an internal delete flow
@ -244,18 +261,16 @@ class LoadBalancerFlows(object):
the listeners stored properly
"""
pools_delete_flow = unordered_flow.Flow('pool_delete_flow')
store = {}
for pool in lb.pools:
db_lb = self.lb_repo.get(db_apis.get_session(),
id=lb[constants.LOADBALANCER_ID])
for pool in db_lb.pools:
pool_name = 'pool' + pool.id
store[pool_name] = pool.id
pools_delete_flow.add(
self.pool_flows.get_delete_pool_flow_internal(
pool_name))
store[constants.PROJECT_ID] = lb.project_id
return (pools_delete_flow, store)
return pools_delete_flow
def _get_delete_load_balancer_flow(self, lb, cascade):
store = {}
delete_LB_flow = linear_flow.Flow(constants.DELETE_LOADBALANCER_FLOW)
delete_LB_flow.add(lifecycle_tasks.LoadBalancerToErrorOnRevertTask(
requires=constants.LOADBALANCER))
@ -264,9 +279,8 @@ class LoadBalancerFlows(object):
delete_LB_flow.add(database_tasks.MarkLBAmphoraeHealthBusy(
requires=constants.LOADBALANCER))
if cascade:
(listeners_delete, store) = self._get_delete_listeners_flow(lb)
(pools_delete, pool_store) = self._get_delete_pools_flow(lb)
store.update(pool_store)
listeners_delete = self._get_delete_listeners_flow(lb)
pools_delete = self._get_delete_pools_flow(lb)
delete_LB_flow.add(pools_delete)
delete_LB_flow.add(listeners_delete)
delete_LB_flow.add(network_tasks.UnplugVIP(
@ -282,8 +296,8 @@ class LoadBalancerFlows(object):
delete_LB_flow.add(database_tasks.MarkLBDeletedInDB(
requires=constants.LOADBALANCER))
delete_LB_flow.add(database_tasks.DecrementLoadBalancerQuota(
requires=constants.LOADBALANCER))
return (delete_LB_flow, store)
requires=constants.PROJECT_ID))
return delete_LB_flow
def get_cascade_delete_load_balancer_flow(self, lb):
"""Creates a flow to delete a load balancer.

View File

@ -24,6 +24,7 @@ from taskflow.types import failure
from octavia.amphorae.backends.agent import agent_jinja_cfg
from octavia.amphorae.driver_exceptions import exceptions as driver_except
from octavia.api.drivers import utils as provider_utils
from octavia.common import constants
from octavia.common import utils
from octavia.controller.worker import task_utils as task_utilities
@ -84,8 +85,11 @@ class AmpListenersUpdate(BaseAmphoraTask):
db_amp = self.amphora_repo.get(db_apis.get_session(),
id=amp[constants.ID])
db_amphorae.append(db_amp)
db_lb = self.loadbalancer_repo.get(
db_apis.get_session(),
id=loadbalancer[constants.LOADBALANCER_ID])
self.amphora_driver.update_amphora_listeners(
loadbalancer, db_amphorae[amphora_index], timeout_dict)
db_lb, db_amphorae[amphora_index], timeout_dict)
except Exception as e:
amphora_id = amphorae[amphora_index].get(constants.ID)
LOG.error('Failed to update listeners on amphora %s. Skipping '
@ -124,20 +128,24 @@ class ListenersStart(BaseAmphoraTask):
def execute(self, loadbalancer, amphora=None):
"""Execute listener start routines for listeners on an amphora."""
if loadbalancer.listeners:
db_lb = self.loadbalancer_repo.get(
db_apis.get_session(), id=loadbalancer[constants.LOADBALANCER_ID])
if db_lb.listeners:
if amphora is not None:
db_amp = self.amphora_repo.get(db_apis.get_session(),
id=amphora[constants.ID])
else:
db_amp = amphora
self.amphora_driver.start(loadbalancer, db_amp)
self.amphora_driver.start(db_lb, db_amp)
LOG.debug("Started the listeners on the vip")
def revert(self, loadbalancer, *args, **kwargs):
"""Handle failed listeners starts."""
LOG.warning("Reverting listeners starts.")
for listener in loadbalancer.listeners:
db_lb = self.loadbalancer_repo.get(
db_apis.get_session(), id=loadbalancer[constants.LOADBALANCER_ID])
for listener in db_lb.listeners:
self.task_utils.mark_listener_prov_status_error(listener.id)
@ -234,7 +242,9 @@ class AmphoraePostNetworkPlug(BaseAmphoraTask):
def execute(self, loadbalancer, added_ports):
"""Execute post_network_plug routine."""
amp_post_plug = AmphoraPostNetworkPlug()
for amphora in loadbalancer.amphorae:
db_lb = self.loadbalancer_repo.get(
db_apis.get_session(), id=loadbalancer[constants.LOADBALANCER_ID])
for amphora in db_lb.amphorae:
if amphora.id in added_ports:
amp_post_plug.execute(amphora.to_dict(),
added_ports[amphora.id])
@ -243,10 +253,12 @@ class AmphoraePostNetworkPlug(BaseAmphoraTask):
"""Handle a failed post network plug."""
if isinstance(result, failure.Failure):
return
db_lb = self.loadbalancer_repo.get(
db_apis.get_session(), id=loadbalancer[constants.LOADBALANCER_ID])
LOG.warning("Reverting post network plug.")
for amphora in six.moves.filter(
lambda amp: amp.status == constants.AMPHORA_ALLOCATED,
loadbalancer.amphorae):
db_lb.amphorae):
self.task_utils.mark_amphora_status_error(amphora.id)
@ -258,6 +270,8 @@ class AmphoraPostVIPPlug(BaseAmphoraTask):
"""Execute post_vip_routine."""
db_amp = self.amphora_repo.get(db_apis.get_session(),
id=amphora.get(constants.ID))
db_lb = self.loadbalancer_repo.get(
db_apis.get_session(), id=loadbalancer[constants.LOADBALANCER_ID])
vrrp_port = data_models.Port(
**amphorae_network_config[
amphora.get(constants.ID)][constants.VRRP_PORT])
@ -265,7 +279,7 @@ class AmphoraPostVIPPlug(BaseAmphoraTask):
**amphorae_network_config[
amphora.get(constants.ID)][constants.VIP_SUBNET])
self.amphora_driver.post_vip_plug(
db_amp, loadbalancer, amphorae_network_config, vrrp_port=vrrp_port,
db_amp, db_lb, amphorae_network_config, vrrp_port=vrrp_port,
vip_subnet=vip_subnet)
LOG.debug("Notified amphora of vip plug")
@ -275,7 +289,8 @@ class AmphoraPostVIPPlug(BaseAmphoraTask):
return
LOG.warning("Reverting post vip plug.")
self.task_utils.mark_amphora_status_error(amphora.get(constants.ID))
self.task_utils.mark_loadbalancer_prov_status_error(loadbalancer.id)
self.task_utils.mark_loadbalancer_prov_status_error(
loadbalancer[constants.LOADBALANCER_ID])
class AmphoraePostVIPPlug(BaseAmphoraTask):
@ -284,8 +299,10 @@ class AmphoraePostVIPPlug(BaseAmphoraTask):
def execute(self, loadbalancer, amphorae_network_config):
"""Execute post_vip_plug across the amphorae."""
amp_post_vip_plug = AmphoraPostVIPPlug()
for amphora in loadbalancer.amphorae:
amp_post_vip_plug.execute(amphora,
db_lb = self.loadbalancer_repo.get(
db_apis.get_session(), id=loadbalancer[constants.LOADBALANCER_ID])
for amphora in db_lb.amphorae:
amp_post_vip_plug.execute(amphora.to_dict(),
loadbalancer,
amphorae_network_config)
@ -294,7 +311,8 @@ class AmphoraePostVIPPlug(BaseAmphoraTask):
if isinstance(result, failure.Failure):
return
LOG.warning("Reverting amphorae post vip plug.")
self.task_utils.mark_loadbalancer_prov_status_error(loadbalancer.id)
self.task_utils.mark_loadbalancer_prov_status_error(
loadbalancer[constants.LOADBALANCER_ID])
class AmphoraCertUpload(BaseAmphoraTask):
@ -317,6 +335,8 @@ class AmphoraUpdateVRRPInterface(BaseAmphoraTask):
def execute(self, loadbalancer):
"""Execute post_vip_routine."""
amps = []
db_lb = self.loadbalancer_repo.get(
db_apis.get_session(), id=loadbalancer[constants.LOADBALANCER_ID])
timeout_dict = {
constants.CONN_MAX_RETRIES:
CONF.haproxy_amphora.active_connection_max_retries,
@ -324,7 +344,7 @@ class AmphoraUpdateVRRPInterface(BaseAmphoraTask):
CONF.haproxy_amphora.active_connection_rety_interval}
for amp in six.moves.filter(
lambda amp: amp.status == constants.AMPHORA_ALLOCATED,
loadbalancer.amphorae):
db_lb.amphorae):
try:
interface = self.amphora_driver.get_vrrp_interface(
@ -342,17 +362,20 @@ class AmphoraUpdateVRRPInterface(BaseAmphoraTask):
vrrp_interface=interface)
amps.append(self.amphora_repo.get(db_apis.get_session(),
id=amp.id))
loadbalancer.amphorae = amps
return loadbalancer
db_lb.amphorae = amps
return provider_utils.db_loadbalancer_to_provider_loadbalancer(
db_lb).to_dict()
def revert(self, result, loadbalancer, *args, **kwargs):
"""Handle a failed amphora vip plug notification."""
if isinstance(result, failure.Failure):
return
LOG.warning("Reverting Get Amphora VRRP Interface.")
db_lb = self.loadbalancer_repo.get(
db_apis.get_session(), id=loadbalancer[constants.LOADBALANCER_ID])
for amp in six.moves.filter(
lambda amp: amp.status == constants.AMPHORA_ALLOCATED,
loadbalancer.amphorae):
db_lb.amphorae):
try:
self.amphora_repo.update(db_apis.get_session(), amp.id,
@ -368,28 +391,34 @@ class AmphoraVRRPUpdate(BaseAmphoraTask):
def execute(self, loadbalancer, amphorae_network_config):
"""Execute update_vrrp_conf."""
self.amphora_driver.update_vrrp_conf(loadbalancer,
db_lb = self.loadbalancer_repo.get(
db_apis.get_session(), id=loadbalancer[constants.LOADBALANCER_ID])
self.amphora_driver.update_vrrp_conf(db_lb,
amphorae_network_config)
LOG.debug("Uploaded VRRP configuration of loadbalancer %s amphorae",
loadbalancer.id)
loadbalancer[constants.LOADBALANCER_ID])
class AmphoraVRRPStop(BaseAmphoraTask):
"""Task to stop keepalived of all amphorae of a LB."""
def execute(self, loadbalancer):
self.amphora_driver.stop_vrrp_service(loadbalancer)
db_lb = self.loadbalancer_repo.get(
db_apis.get_session(), id=loadbalancer[constants.LOADBALANCER_ID])
self.amphora_driver.stop_vrrp_service(db_lb)
LOG.debug("Stopped VRRP of loadbalancer %s amphorae",
loadbalancer.id)
loadbalancer[constants.LOADBALANCER_ID])
class AmphoraVRRPStart(BaseAmphoraTask):
"""Task to start keepalived of all amphorae of a LB."""
def execute(self, loadbalancer):
self.amphora_driver.start_vrrp_service(loadbalancer)
db_lb = self.loadbalancer_repo.get(
db_apis.get_session(), id=loadbalancer[constants.LOADBALANCER_ID])
self.amphora_driver.start_vrrp_service(db_lb)
LOG.debug("Started VRRP of loadbalancer %s amphorae",
loadbalancer.id)
loadbalancer[constants.LOADBALANCER_ID])
class AmphoraComputeConnectivityWait(BaseAmphoraTask):
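
Note: the recurring shape of these task changes is that the store hands each task a provider dict, and any task that still needs ORM relationships (listeners, amphorae, vip) re-reads the load balancer from the repository by ID. A minimal sketch of that idiom outside a task class, with constants as in the hunks above:

from octavia.db import api as db_apis
from octavia.db import repositories as repo

loadbalancer_repo = repo.LoadBalancerRepository()

def execute(loadbalancer):                 # the task now receives a plain dict
    db_lb = loadbalancer_repo.get(
        db_apis.get_session(),
        id=loadbalancer[constants.LOADBALANCER_ID])
    for amphora in db_lb.amphorae:         # ORM relationships come from the re-read model
        ...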

View File

@ -29,6 +29,8 @@ from octavia.common.jinja.logging import logging_jinja_cfg
from octavia.common.jinja import user_data_jinja_cfg
from octavia.common import utils
from octavia.controller.worker import amphora_rate_limit
from octavia.db import api as db_apis
from octavia.db import repositories as repo
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
@ -44,6 +46,7 @@ class BaseComputeTask(task.Task):
name=CONF.controller_worker.compute_driver,
invoke_on_load=True
).driver
self.loadbalancer_repo = repo.LoadBalancerRepository()
self.rate_limit = amphora_rate_limit.AmphoraBuildRateLimit()
@ -180,7 +183,9 @@ class DeleteAmphoraeOnLoadBalancer(BaseComputeTask):
"""
def execute(self, loadbalancer):
for amp in loadbalancer.amphorae:
db_lb = self.loadbalancer_repo.get(
db_apis.get_session(), id=loadbalancer[constants.LOADBALANCER_ID])
for amp in db_lb.amphorae:
# The compute driver will already handle NotFound
try:
self.compute.delete(amp.compute_id)

View File

@ -144,7 +144,9 @@ class MarkLBAmphoraeDeletedInDB(BaseDatabaseTask):
marked DELETED.
:returns: None
"""
for amp in loadbalancer.amphorae:
db_lb = self.repos.load_balancer.get(
db_apis.get_session(), id=loadbalancer[constants.LOADBALANCER_ID])
for amp in db_lb.amphorae:
LOG.debug("Marking amphora %s DELETED ", amp.id)
self.amphora_repo.update(db_apis.get_session(),
id=amp.id, status=constants.DELETED)
@ -397,8 +399,11 @@ class ReloadLoadBalancer(BaseDatabaseTask):
LOG.debug("Get load balancer from DB for load balancer id: %s ",
loadbalancer_id)
return self.loadbalancer_repo.get(db_apis.get_session(),
id=loadbalancer_id)
db_lb = self.loadbalancer_repo.get(db_apis.get_session(),
id=loadbalancer_id)
lb_dict = provider_utils.db_loadbalancer_to_provider_loadbalancer(
db_lb)
return lb_dict.to_dict()
class UpdateVIPAfterAllocation(BaseDatabaseTask):
@ -413,10 +418,13 @@ class UpdateVIPAfterAllocation(BaseDatabaseTask):
:returns: The load balancer object.
"""
self.repos.vip.update(db_apis.get_session(), loadbalancer_id,
port_id=vip.port_id, subnet_id=vip.subnet_id,
ip_address=vip.ip_address)
return self.repos.load_balancer.get(db_apis.get_session(),
id=loadbalancer_id)
port_id=vip[constants.PORT_ID],
subnet_id=vip[constants.SUBNET_ID],
ip_address=vip[constants.IP_ADDRESS])
db_lb = self.repos.load_balancer.get(db_apis.get_session(),
id=loadbalancer_id)
return provider_utils.db_loadbalancer_to_provider_loadbalancer(
db_lb).to_dict()
class UpdateAmphoraeVIPData(BaseDatabaseTask):
@ -1016,14 +1024,17 @@ class MarkLBActiveInDB(BaseDatabaseTask):
if self.mark_subobjects:
LOG.debug("Marking all listeners of loadbalancer %s ACTIVE",
loadbalancer.id)
for listener in loadbalancer.listeners:
loadbalancer[constants.LOADBALANCER_ID])
db_lb = self.loadbalancer_repo.get(
db_apis.get_session(),
id=loadbalancer[constants.LOADBALANCER_ID])
for listener in db_lb.listeners:
self._mark_listener_status(listener, constants.ACTIVE)
LOG.info("Mark ACTIVE in DB for load balancer id: %s",
loadbalancer.id)
loadbalancer[constants.LOADBALANCER_ID])
self.loadbalancer_repo.update(db_apis.get_session(),
loadbalancer.id,
loadbalancer[constants.LOADBALANCER_ID],
provisioning_status=constants.ACTIVE)
def _mark_listener_status(self, listener, status):
@ -1094,8 +1105,11 @@ class MarkLBActiveInDB(BaseDatabaseTask):
if self.mark_subobjects:
LOG.debug("Marking all listeners of loadbalancer %s ERROR",
loadbalancer.id)
for listener in loadbalancer.listeners:
loadbalancer[constants.LOADBALANCER_ID])
db_lb = self.loadbalancer_repo.get(
db_apis.get_session(),
id=loadbalancer[constants.LOADBALANCER_ID])
for listener in db_lb.listeners:
try:
self._mark_listener_status(listener, constants.ERROR)
except Exception:
@ -1103,8 +1117,10 @@ class MarkLBActiveInDB(BaseDatabaseTask):
"status", listener.id)
LOG.warning("Reverting mark load balancer deleted in DB "
"for load balancer id %s", loadbalancer.id)
self.task_utils.mark_loadbalancer_prov_status_error(loadbalancer.id)
"for load balancer id %s",
loadbalancer[constants.LOADBALANCER_ID])
self.task_utils.mark_loadbalancer_prov_status_error(
loadbalancer[constants.LOADBALANCER_ID])
class MarkLBActiveInDBByListener(BaseDatabaseTask):
@ -1196,9 +1212,9 @@ class MarkLBDeletedInDB(BaseDatabaseTask):
"""
LOG.debug("Mark DELETED in DB for load balancer id: %s",
loadbalancer.id)
loadbalancer[constants.LOADBALANCER_ID])
self.loadbalancer_repo.update(db_apis.get_session(),
loadbalancer.id,
loadbalancer[constants.LOADBALANCER_ID],
provisioning_status=constants.DELETED)
def revert(self, loadbalancer, *args, **kwargs):
@ -1209,8 +1225,10 @@ class MarkLBDeletedInDB(BaseDatabaseTask):
"""
LOG.warning("Reverting mark load balancer deleted in DB "
"for load balancer id %s", loadbalancer.id)
self.task_utils.mark_loadbalancer_prov_status_error(loadbalancer.id)
"for load balancer id %s",
loadbalancer[constants.LOADBALANCER_ID])
self.task_utils.mark_loadbalancer_prov_status_error(
loadbalancer[constants.LOADBALANCER_ID])
class MarkLBPendingDeleteInDB(BaseDatabaseTask):
@ -1227,9 +1245,9 @@ class MarkLBPendingDeleteInDB(BaseDatabaseTask):
"""
LOG.debug("Mark PENDING DELETE in DB for load balancer id: %s",
loadbalancer.id)
loadbalancer[constants.LOADBALANCER_ID])
self.loadbalancer_repo.update(db_apis.get_session(),
loadbalancer.id,
loadbalancer[constants.LOADBALANCER_ID],
provisioning_status=(constants.
PENDING_DELETE))
@ -1241,8 +1259,10 @@ class MarkLBPendingDeleteInDB(BaseDatabaseTask):
"""
LOG.warning("Reverting mark load balancer pending delete in DB "
"for load balancer id %s", loadbalancer.id)
self.task_utils.mark_loadbalancer_prov_status_error(loadbalancer.id)
"for load balancer id %s",
loadbalancer[constants.LOADBALANCER_ID])
self.task_utils.mark_loadbalancer_prov_status_error(
loadbalancer[constants.LOADBALANCER_ID])
class MarkLBAndListenersActiveInDB(BaseDatabaseTask):
@ -1373,13 +1393,15 @@ class UpdateLoadbalancerInDB(BaseDatabaseTask):
:returns: None
"""
LOG.debug("Update DB for loadbalancer id: %s ", loadbalancer.id)
LOG.debug("Update DB for loadbalancer id: %s ",
loadbalancer[constants.LOADBALANCER_ID])
if update_dict.get('vip'):
vip_dict = update_dict.pop('vip')
self.vip_repo.update(db_apis.get_session(),
loadbalancer.vip.load_balancer_id,
loadbalancer[constants.LOADBALANCER_ID],
**vip_dict)
self.loadbalancer_repo.update(db_apis.get_session(), loadbalancer.id,
self.loadbalancer_repo.update(db_apis.get_session(),
loadbalancer[constants.LOADBALANCER_ID],
**update_dict)
def revert(self, loadbalancer, *args, **kwargs):
@ -1390,9 +1412,11 @@ class UpdateLoadbalancerInDB(BaseDatabaseTask):
"""
LOG.warning("Reverting update loadbalancer in DB "
"for loadbalancer id %s", loadbalancer.id)
"for loadbalancer id %s",
loadbalancer[constants.LOADBALANCER_ID])
self.task_utils.mark_loadbalancer_prov_status_error(loadbalancer.id)
self.task_utils.mark_loadbalancer_prov_status_error(
loadbalancer[constants.LOADBALANCER_ID])
class UpdateHealthMonInDB(BaseDatabaseTask):
@ -1644,7 +1668,9 @@ class GetAmphoraeFromLoadbalancer(BaseDatabaseTask):
:returns: A list of Listener objects
"""
amphorae = []
for amp in loadbalancer.amphorae:
db_lb = self.repos.load_balancer.get(
db_apis.get_session(), id=loadbalancer[constants.LOADBALANCER_ID])
for amp in db_lb.amphorae:
a = self.amphora_repo.get(db_apis.get_session(), id=amp.id,
show_deleted=False)
if a is None:
@ -1663,7 +1689,9 @@ class GetListenersFromLoadbalancer(BaseDatabaseTask):
:returns: A list of Listener objects
"""
listeners = []
for listener in loadbalancer.listeners:
db_lb = self.repos.load_balancer.get(
db_apis.get_session(), id=loadbalancer[constants.LOADBALANCER_ID])
for listener in db_lb.listeners:
db_l = self.listener_repo.get(db_apis.get_session(),
id=listener.id)
prov_listener = provider_utils.db_listener_to_provider_listener(
@ -1681,7 +1709,9 @@ class GetVipFromLoadbalancer(BaseDatabaseTask):
:param loadbalancer: Load balancer which VIP is required
:returns: VIP associated with a given load balancer
"""
return loadbalancer.vip
db_lb = self.repos.load_balancer.get(
db_apis.get_session(), id=loadbalancer[constants.LOADBALANCER_ID])
return db_lb.vip.to_dict(recurse=True)
class CreateVRRPGroupForLB(BaseDatabaseTask):
@ -1695,10 +1725,11 @@ class CreateVRRPGroupForLB(BaseDatabaseTask):
:returns: Updated load balancer
"""
try:
loadbalancer.vrrp_group = self.repos.vrrpgroup.create(
self.repos.vrrpgroup.create(
db_apis.get_session(),
load_balancer_id=loadbalancer.id,
vrrp_group_name=str(loadbalancer.id).replace('-', ''),
load_balancer_id=loadbalancer[constants.LOADBALANCER_ID],
vrrp_group_name=str(
loadbalancer[constants.LOADBALANCER_ID]).replace('-', ''),
vrrp_auth_type=constants.VRRP_AUTH_DEFAULT,
vrrp_auth_pass=uuidutils.generate_uuid().replace('-', '')[0:7],
advert_int=CONF.keepalived_vrrp.vrrp_advert_int)
@ -1737,7 +1768,9 @@ class DisableLBAmphoraeHealthMonitoring(BaseDatabaseTask):
:param loadbalancer: The load balancer to disable health monitoring on
:returns: None
"""
for amphora in loadbalancer.amphorae:
db_lb = self.loadbalancer_repo.get(
db_apis.get_session(), id=loadbalancer[constants.LOADBALANCER_ID])
for amphora in db_lb.amphorae:
self._delete_from_amp_health(amphora.id)
@ -1770,7 +1803,9 @@ class MarkLBAmphoraeHealthBusy(BaseDatabaseTask):
:param loadbalancer: The load balancer to mark amphorae health busy
:returns: None
"""
for amphora in loadbalancer.amphorae:
db_lb = self.loadbalancer_repo.get(
db_apis.get_session(), id=loadbalancer[constants.LOADBALANCER_ID])
for amphora in db_lb.amphorae:
self._mark_amp_health_busy(amphora.id)
@ -2534,39 +2569,40 @@ class DecrementLoadBalancerQuota(BaseDatabaseTask):
Since sqlalchemy will likely retry by itself always revert if it fails
"""
def execute(self, loadbalancer):
def execute(self, project_id):
"""Decrements the load balancer quota.
:param loadbalancer: The load balancer to decrement the quota on.
:param project_id: Project id where quota should be reduced
:returns: None
"""
LOG.debug("Decrementing load balancer quota for "
"project: %s ", loadbalancer.project_id)
"project: %s ", project_id)
lock_session = db_apis.get_session(autocommit=False)
try:
self.repos.decrement_quota(lock_session,
data_models.LoadBalancer,
loadbalancer.project_id)
project_id)
lock_session.commit()
except Exception:
with excutils.save_and_reraise_exception():
LOG.error('Failed to decrement load balancer quota for '
'project: %(proj)s the project may have excess '
'quota in use.', {'proj': loadbalancer.project_id})
'quota in use.',
{'proj': project_id})
lock_session.rollback()
def revert(self, loadbalancer, result, *args, **kwargs):
def revert(self, project_id, result, *args, **kwargs):
"""Re-apply the quota
:param loadbalancer: The load balancer to decrement the quota on.
:param project_id: The project id to decrement the quota on.
:returns: None
"""
LOG.warning('Reverting decrement quota for load balancer on project '
'%(proj)s Project quota counts may be incorrect.',
{'proj': loadbalancer.project_id})
{'proj': project_id})
# Increment the quota back if this task wasn't the failure
if not isinstance(result, failure.Failure):
@ -2578,7 +2614,7 @@ class DecrementLoadBalancerQuota(BaseDatabaseTask):
self.repos.check_quota_met(session,
lock_session,
data_models.LoadBalancer,
loadbalancer.project_id)
project_id)
lock_session.commit()
except Exception:
lock_session.rollback()
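
Note: the quota task only ever needed the project ID, so it is rekeyed from the full load balancer to PROJECT_ID; the delete flow declares the requirement and the worker's store satisfies it by name. A condensed sketch of that taskflow wiring, with delete_LB_flow and db_lb as in the hunks above:

# Flow definition now only requires the project ID:
delete_LB_flow.add(database_tasks.DecrementLoadBalancerQuota(
    requires=constants.PROJECT_ID))

# The worker's store supplies it, and taskflow injects it as the task's
# project_id argument when execute(project_id) runs:
store = {constants.PROJECT_ID: db_lb.project_id}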

View File

@ -56,7 +56,8 @@ class HealthMonitorToErrorOnRevertTask(BaseLifecycleTask):
def revert(self, health_mon, listeners, loadbalancer, *args, **kwargs):
self.task_utils.mark_health_mon_prov_status_error(health_mon.pool_id)
self.task_utils.mark_pool_prov_status_active(health_mon.pool_id)
self.task_utils.mark_loadbalancer_prov_status_active(loadbalancer.id)
self.task_utils.mark_loadbalancer_prov_status_active(
loadbalancer[constants.LOADBALANCER_ID])
for listener in listeners:
self.task_utils.mark_listener_prov_status_active(
listener[constants.LISTENER_ID])
@ -70,7 +71,8 @@ class L7PolicyToErrorOnRevertTask(BaseLifecycleTask):
def revert(self, l7policy, listeners, loadbalancer, *args, **kwargs):
self.task_utils.mark_l7policy_prov_status_error(l7policy.id)
self.task_utils.mark_loadbalancer_prov_status_active(loadbalancer.id)
self.task_utils.mark_loadbalancer_prov_status_active(
loadbalancer[constants.LOADBALANCER_ID])
for listener in listeners:
self.task_utils.mark_listener_prov_status_active(
listener[constants.LISTENER_ID])
@ -85,7 +87,8 @@ class L7RuleToErrorOnRevertTask(BaseLifecycleTask):
def revert(self, l7rule, listeners, loadbalancer, *args, **kwargs):
self.task_utils.mark_l7rule_prov_status_error(l7rule.id)
self.task_utils.mark_l7policy_prov_status_active(l7rule.l7policy_id)
self.task_utils.mark_loadbalancer_prov_status_active(loadbalancer.id)
self.task_utils.mark_loadbalancer_prov_status_active(
loadbalancer[constants.LOADBALANCER_ID])
for listener in listeners:
self.task_utils.mark_listener_prov_status_active(
listener[constants.LISTENER_ID])
@ -135,7 +138,8 @@ class LoadBalancerToErrorOnRevertTask(LoadBalancerIDToErrorOnRevertTask):
pass
def revert(self, loadbalancer, *args, **kwargs):
super(LoadBalancerToErrorOnRevertTask, self).revert(loadbalancer.id)
super(LoadBalancerToErrorOnRevertTask, self).revert(
loadbalancer[constants.LOADBALANCER_ID])
class MemberToErrorOnRevertTask(BaseLifecycleTask):
@ -152,7 +156,8 @@ class MemberToErrorOnRevertTask(BaseLifecycleTask):
self.task_utils.mark_listener_prov_status_active(
listener[constants.LISTENER_ID])
self.task_utils.mark_pool_prov_status_active(pool_id)
self.task_utils.mark_loadbalancer_prov_status_active(loadbalancer.id)
self.task_utils.mark_loadbalancer_prov_status_active(
loadbalancer[constants.LOADBALANCER_ID])
class MembersToErrorOnRevertTask(BaseLifecycleTask):
@ -170,7 +175,8 @@ class MembersToErrorOnRevertTask(BaseLifecycleTask):
self.task_utils.mark_listener_prov_status_active(
listener[constants.LISTENER_ID])
self.task_utils.mark_pool_prov_status_active(pool_id)
self.task_utils.mark_loadbalancer_prov_status_active(loadbalancer.id)
self.task_utils.mark_loadbalancer_prov_status_active(
loadbalancer[constants.LOADBALANCER_ID])
class PoolToErrorOnRevertTask(BaseLifecycleTask):
@ -181,7 +187,8 @@ class PoolToErrorOnRevertTask(BaseLifecycleTask):
def revert(self, pool_id, listeners, loadbalancer, *args, **kwargs):
self.task_utils.mark_pool_prov_status_error(pool_id)
self.task_utils.mark_loadbalancer_prov_status_active(loadbalancer.id)
self.task_utils.mark_loadbalancer_prov_status_active(
loadbalancer[constants.LOADBALANCER_ID])
for listener in listeners:
self.task_utils.mark_listener_prov_status_active(
listener[constants.LISTENER_ID])

View File

@ -20,6 +20,7 @@ from taskflow import task
from taskflow.types import failure
from octavia.common import constants
from octavia.common import data_models
from octavia.common import utils
from octavia.controller.worker import task_utils
from octavia.db import api as db_apis
@ -62,8 +63,9 @@ class CalculateAmphoraDelta(BaseNetworkTask):
amphora[constants.VRRP_PORT_ID])
desired_network_ids = {vrrp_port.network_id}.union(
CONF.controller_worker.amp_boot_network_list)
for pool in loadbalancer.pools:
db_lb = self.loadbalancer_repo.get(
db_apis.get_session(), id=loadbalancer[constants.LOADBALANCER_ID])
for pool in db_lb.pools:
member_networks = [
self.network_driver.get_subnet(member.subnet_id).network_id
for member in pool.members
@ -113,9 +115,11 @@ class CalculateDelta(BaseNetworkTask):
calculate_amp = CalculateAmphoraDelta()
deltas = {}
db_lb = self.loadbalancer_repo.get(
db_apis.get_session(), id=loadbalancer[constants.LOADBALANCER_ID])
for amphora in six.moves.filter(
lambda amp: amp.status == constants.AMPHORA_ALLOCATED,
loadbalancer.amphorae):
db_lb.amphorae):
delta = calculate_amp.execute(loadbalancer, amphora.to_dict())
deltas[amphora.id] = delta
@ -210,7 +214,7 @@ class UnPlugNetworks(BaseNetworkTask):
class GetMemberPorts(BaseNetworkTask):
def execute(self, loadbalancer, amphora):
vip_port = self.network_driver.get_port(loadbalancer.vip.port_id)
vip_port = self.network_driver.get_port(loadbalancer['vip_port_id'])
member_ports = []
interfaces = self.network_driver.get_plugged_networks(
amphora[constants.COMPUTE_ID])
@ -339,10 +343,12 @@ class PlugVIP(BaseNetworkTask):
def execute(self, loadbalancer):
"""Plumb a vip to an amphora."""
LOG.debug("Plumbing VIP for loadbalancer id: %s", loadbalancer.id)
amps_data = self.network_driver.plug_vip(loadbalancer,
loadbalancer.vip)
LOG.debug("Plumbing VIP for loadbalancer id: %s",
loadbalancer[constants.LOADBALANCER_ID])
db_lb = self.loadbalancer_repo.get(
db_apis.get_session(), id=loadbalancer[constants.LOADBALANCER_ID])
amps_data = self.network_driver.plug_vip(db_lb,
db_lb.vip)
return [amp.to_dict() for amp in amps_data]
def revert(self, result, loadbalancer, *args, **kwargs):
@ -351,23 +357,25 @@ class PlugVIP(BaseNetworkTask):
if isinstance(result, failure.Failure):
return
LOG.warning("Unable to plug VIP for loadbalancer id %s",
loadbalancer.id)
loadbalancer[constants.LOADBALANCER_ID])
db_lb = self.loadbalancer_repo.get(
db_apis.get_session(), id=loadbalancer[constants.LOADBALANCER_ID])
try:
# Make sure we have the current port IDs for cleanup
for amp_data in result:
for amphora in six.moves.filter(
# pylint: disable=cell-var-from-loop
lambda amp: amp.id == amp_data.id,
loadbalancer.amphorae):
amphora.vrrp_port_id = amp_data.vrrp_port_id
amphora.ha_port_id = amp_data.ha_port_id
lambda amp: amp.id == amp_data['id'],
db_lb.amphorae):
amphora.vrrp_port_id = amp_data['vrrp_port_id']
amphora.ha_port_id = amp_data['ha_port_id']
self.network_driver.unplug_vip(loadbalancer, loadbalancer.vip)
self.network_driver.unplug_vip(db_lb, db_lb.vip)
except Exception as e:
LOG.error("Failed to unplug VIP. Resources may still "
"be in use from vip: %(vip)s due to error: %(except)s",
{'vip': loadbalancer.vip.ip_address, 'except': e})
{'vip': loadbalancer['vip_address'], 'except': e})
class UpdateVIPSecurityGroup(BaseNetworkTask):
@ -376,9 +384,11 @@ class UpdateVIPSecurityGroup(BaseNetworkTask):
def execute(self, loadbalancer):
"""Task to setup SG for LB."""
LOG.debug("Setup SG for loadbalancer id: %s", loadbalancer.id)
self.network_driver.update_vip_sg(loadbalancer, loadbalancer.vip)
LOG.debug("Setup SG for loadbalancer id: %s",
loadbalancer[constants.LOADBALANCER_ID])
db_lb = self.loadbalancer_repo.get(
db_apis.get_session(), id=loadbalancer[constants.LOADBALANCER_ID])
self.network_driver.update_vip_sg(db_lb, db_lb.vip)
class GetSubnetFromVIP(BaseNetworkTask):
@ -387,10 +397,11 @@ class GetSubnetFromVIP(BaseNetworkTask):
def execute(self, loadbalancer):
"""Plumb a vip to an amphora."""
LOG.debug("Getting subnet for LB: %s", loadbalancer.id)
LOG.debug("Getting subnet for LB: %s",
loadbalancer[constants.LOADBALANCER_ID])
return self.network_driver.get_subnet(
loadbalancer.vip.subnet_id).to_dict()
loadbalancer['vip_subnet_id']).to_dict()
class PlugVIPAmpphora(BaseNetworkTask):
@ -404,8 +415,10 @@ class PlugVIPAmpphora(BaseNetworkTask):
db_amp = self.amphora_repo.get(db_apis.get_session(),
id=amphora.get(constants.ID))
db_subnet = self.network_driver.get_subnet(subnet[constants.ID])
db_lb = self.loadbalancer_repo.get(
db_apis.get_session(), id=loadbalancer[constants.LOADBALANCER_ID])
amp_data = self.network_driver.plug_aap_port(
loadbalancer, loadbalancer.vip, db_amp, db_subnet)
db_lb, db_lb.vip, db_amp, db_subnet)
return amp_data.to_dict()
def revert(self, result, loadbalancer, amphora, subnet, *args, **kwargs):
@ -414,19 +427,23 @@ class PlugVIPAmpphora(BaseNetworkTask):
return
LOG.warning("Unable to plug VIP for amphora id %s "
"load balancer id %s",
amphora.get(constants.ID), loadbalancer.id)
amphora.get(constants.ID),
loadbalancer[constants.LOADBALANCER_ID])
try:
db_amp = self.amphora_repo.get(db_apis.get_session(),
id=amphora.get(constants.ID))
db_amp.vrrp_port_id = result[constants.VRRP_PORT_ID]
db_amp.ha_port_id = result[constants.HA_PORT_ID]
db_lb = self.loadbalancer_repo.get(
db_apis.get_session(),
id=loadbalancer[constants.LOADBALANCER_ID])
self.network_driver.unplug_aap_port(loadbalancer.vip,
self.network_driver.unplug_aap_port(db_lb.vip,
db_amp, subnet)
except Exception as e:
LOG.error('Failed to unplug AAP port. Resources may still be in '
'use for VIP: %s due to error: %s', loadbalancer.vip, e)
'use for VIP: %s due to error: %s', db_lb.vip, e)
class UnplugVIP(BaseNetworkTask):
@ -437,10 +454,13 @@ class UnplugVIP(BaseNetworkTask):
LOG.debug("Unplug vip on amphora")
try:
self.network_driver.unplug_vip(loadbalancer, loadbalancer.vip)
db_lb = self.loadbalancer_repo.get(
db_apis.get_session(),
id=loadbalancer[constants.LOADBALANCER_ID])
self.network_driver.unplug_vip(db_lb, db_lb.vip)
except Exception:
LOG.exception("Unable to unplug vip from load balancer %s",
loadbalancer.id)
loadbalancer[constants.LOADBALANCER_ID])
class AllocateVIP(BaseNetworkTask):
@ -451,10 +471,13 @@ class AllocateVIP(BaseNetworkTask):
LOG.debug("Allocate_vip port_id %s, subnet_id %s,"
"ip_address %s",
loadbalancer.vip.port_id,
loadbalancer.vip.subnet_id,
loadbalancer.vip.ip_address)
return self.network_driver.allocate_vip(loadbalancer)
loadbalancer[constants.VIP_PORT_ID],
loadbalancer[constants.VIP_SUBNET_ID],
loadbalancer[constants.VIP_ADDRESS])
db_lb = self.loadbalancer_repo.get(
db_apis.get_session(), id=loadbalancer[constants.LOADBALANCER_ID])
vip = self.network_driver.allocate_vip(db_lb)
return vip.to_dict()
def revert(self, result, loadbalancer, *args, **kwargs):
"""Handle a failure to allocate vip."""
@ -462,7 +485,7 @@ class AllocateVIP(BaseNetworkTask):
if isinstance(result, failure.Failure):
LOG.exception("Unable to allocate VIP")
return
vip = result
vip = data_models.Vip(**result)
LOG.warning("Deallocating vip %s", vip.ip_address)
try:
self.network_driver.deallocate_vip(vip)
@ -478,14 +501,16 @@ class DeallocateVIP(BaseNetworkTask):
def execute(self, loadbalancer):
"""Deallocate a VIP."""
LOG.debug("Deallocating a VIP %s", loadbalancer.vip.ip_address)
LOG.debug("Deallocating a VIP %s", loadbalancer[constants.VIP_ADDRESS])
# NOTE(blogan): this is kind of ugly but sufficient for now. Drivers
# will need access to the load balancer that the vip is/was attached
# to. However the data model serialization for the vip does not give a
# backref to the loadbalancer if accessed through the loadbalancer.
vip = loadbalancer.vip
vip.load_balancer = loadbalancer
db_lb = self.loadbalancer_repo.get(
db_apis.get_session(), id=loadbalancer[constants.LOADBALANCER_ID])
vip = db_lb.vip
vip.load_balancer = db_lb
self.network_driver.deallocate_vip(vip)
@ -509,7 +534,6 @@ class UpdateVIPForDelete(BaseNetworkTask):
db_apis.get_session(), id=loadbalancer_id)
LOG.debug("Updating VIP for listener delete on load_balancer %s.",
loadbalancer.id)
self.network_driver.update_vip(loadbalancer, for_delete=True)
@ -520,8 +544,10 @@ class GetAmphoraNetworkConfigs(BaseNetworkTask):
LOG.debug("Retrieving vip network details.")
db_amp = self.amphora_repo.get(db_apis.get_session(),
id=amphora.get(constants.ID))
db_lb = self.loadbalancer_repo.get(
db_apis.get_session(), id=loadbalancer[constants.LOADBALANCER_ID])
db_configs = self.network_driver.get_network_configs(
loadbalancer, amphora=db_amp)
db_lb, amphora=db_amp)
provider_dict = {}
for amp_id, amp_conf in six.iteritems(db_configs):
provider_dict[amp_id] = amp_conf.to_dict(recurse=True)
@ -533,7 +559,9 @@ class GetAmphoraeNetworkConfigs(BaseNetworkTask):
def execute(self, loadbalancer):
LOG.debug("Retrieving vip network details.")
db_configs = self.network_driver.get_network_configs(loadbalancer)
db_lb = self.loadbalancer_repo.get(
db_apis.get_session(), id=loadbalancer[constants.LOADBALANCER_ID])
db_configs = self.network_driver.get_network_configs(db_lb)
provider_dict = {}
for amp_id, amp_conf in six.iteritems(db_configs):
provider_dict[amp_id] = amp_conf.to_dict(recurse=True)
@ -641,7 +669,10 @@ class ApplyQos(BaseNetworkTask):
is_revert=False, request_qos_id=None):
"""Call network driver to apply QoS Policy on the vrrp ports."""
if not amps_data:
amps_data = loadbalancer.amphorae
db_lb = self.loadbalancer_repo.get(
db_apis.get_session(),
id=loadbalancer[constants.LOADBALANCER_ID])
amps_data = db_lb.amphorae
apply_qos = ApplyQosAmphora()
for amp_data in amps_data:
@ -650,7 +681,7 @@ class ApplyQos(BaseNetworkTask):
def execute(self, loadbalancer, amps_data=None, update_dict=None):
"""Apply qos policy on the vrrp ports which are related with vip."""
qos_policy_id = loadbalancer.vip.qos_policy_id
qos_policy_id = loadbalancer['vip_qos_policy_id']
if not qos_policy_id and (
update_dict and (
'vip' not in update_dict or
@ -661,9 +692,10 @@ class ApplyQos(BaseNetworkTask):
def revert(self, result, loadbalancer, amps_data=None, update_dict=None,
*args, **kwargs):
"""Handle a failure to apply QoS to VIP"""
request_qos_id = loadbalancer.vip.qos_policy_id
request_qos_id = loadbalancer['vip_qos_policy_id']
orig_lb = self.task_utils.get_current_loadbalancer_from_db(
loadbalancer.id)
loadbalancer[constants.LOADBALANCER_ID])
orig_qos_id = orig_lb.vip.qos_policy_id
if request_qos_id != orig_qos_id:
self._apply_qos_on_vrrp_ports(loadbalancer, amps_data, orig_qos_id,
@ -693,7 +725,7 @@ class ApplyQosAmphora(BaseNetworkTask):
def execute(self, loadbalancer, amp_data=None, update_dict=None):
"""Apply qos policy on the vrrp ports which are related with vip."""
qos_policy_id = loadbalancer.vip.qos_policy_id
qos_policy_id = loadbalancer['vip_qos_policy_id']
if not qos_policy_id and (
update_dict and (
'vip' not in update_dict or
@ -705,9 +737,9 @@ class ApplyQosAmphora(BaseNetworkTask):
*args, **kwargs):
"""Handle a failure to apply QoS to VIP"""
try:
request_qos_id = loadbalancer.vip.qos_policy_id
request_qos_id = loadbalancer['vip_qos_policy_id']
orig_lb = self.task_utils.get_current_loadbalancer_from_db(
loadbalancer.id)
loadbalancer[constants.LOADBALANCER_ID])
orig_qos_id = orig_lb.vip.qos_policy_id
if request_qos_id != orig_qos_id:
self._apply_qos_on_vrrp_port(loadbalancer, amp_data,


@ -60,7 +60,7 @@ class TestAmphoraDriver(base.TestRpc):
provider_lb = driver_dm.LoadBalancer(
loadbalancer_id=self.sample_data.lb_id)
self.amp_driver.loadbalancer_create(provider_lb)
payload = {consts.LOAD_BALANCER_ID: self.sample_data.lb_id,
payload = {consts.LOADBALANCER: provider_lb.to_dict(),
consts.FLAVOR: None,
consts.AVAILABILITY_ZONE: None}
mock_cast.assert_called_with({}, 'create_load_balancer', **payload)
@ -70,7 +70,7 @@ class TestAmphoraDriver(base.TestRpc):
provider_lb = driver_dm.LoadBalancer(
loadbalancer_id=self.sample_data.lb_id)
self.amp_driver.loadbalancer_delete(provider_lb)
payload = {consts.LOAD_BALANCER_ID: self.sample_data.lb_id,
payload = {consts.LOADBALANCER: provider_lb.to_dict(),
'cascade': False}
mock_cast.assert_called_with({}, 'delete_load_balancer', **payload)
@ -88,7 +88,7 @@ class TestAmphoraDriver(base.TestRpc):
loadbalancer_id=self.sample_data.lb_id, admin_state_up=True)
lb_dict = {'enabled': True}
self.amp_driver.loadbalancer_update(old_provider_lb, provider_lb)
payload = {consts.LOAD_BALANCER_ID: self.sample_data.lb_id,
payload = {consts.ORIGINAL_LOADBALANCER: old_provider_lb.to_dict(),
consts.LOAD_BALANCER_UPDATES: lb_dict}
mock_cast.assert_called_with({}, 'update_load_balancer', **payload)
@ -100,7 +100,7 @@ class TestAmphoraDriver(base.TestRpc):
loadbalancer_id=self.sample_data.lb_id, name='Great LB')
lb_dict = {'name': 'Great LB'}
self.amp_driver.loadbalancer_update(old_provider_lb, provider_lb)
payload = {consts.LOAD_BALANCER_ID: self.sample_data.lb_id,
payload = {consts.ORIGINAL_LOADBALANCER: old_provider_lb.to_dict(),
consts.LOAD_BALANCER_UPDATES: lb_dict}
mock_cast.assert_called_with({}, 'update_load_balancer', **payload)
@ -114,7 +114,7 @@ class TestAmphoraDriver(base.TestRpc):
vip_qos_policy_id=qos_policy_id)
lb_dict = {'vip': {'qos_policy_id': qos_policy_id}}
self.amp_driver.loadbalancer_update(old_provider_lb, provider_lb)
payload = {consts.LOAD_BALANCER_ID: self.sample_data.lb_id,
payload = {consts.ORIGINAL_LOADBALANCER: old_provider_lb.to_dict(),
consts.LOAD_BALANCER_UPDATES: lb_dict}
mock_cast.assert_called_with({}, 'update_load_balancer', **payload)


@ -41,31 +41,34 @@ class TestEndpoints(base.TestCase):
self.resource = {constants.ID: self.resource_id}
self.server_group_id = 3456
self.listener_dict = {constants.LISTENER_ID: uuidutils.generate_uuid()}
self.loadbalancer_dict = {
constants.LOADBALANCER_ID: uuidutils.generate_uuid()
}
self.flavor_id = uuidutils.generate_uuid()
self.availability_zone = uuidutils.generate_uuid()
def test_create_load_balancer(self):
self.ep.create_load_balancer(self.context, self.resource_id,
self.ep.create_load_balancer(self.context, self.loadbalancer_dict,
flavor=self.flavor_id,
availability_zone=self.availability_zone)
self.ep.worker.create_load_balancer.assert_called_once_with(
self.resource_id, self.flavor_id, self.availability_zone)
self.loadbalancer_dict, self.flavor_id, self.availability_zone)
def test_create_load_balancer_no_flavor_or_az(self):
self.ep.create_load_balancer(self.context, self.resource_id)
self.ep.create_load_balancer(self.context, self.loadbalancer_dict)
self.ep.worker.create_load_balancer.assert_called_once_with(
self.resource_id, None, None)
self.loadbalancer_dict, None, None)
def test_update_load_balancer(self):
self.ep.update_load_balancer(self.context, self.resource_id,
self.ep.update_load_balancer(self.context, self.loadbalancer_dict,
self.resource_updates)
self.ep.worker.update_load_balancer.assert_called_once_with(
self.resource_id, self.resource_updates)
self.loadbalancer_dict, self.resource_updates)
def test_delete_load_balancer(self):
self.ep.delete_load_balancer(self.context, self.resource_id)
self.ep.delete_load_balancer(self.context, self.loadbalancer_dict)
self.ep.worker.delete_load_balancer.assert_called_once_with(
self.resource_id, False)
self.loadbalancer_dict, False)
def test_failover_load_balancer(self):
self.ep.failover_load_balancer(self.context, self.resource_id)


@ -232,10 +232,14 @@ class TestAmphoraFlows(base.TestCase):
result = self.AmpFlow._create_new_amp_for_lb_decider(history)
self.assertFalse(result)
def test_get_failover_flow_allocated(self, mock_get_net_driver):
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
@mock.patch('octavia.db.api.get_session', return_value=mock.MagicMock())
def test_get_failover_flow_allocated(self, mock_session, mock_get_lb,
mock_get_net_driver):
mock_get_lb.return_value = self.lb
provider_lb = {constants.LOADBALANCER_ID: '1234'}
amp_flow = self.AmpFlow.get_failover_flow(
load_balancer=self.lb)
load_balancer=provider_lb)
self.assertIsInstance(amp_flow, flow.Flow)
@ -255,7 +259,7 @@ class TestAmphoraFlows(base.TestCase):
self.assertEqual(12, len(amp_flow.provides))
amp_flow = self.AmpFlow.get_failover_flow(
role=constants.ROLE_MASTER, load_balancer=self.lb)
role=constants.ROLE_MASTER, load_balancer=provider_lb)
self.assertIsInstance(amp_flow, flow.Flow)
@ -275,7 +279,7 @@ class TestAmphoraFlows(base.TestCase):
self.assertEqual(12, len(amp_flow.provides))
amp_flow = self.AmpFlow.get_failover_flow(
role=constants.ROLE_BACKUP, load_balancer=self.lb)
role=constants.ROLE_BACKUP, load_balancer=provider_lb)
self.assertIsInstance(amp_flow, flow.Flow)
@ -295,7 +299,7 @@ class TestAmphoraFlows(base.TestCase):
self.assertEqual(12, len(amp_flow.provides))
amp_flow = self.AmpFlow.get_failover_flow(
role='BOGUSROLE', load_balancer=self.lb)
role='BOGUSROLE', load_balancer=provider_lb)
self.assertIsInstance(amp_flow, flow.Flow)


@ -87,17 +87,22 @@ class TestLoadBalancerFlows(base.TestCase):
listener_mock.id = '123'
lb_mock.listeners = [listener_mock]
lb_flow, store = self.LBFlow.get_delete_load_balancer_flow(lb_mock)
lb_flow = self.LBFlow.get_delete_load_balancer_flow(lb_mock)
self.assertIsInstance(lb_flow, flow.Flow)
self.assertIn(constants.LOADBALANCER, lb_flow.requires)
self.assertIn(constants.SERVER_GROUP_ID, lb_flow.requires)
self.assertIn(constants.PROJECT_ID, lb_flow.requires)
self.assertEqual(0, len(lb_flow.provides))
self.assertEqual(2, len(lb_flow.requires))
self.assertEqual(3, len(lb_flow.requires))
def test_get_delete_load_balancer_flow_cascade(self, mock_get_net_driver):
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
@mock.patch('octavia.db.api.get_session', return_value=mock.MagicMock())
def test_get_delete_load_balancer_flow_cascade(self, mock_session,
mock_get_lb,
mock_get_net_driver):
lb_mock = mock.Mock()
listener_mock = mock.Mock()
listener_mock.id = '123'
@ -112,9 +117,12 @@ class TestLoadBalancerFlows(base.TestCase):
l7_mock = mock.Mock()
l7_mock.id = '678'
listener_mock.l7policies = [l7_mock]
lb_flow, store = self.LBFlow.get_cascade_delete_load_balancer_flow(
lb_mock)
mock_get_lb.return_value = lb_mock
lb_dict = {constants.LOADBALANCER_ID: lb_mock.id}
store = self.LBFlow.get_delete_listeners_store(lb_mock)
store.update(self.LBFlow.get_delete_pools_store(lb_mock))
lb_flow = self.LBFlow.get_cascade_delete_load_balancer_flow(
lb_dict)
self.assertIsInstance(lb_flow, flow.Flow)
self.assertEqual({'listener_123': listener_dict,


@ -46,14 +46,16 @@ _amphora_mock = {
constants.STATUS: constants.AMPHORA_ALLOCATED,
constants.COMPUTE_ID: COMPUTE_ID,
}
_load_balancer_mock = mock.MagicMock()
_load_balancer_mock.id = LB_ID
_db_load_balancer_mock = mock.MagicMock()
_db_load_balancer_mock.id = LB_ID
_listener_mock = mock.MagicMock()
_listener_mock.id = LISTENER_ID
_load_balancer_mock.listeners = [_listener_mock]
_db_load_balancer_mock.listeners = [_listener_mock]
_vip_mock = mock.MagicMock()
_load_balancer_mock.vip = _vip_mock
_LB_mock = mock.MagicMock()
_db_load_balancer_mock.vip = _vip_mock
_LB_mock = {
constants.LOADBALANCER_ID: LB_ID,
}
_amphorae_mock = [_db_amphora_mock]
_network_mock = mock.MagicMock()
_session_mock = mock.MagicMock()
@ -72,8 +74,6 @@ class TestAmphoraDriverTasks(base.TestCase):
def setUp(self):
_LB_mock.amphorae = [_db_amphora_mock]
_LB_mock.id = LB_ID
conf = oslo_fixture.Config(cfg.CONF)
conf.config(group="haproxy_amphora",
active_connection_max_retries=CONN_MAX_RETRIES)
@ -83,7 +83,9 @@ class TestAmphoraDriverTasks(base.TestCase):
loadbalancer_topology=constants.TOPOLOGY_SINGLE)
super(TestAmphoraDriverTasks, self).setUp()
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
def test_amp_listener_update(self,
mock_lb_get,
mock_driver,
mock_generate_uuid,
mock_log,
@ -98,16 +100,17 @@ class TestAmphoraDriverTasks(base.TestCase):
constants.CONN_MAX_RETRIES: 3,
constants.CONN_RETRY_INTERVAL: 4}
mock_amphora_repo_get.return_value = _db_amphora_mock
mock_lb_get.return_value = _db_load_balancer_mock
amp_list_update_obj = amphora_driver_tasks.AmpListenersUpdate()
amp_list_update_obj.execute(_load_balancer_mock, 0,
amp_list_update_obj.execute(_LB_mock, 0,
[_amphora_mock], timeout_dict)
mock_driver.update_amphora_listeners.assert_called_once_with(
_load_balancer_mock, _db_amphora_mock, timeout_dict)
_db_load_balancer_mock, _db_amphora_mock, timeout_dict)
mock_driver.update_amphora_listeners.side_effect = Exception('boom')
amp_list_update_obj.execute(_load_balancer_mock, 0,
amp_list_update_obj.execute(_LB_mock, 0,
[_amphora_mock], timeout_dict)
mock_amphora_repo_update.assert_called_once_with(
@ -141,7 +144,7 @@ class TestAmphoraDriverTasks(base.TestCase):
mock_driver.update.assert_not_called()
# Test the revert
amp = listeners_update_obj.revert(lb)
amp = listeners_update_obj.revert(_LB_mock)
expected_db_calls = [mock.call(_session_mock,
id=listeners[0].id,
provisioning_status=constants.ERROR),
@ -154,7 +157,8 @@ class TestAmphoraDriverTasks(base.TestCase):
@mock.patch('octavia.controller.worker.task_utils.TaskUtils.'
'mark_listener_prov_status_error')
def test_listeners_start(self,
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
def test_listeners_start(self, mock_lb_get,
mock_prov_status_error,
mock_driver,
mock_generate_uuid,
@ -171,18 +175,19 @@ class TestAmphoraDriverTasks(base.TestCase):
# Test no listeners
mock_lb.listeners = None
listeners_start_obj.execute(mock_lb)
mock_lb_get.return_value = mock_lb
listeners_start_obj.execute(_LB_mock)
mock_driver.start.assert_not_called()
# Test with listeners
mock_driver.start.reset_mock()
mock_lb.listeners = [mock_listener]
listeners_start_obj.execute(mock_lb)
listeners_start_obj.execute(_LB_mock)
mock_driver.start.assert_called_once_with(mock_lb, None)
# Test revert
mock_lb.listeners = [mock_listener]
listeners_start_obj.revert(mock_lb)
listeners_start_obj.revert(_LB_mock)
mock_prov_status_error.assert_called_once_with('12345')
def test_listener_delete(self,
@ -330,7 +335,9 @@ class TestAmphoraDriverTasks(base.TestCase):
self.assertIsNone(amp)
def test_amphorae_post_network_plug(self, mock_driver,
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
def test_amphorae_post_network_plug(self, mock_lb_get,
mock_driver,
mock_generate_uuid,
mock_log,
mock_get_session,
@ -341,7 +348,8 @@ class TestAmphoraDriverTasks(base.TestCase):
mock_driver.get_network.return_value = _network_mock
_db_amphora_mock.id = AMP_ID
_db_amphora_mock.compute_id = COMPUTE_ID
_LB_mock.amphorae = [_db_amphora_mock]
_db_load_balancer_mock.amphorae = [_db_amphora_mock]
mock_lb_get.return_value = _db_load_balancer_mock
mock_amphora_repo_get.return_value = _db_amphora_mock
amphora_post_network_plug_obj = (amphora_driver_tasks.
AmphoraePostNetworkPlug())
@ -380,7 +388,8 @@ class TestAmphoraDriverTasks(base.TestCase):
self.assertIsNone(amp)
@mock.patch('octavia.db.repositories.LoadBalancerRepository.update')
def test_amphora_post_vip_plug(self,
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
def test_amphora_post_vip_plug(self, mock_lb_get,
mock_loadbalancer_repo_update,
mock_driver,
mock_generate_uuid,
@ -393,6 +402,7 @@ class TestAmphoraDriverTasks(base.TestCase):
amphorae_net_config_mock = mock.MagicMock()
mock_amphora_repo_get.return_value = _db_amphora_mock
mock_lb_get.return_value = _db_load_balancer_mock
amphora_post_vip_plug_obj = amphora_driver_tasks.AmphoraPostVIPPlug()
amphora_post_vip_plug_obj.execute(_amphora_mock,
_LB_mock,
@ -403,7 +413,7 @@ class TestAmphoraDriverTasks(base.TestCase):
**amphorae_net_config_mock[AMP_ID]['vrrp_port'])
mock_driver.post_vip_plug.assert_called_once_with(
_db_amphora_mock, _LB_mock, amphorae_net_config_mock,
_db_amphora_mock, _db_load_balancer_mock, amphorae_net_config_mock,
vip_subnet=vip_subnet, vrrp_port=vrrp_port)
# Test revert
@ -437,7 +447,8 @@ class TestAmphoraDriverTasks(base.TestCase):
self.assertIsNone(amp)
@mock.patch('octavia.db.repositories.LoadBalancerRepository.update')
def test_amphorae_post_vip_plug(self,
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
def test_amphorae_post_vip_plug(self, mock_lb_get,
mock_loadbalancer_repo_update,
mock_driver,
mock_generate_uuid,
@ -454,12 +465,14 @@ class TestAmphoraDriverTasks(base.TestCase):
**amphorae_net_config_mock[AMP_ID]['vip_subnet'])
vrrp_port = network_data_models.Port(
**amphorae_net_config_mock[AMP_ID]['vrrp_port'])
_db_load_balancer_mock.amphorae = [_db_amphora_mock]
mock_lb_get.return_value = _db_load_balancer_mock
amphora_post_vip_plug_obj = amphora_driver_tasks.AmphoraePostVIPPlug()
amphora_post_vip_plug_obj.execute(_LB_mock,
amphorae_net_config_mock)
mock_driver.post_vip_plug.assert_called_once_with(
_db_amphora_mock, _LB_mock, amphorae_net_config_mock,
_db_amphora_mock, _db_load_balancer_mock, amphorae_net_config_mock,
vip_subnet=vip_subnet, vrrp_port=vrrp_port)
# Test revert
@ -502,7 +515,9 @@ class TestAmphoraDriverTasks(base.TestCase):
mock_driver.upload_cert_amp.assert_called_once_with(
_db_amphora_mock, fer.decrypt(pem_file_mock.encode('utf-8')))
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
def test_amphora_update_vrrp_interface(self,
mock_lb_get,
mock_driver,
mock_generate_uuid,
mock_log,
@ -511,7 +526,8 @@ class TestAmphoraDriverTasks(base.TestCase):
mock_listener_repo_update,
mock_amphora_repo_get,
mock_amphora_repo_update):
_LB_mock.amphorae = _amphorae_mock
_db_load_balancer_mock.amphorae = _amphorae_mock
mock_lb_get.return_value = _db_load_balancer_mock
timeout_dict = {constants.CONN_MAX_RETRIES: CONN_MAX_RETRIES,
constants.CONN_RETRY_INTERVAL: CONN_RETRY_INTERVAL}
@ -524,8 +540,8 @@ class TestAmphoraDriverTasks(base.TestCase):
# Test revert
mock_driver.reset_mock()
_db_load_balancer_mock.amphorae = _amphorae_mock
_LB_mock.amphorae = _amphorae_mock
amphora_update_vrrp_interface_obj.revert("BADRESULT", _LB_mock)
mock_amphora_repo_update.assert_called_with(_session_mock,
_db_amphora_mock.id,
@ -533,6 +549,7 @@ class TestAmphoraDriverTasks(base.TestCase):
mock_driver.reset_mock()
mock_amphora_repo_update.reset_mock()
_db_load_balancer_mock.amphorae = _amphorae_mock
failure_obj = failure.Failure.from_exception(Exception("TESTEXCEPT"))
amphora_update_vrrp_interface_obj.revert(failure_obj, _LB_mock)
@ -543,13 +560,14 @@ class TestAmphoraDriverTasks(base.TestCase):
mock_amphora_repo_update.reset_mock()
mock_amphora_repo_update.side_effect = Exception('fail')
_LB_mock.amphorae = _amphorae_mock
amphora_update_vrrp_interface_obj.revert("BADRESULT", _LB_mock)
mock_amphora_repo_update.assert_called_with(_session_mock,
_db_amphora_mock.id,
vrrp_interface=None)
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
def test_amphora_vrrp_update(self,
mock_lb_get,
mock_driver,
mock_generate_uuid,
mock_log,
@ -559,13 +577,16 @@ class TestAmphoraDriverTasks(base.TestCase):
mock_amphora_repo_get,
mock_amphora_repo_update):
amphorae_network_config = mock.MagicMock()
mock_lb_get.return_value = _db_load_balancer_mock
amphora_vrrp_update_obj = (
amphora_driver_tasks.AmphoraVRRPUpdate())
amphora_vrrp_update_obj.execute(_LB_mock, amphorae_network_config)
mock_driver.update_vrrp_conf.assert_called_once_with(
_LB_mock, amphorae_network_config)
_db_load_balancer_mock, amphorae_network_config)
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
def test_amphora_vrrp_stop(self,
mock_lb_get,
mock_driver,
mock_generate_uuid,
mock_log,
@ -576,10 +597,14 @@ class TestAmphoraDriverTasks(base.TestCase):
mock_amphora_repo_update):
amphora_vrrp_stop_obj = (
amphora_driver_tasks.AmphoraVRRPStop())
mock_lb_get.return_value = _db_load_balancer_mock
amphora_vrrp_stop_obj.execute(_LB_mock)
mock_driver.stop_vrrp_service.assert_called_once_with(_LB_mock)
mock_driver.stop_vrrp_service.assert_called_once_with(
_db_load_balancer_mock)
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
def test_amphora_vrrp_start(self,
mock_lb_get,
mock_driver,
mock_generate_uuid,
mock_log,
@ -590,8 +615,10 @@ class TestAmphoraDriverTasks(base.TestCase):
mock_amphora_repo_update):
amphora_vrrp_start_obj = (
amphora_driver_tasks.AmphoraVRRPStart())
mock_lb_get.return_value = _db_load_balancer_mock
amphora_vrrp_start_obj.execute(_LB_mock)
mock_driver.start_vrrp_service.assert_called_once_with(_LB_mock)
mock_driver.start_vrrp_service.assert_called_once_with(
_db_load_balancer_mock)
def test_amphora_compute_connectivity_wait(self,
mock_driver,


@ -37,6 +37,7 @@ AMP_WAIT = 12
AMPHORA_ID = uuidutils.generate_uuid()
COMPUTE_ID = uuidutils.generate_uuid()
LB_NET_IP = '192.0.2.1'
LB_ID = uuidutils.generate_uuid()
PORT_ID = uuidutils.generate_uuid()
SERVER_GRPOUP_ID = uuidutils.generate_uuid()
@ -57,8 +58,14 @@ _amphora_mock = {
constants.ID: AMPHORA_ID,
constants.COMPUTE_ID: COMPUTE_ID
}
_load_balancer_mock = mock.MagicMock()
_load_balancer_mock.amphorae = [_db_amphora_mock]
_db_load_balancer_mock = mock.MagicMock()
_db_load_balancer_mock.amphorae = [_db_amphora_mock]
_db_load_balancer_mock.to_dict.return_value = {
constants.ID: LB_ID,
}
_load_balancer_mock = {
constants.LOADBALANCER_ID: LB_ID,
}
_port = mock.MagicMock()
_port.id = PORT_ID
@ -498,9 +505,13 @@ class TestComputeTasks(base.TestCase):
mock_remove_from_build_queue.assert_not_called()
@mock.patch('stevedore.driver.DriverManager.driver')
def test_delete_amphorae_on_load_balancer(self, mock_driver):
@mock.patch('octavia.db.api.get_session', return_value=mock.MagicMock())
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
def test_delete_amphorae_on_load_balancer(self, mock_lb_get, mock_session,
mock_driver):
delete_amps = compute_tasks.DeleteAmphoraeOnLoadBalancer()
mock_lb_get.return_value = _db_load_balancer_mock
delete_amps.execute(_load_balancer_mock)
mock_driver.delete.assert_called_once_with(COMPUTE_ID)


@ -22,6 +22,7 @@ from oslo_utils import uuidutils
from sqlalchemy.orm import exc
from taskflow.types import failure
from octavia.api.drivers import utils as provider_utils
from octavia.common import constants
from octavia.common import data_models
from octavia.common import utils
@ -66,9 +67,13 @@ _db_amphora_mock.vrrp_port_id = VRRP_PORT_ID
_db_amphora_mock.role = AMP_ROLE
_db_amphora_mock.vrrp_id = VRRP_ID
_db_amphora_mock.vrrp_priority = VRRP_PRIORITY
_loadbalancer_mock = mock.MagicMock()
_loadbalancer_mock.id = LB_ID
_loadbalancer_mock.amphorae = [_db_amphora_mock]
_db_loadbalancer_mock = mock.MagicMock()
_db_loadbalancer_mock.id = LB_ID
_db_loadbalancer_mock.vip_address = VIP_IP
_db_loadbalancer_mock.amphorae = [_db_amphora_mock]
_db_loadbalancer_mock.to_dict.return_value = {
constants.ID: LB_ID
}
_l7policy_mock = mock.MagicMock()
_l7policy_mock.id = L7POLICY_ID
_l7rule_mock = mock.MagicMock()
@ -83,6 +88,11 @@ _vip_mock = mock.MagicMock()
_vip_mock.port_id = PORT_ID
_vip_mock.subnet_id = SUBNET_ID
_vip_mock.ip_address = VIP_IP
_vip_mock.to_dict.return_value = {
constants.PORT_ID: PORT_ID,
constants.SUBNET_ID: SUBNET_ID,
constants.IP_ADDRESS: VIP_IP,
}
_vrrp_group_mock = mock.MagicMock()
_cert_mock = mock.MagicMock()
_compute_mock_dict = {
@ -111,8 +121,9 @@ class TestDatabaseTasks(base.TestCase):
self.listener_mock = mock.MagicMock()
self.listener_mock.id = LISTENER_ID
self.loadbalancer_mock = mock.MagicMock()
self.loadbalancer_mock.id = LB_ID
self.loadbalancer_mock = (
provider_utils.db_loadbalancer_to_provider_loadbalancer(
_db_loadbalancer_mock).to_dict())
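
The fixture above now goes through the provider conversion helper instead of a hand-rolled MagicMock; a hedged sketch of that helper in use (db_lb stands in for a load balancer fetched from the repository, and the listed keys are the flat provider-dict fields these tests index):

    from octavia.api.drivers import utils as provider_utils

    # db_lb: a DB/data-model load balancer, e.g. LoadBalancerRepository.get(...)
    provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(db_lb)
    lb_dict = provider_lb.to_dict()

    # Flat, queue-friendly keys, e.g.:
    #   lb_dict['loadbalancer_id'], lb_dict['vip_address'],
    #   lb_dict['vip_subnet_id'], lb_dict['vip_port_id'],
    #   lb_dict['vip_qos_policy_id']
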
self.member_mock = mock.MagicMock()
self.member_mock.id = MEMBER_ID
@ -419,7 +430,7 @@ class TestDatabaseTasks(base.TestCase):
self.assertEqual(_db_amphora_mock.to_dict(), amp)
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get',
return_value=_loadbalancer_mock)
return_value=_db_loadbalancer_mock)
def test_reload_load_balancer(self,
mock_lb_get,
mock_generate_uuid,
@ -437,10 +448,10 @@ class TestDatabaseTasks(base.TestCase):
'TEST',
id=LB_ID)
self.assertEqual(_loadbalancer_mock, lb)
self.assertEqual(self.loadbalancer_mock, lb)
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get',
return_value=_loadbalancer_mock)
return_value=_db_loadbalancer_mock)
@mock.patch('octavia.db.repositories.VipRepository.update')
def test_update_vip_after_allocation(self,
mock_vip_update,
@ -454,9 +465,9 @@ class TestDatabaseTasks(base.TestCase):
mock_amphora_repo_delete):
update_vip = database_tasks.UpdateVIPAfterAllocation()
loadbalancer = update_vip.execute(LB_ID, _vip_mock)
loadbalancer = update_vip.execute(LB_ID, _vip_mock.to_dict())
self.assertEqual(_loadbalancer_mock, loadbalancer)
self.assertEqual(self.loadbalancer_mock, loadbalancer)
mock_vip_update.assert_called_once_with('TEST',
LB_ID,
port_id=PORT_ID,
@ -577,7 +588,7 @@ class TestDatabaseTasks(base.TestCase):
mock_amphora_repo_delete):
map_lb_to_amp = database_tasks.MapLoadbalancerToAmphora()
amp = map_lb_to_amp.execute(self.loadbalancer_mock.id)
amp = map_lb_to_amp.execute(LB_ID)
repo.AmphoraRepository.allocate_and_associate.assert_called_once_with(
'TEST',
@ -586,12 +597,12 @@ class TestDatabaseTasks(base.TestCase):
self.assertEqual(self.amphora, amp)
amp_id = map_lb_to_amp.execute(self.loadbalancer_mock.id)
amp_id = map_lb_to_amp.execute(LB_ID)
self.assertIsNone(amp_id)
# Test revert
map_lb_to_amp.revert(None, self.loadbalancer_mock.id)
map_lb_to_amp.revert(None, LB_ID)
repo.LoadBalancerRepository.update.assert_called_once_with(
'TEST',
id=LB_ID,
@ -600,7 +611,7 @@ class TestDatabaseTasks(base.TestCase):
# Test revert with exception
repo.LoadBalancerRepository.update.reset_mock()
mock_loadbalancer_repo_update.side_effect = Exception('fail')
map_lb_to_amp.revert(None, self.loadbalancer_mock.id)
map_lb_to_amp.revert(None, LB_ID)
repo.LoadBalancerRepository.update.assert_called_once_with(
'TEST',
id=LB_ID,
@ -621,7 +632,7 @@ class TestDatabaseTasks(base.TestCase):
map_lb_to_amp = database_tasks.MapLoadbalancerToAmphora()
amp = map_lb_to_amp.execute(
self.loadbalancer_mock.id, availability_zone={
_db_loadbalancer_mock.id, availability_zone={
constants.COMPUTE_ZONE: 'fakeaz'})
repo.AmphoraRepository.allocate_and_associate.assert_called_once_with(
@ -631,12 +642,12 @@ class TestDatabaseTasks(base.TestCase):
self.assertEqual(self.amphora, amp)
amp = map_lb_to_amp.execute(self.loadbalancer_mock.id)
amp = map_lb_to_amp.execute(_db_loadbalancer_mock.id)
self.assertIsNone(amp)
# Test revert
map_lb_to_amp.revert(None, self.loadbalancer_mock.id)
map_lb_to_amp.revert(None, _db_loadbalancer_mock.id)
repo.LoadBalancerRepository.update.assert_called_once_with(
'TEST',
id=LB_ID,
@ -645,7 +656,7 @@ class TestDatabaseTasks(base.TestCase):
# Test revert with exception
repo.LoadBalancerRepository.update.reset_mock()
mock_loadbalancer_repo_update.side_effect = Exception('fail')
map_lb_to_amp.revert(None, self.loadbalancer_mock.id)
map_lb_to_amp.revert(None, _db_loadbalancer_mock.id)
repo.LoadBalancerRepository.update.assert_called_once_with(
'TEST',
id=LB_ID,
@ -654,7 +665,7 @@ class TestDatabaseTasks(base.TestCase):
@mock.patch('octavia.db.repositories.AmphoraRepository.get',
return_value=_db_amphora_mock)
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get',
return_value=_loadbalancer_mock)
return_value=_db_loadbalancer_mock)
def test_mark_lb_amphorae_deleted_in_db(self,
mock_loadbalancer_repo_get,
mock_amphora_repo_get,
@ -668,7 +679,7 @@ class TestDatabaseTasks(base.TestCase):
mark_amp_deleted_in_db = (database_tasks.
MarkLBAmphoraeDeletedInDB())
mark_amp_deleted_in_db.execute(_loadbalancer_mock)
mark_amp_deleted_in_db.execute(self.loadbalancer_mock)
repo.AmphoraRepository.update.assert_called_once_with(
'TEST',
@ -678,7 +689,7 @@ class TestDatabaseTasks(base.TestCase):
@mock.patch('octavia.db.repositories.AmphoraRepository.get',
return_value=_db_amphora_mock)
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get',
return_value=_loadbalancer_mock)
return_value=_db_loadbalancer_mock)
def test_mark_amphora_allocated_in_db(self,
mock_loadbalancer_repo_get,
mock_amphora_repo_get,
@ -693,7 +704,7 @@ class TestDatabaseTasks(base.TestCase):
mark_amp_allocated_in_db = (database_tasks.
MarkAmphoraAllocatedInDB())
mark_amp_allocated_in_db.execute(self.amphora,
self.loadbalancer_mock.id)
LB_ID)
repo.AmphoraRepository.update.assert_called_once_with(
'TEST',
@ -707,7 +718,7 @@ class TestDatabaseTasks(base.TestCase):
mock_amphora_repo_update.reset_mock()
mark_amp_allocated_in_db.revert(None, self.amphora,
self.loadbalancer_mock.id)
LB_ID)
repo.AmphoraRepository.update.assert_called_once_with(
'TEST',
@ -719,7 +730,7 @@ class TestDatabaseTasks(base.TestCase):
mock_amphora_repo_update.reset_mock()
mock_amphora_repo_update.side_effect = Exception('fail')
mark_amp_allocated_in_db.revert(None, self.amphora,
self.loadbalancer_mock.id)
LB_ID)
repo.AmphoraRepository.update.assert_called_once_with(
'TEST',
@ -1246,7 +1257,9 @@ class TestDatabaseTasks(base.TestCase):
provisioning_status=constants.ERROR)
self.assertEqual(0, repo.ListenerRepository.update.call_count)
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
def test_mark_LB_active_in_db_and_listeners(self,
mock_lb_get,
mock_generate_uuid,
mock_LOG,
mock_get_session,
@ -1257,8 +1270,9 @@ class TestDatabaseTasks(base.TestCase):
listeners = [data_models.Listener(id='listener1'),
data_models.Listener(id='listener2')]
lb = data_models.LoadBalancer(id=LB_ID, listeners=listeners)
mock_lb_get.return_value = lb
mark_lb_active = database_tasks.MarkLBActiveInDB(mark_subobjects=True)
mark_lb_active.execute(lb)
mark_lb_active.execute(self.loadbalancer_mock)
repo.LoadBalancerRepository.update.assert_called_once_with(
'TEST',
@ -1273,7 +1287,7 @@ class TestDatabaseTasks(base.TestCase):
mock_loadbalancer_repo_update.reset_mock()
mock_listener_repo_update.reset_mock()
mark_lb_active.revert(lb)
mark_lb_active.revert(self.loadbalancer_mock)
repo.LoadBalancerRepository.update.assert_called_once_with(
'TEST',
@ -1291,7 +1305,9 @@ class TestDatabaseTasks(base.TestCase):
@mock.patch('octavia.db.repositories.HealthMonitorRepository.update')
@mock.patch('octavia.db.repositories.L7PolicyRepository.update')
@mock.patch('octavia.db.repositories.L7RuleRepository.update')
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
def test_mark_LB_active_in_db_full_graph(self,
mock_lb_repo_get,
mock_l7r_repo_update,
mock_l7p_repo_update,
mock_hm_repo_update,
@ -1331,29 +1347,26 @@ class TestDatabaseTasks(base.TestCase):
lb = data_models.LoadBalancer(id=LB_ID, listeners=listeners,
pools=pools)
mark_lb_active = database_tasks.MarkLBActiveInDB(mark_subobjects=True)
mark_lb_active.execute(lb)
mock_lb_repo_get.return_value = lb
mark_lb_active.execute(self.loadbalancer_mock)
repo.LoadBalancerRepository.update.assert_called_once_with(
'TEST',
lb.id,
provisioning_status=constants.ACTIVE)
self.assertEqual(2, repo.ListenerRepository.update.call_count)
repo.ListenerRepository.update.has_calls(
[mock.call('TEST', listeners[0].id,
provisioning_status=constants.ACTIVE),
mock.call('TEST', listeners[1].id,
provisioning_status=constants.ACTIVE)])
self.assertEqual(2, repo.PoolRepository.update.call_count)
repo.PoolRepository.update.has_calls(
[mock.call('TEST', default_pool.id,
provisioning_status=constants.ACTIVE),
mock.call('TEST', redirect_pool.id,
provisioning_status=constants.ACTIVE)])
self.assertEqual(1, repo.HealthMonitorRepository.update.call_count)
repo.HealthMonitorRepository.update.has_calls(
[mock.call('TEST', health_monitor.id,
provisioning_status=constants.ACTIVE)])
self.assertEqual(1, repo.L7PolicyRepository.update.call_count)
repo.L7PolicyRepository.update.has_calls(
[mock.call('TEST', l7policies[0].id,
provisioning_status=constants.ACTIVE)])
@ -1369,7 +1382,7 @@ class TestDatabaseTasks(base.TestCase):
mock_hm_repo_update.reset_mock()
mock_l7p_repo_update.reset_mock()
mock_l7r_repo_update.reset_mock()
mark_lb_active.revert(lb)
mark_lb_active.revert(self.loadbalancer_mock)
repo.LoadBalancerRepository.update.assert_called_once_with(
'TEST',
@ -1381,7 +1394,6 @@ class TestDatabaseTasks(base.TestCase):
provisioning_status=constants.ERROR),
mock.call('TEST', listeners[1].id,
provisioning_status=constants.ERROR)])
self.assertEqual(2, repo.PoolRepository.update.call_count)
repo.PoolRepository.update.has_calls(
[mock.call('TEST', default_pool.id,
provisioning_status=constants.ERROR),
@ -1560,7 +1572,7 @@ class TestDatabaseTasks(base.TestCase):
mock_amphora_update,
mock_amphora_delete):
self.loadbalancer_mock.vip.load_balancer_id = LB_ID
_db_loadbalancer_mock.vip.load_balancer_id = LB_ID
update_load_balancer = database_tasks.UpdateLoadbalancerInDB()
update_load_balancer.execute(self.loadbalancer_mock,
{'name': 'test',
@ -1864,7 +1876,9 @@ class TestDatabaseTasks(base.TestCase):
'TEST', AMP_ID, role=None, vrrp_priority=None)
@mock.patch('octavia.db.repositories.AmphoraRepository.get')
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
def test_get_amphorae_from_loadbalancer(self,
mock_lb_get,
mock_amphora_get,
mock_generate_uuid,
mock_LOG,
@ -1881,14 +1895,17 @@ class TestDatabaseTasks(base.TestCase):
lb.amphorae = [amp1, amp2]
mock_amphora_get.side_effect = [_db_amphora_mock, None]
mock_lb_get.return_value = lb
get_amps_from_lb_obj = database_tasks.GetAmphoraeFromLoadbalancer()
result = get_amps_from_lb_obj.execute(lb)
result = get_amps_from_lb_obj.execute(self.loadbalancer_mock)
self.assertEqual([_db_amphora_mock.to_dict()], result)
@mock.patch('octavia.db.repositories.ListenerRepository.get')
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
def test_get_listeners_from_loadbalancer(self,
mock_lb_get,
mock_listener_get,
mock_generate_uuid,
mock_LOG,
@ -1898,13 +1915,16 @@ class TestDatabaseTasks(base.TestCase):
mock_amphora_repo_update,
mock_amphora_repo_delete):
mock_listener_get.return_value = _listener_mock
_loadbalancer_mock.listeners = [_listener_mock]
_db_loadbalancer_mock.listeners = [_listener_mock]
mock_lb_get.return_value = _db_loadbalancer_mock
get_list_from_lb_obj = database_tasks.GetListenersFromLoadbalancer()
result = get_list_from_lb_obj.execute(_loadbalancer_mock)
result = get_list_from_lb_obj.execute(self.loadbalancer_mock)
mock_listener_get.assert_called_once_with('TEST', id=_listener_mock.id)
self.assertEqual([{constants.LISTENER_ID: LISTENER_ID}], result)
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
def test_get_vip_from_loadbalancer(self,
mock_lb_get,
mock_generate_uuid,
mock_LOG,
mock_get_session,
@ -1912,10 +1932,11 @@ class TestDatabaseTasks(base.TestCase):
mock_listener_repo_update,
mock_amphora_repo_update,
mock_amphora_repo_delete):
_loadbalancer_mock.vip = _vip_mock
_db_loadbalancer_mock.vip = _vip_mock
mock_lb_get.return_value = _db_loadbalancer_mock
get_vip_from_lb_obj = database_tasks.GetVipFromLoadbalancer()
result = get_vip_from_lb_obj.execute(_loadbalancer_mock)
self.assertEqual(_vip_mock, result)
result = get_vip_from_lb_obj.execute(self.loadbalancer_mock)
self.assertEqual(_vip_mock.to_dict(), result)
@mock.patch('octavia.db.repositories.VRRPGroupRepository.create')
def test_create_vrrp_group_for_lb(self,
@ -1931,7 +1952,7 @@ class TestDatabaseTasks(base.TestCase):
mock_get_session.side_effect = ['TEST',
odb_exceptions.DBDuplicateEntry]
create_vrrp_group = database_tasks.CreateVRRPGroupForLB()
create_vrrp_group.execute(_loadbalancer_mock)
create_vrrp_group.execute(self.loadbalancer_mock)
mock_vrrp_group_create.assert_called_once_with(
'TEST', load_balancer_id=LB_ID,
vrrp_group_name=LB_ID.replace('-', ''),
@ -1939,7 +1960,7 @@ class TestDatabaseTasks(base.TestCase):
vrrp_auth_pass=mock_generate_uuid.return_value.replace('-',
'')[0:7],
advert_int=1)
create_vrrp_group.execute(_loadbalancer_mock)
create_vrrp_group.execute(self.loadbalancer_mock)
@mock.patch('octavia.db.repositories.AmphoraHealthRepository.delete')
def test_disable_amphora_health_monitoring(self,
@ -1957,8 +1978,10 @@ class TestDatabaseTasks(base.TestCase):
'TEST', amphora_id=AMP_ID)
@mock.patch('octavia.db.repositories.AmphoraHealthRepository.delete')
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
def test_disable_lb_amphorae_health_monitoring(
self,
mock_lb_get,
mock_amp_health_repo_delete,
mock_generate_uuid,
mock_LOG,
@ -1969,7 +1992,8 @@ class TestDatabaseTasks(base.TestCase):
mock_amphora_repo_delete):
disable_amp_health = (
database_tasks.DisableLBAmphoraeHealthMonitoring())
disable_amp_health.execute(_loadbalancer_mock)
mock_lb_get.return_value = _db_loadbalancer_mock
disable_amp_health.execute(self.loadbalancer_mock)
mock_amp_health_repo_delete.assert_called_once_with(
'TEST', amphora_id=AMP_ID)
@ -1989,8 +2013,10 @@ class TestDatabaseTasks(base.TestCase):
'TEST', amphora_id=AMP_ID, busy=True)
@mock.patch('octavia.db.repositories.AmphoraHealthRepository.update')
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
def test_mark_lb_amphorae_health_monitoring_busy(
self,
mock_lb_get,
mock_amp_health_repo_update,
mock_generate_uuid,
mock_LOG,
@ -2001,7 +2027,8 @@ class TestDatabaseTasks(base.TestCase):
mock_amphora_repo_delete):
mark_busy = (
database_tasks.MarkLBAmphoraeHealthBusy())
mark_busy.execute(_loadbalancer_mock)
mock_lb_get.return_value = _db_loadbalancer_mock
mark_busy.execute(self.loadbalancer_mock)
mock_amp_health_repo_update.assert_called_once_with(
'TEST', amphora_id=AMP_ID, busy=True)


@ -164,9 +164,10 @@ class TestDatabaseTasksQuota(base.TestCase):
self._test_decrement_quota(task, data_model, project_id=project_id)
def test_decrement_loadbalancer_quota(self):
project_id = uuidutils.generate_uuid()
task = database_tasks.DecrementLoadBalancerQuota()
data_model = data_models.LoadBalancer
self._test_decrement_quota(task, data_model)
self._test_decrement_quota(task, data_model, project_id=project_id)
def test_decrement_pool_quota(self):
project_id = uuidutils.generate_uuid()


@ -40,9 +40,8 @@ class TestLifecycleTasks(base.TestCase):
self.LISTENER_ID = uuidutils.generate_uuid()
self.LISTENER = {constants.LISTENER_ID: self.LISTENER_ID}
self.LISTENERS = [self.LISTENER]
self.LOADBALANCER = mock.MagicMock()
self.LOADBALANCER_ID = uuidutils.generate_uuid()
self.LOADBALANCER.id = self.LOADBALANCER_ID
self.LOADBALANCER = {constants.LOADBALANCER_ID: self.LOADBALANCER_ID}
self.LISTENER[constants.LOADBALANCER_ID] = self.LOADBALANCER_ID
self.MEMBER = mock.MagicMock()
self.MEMBER_ID = uuidutils.generate_uuid()
@ -278,12 +277,14 @@ class TestLifecycleTasks(base.TestCase):
lifecycle_tasks.LoadBalancerToErrorOnRevertTask())
# Execute
loadbalancer_to_error_on_revert.execute(self.LOADBALANCER)
loadbalancer_to_error_on_revert.execute({constants.LOADBALANCER_ID:
self.LOADBALANCER_ID})
self.assertFalse(mock_loadbalancer_prov_status_error.called)
# Revert
loadbalancer_to_error_on_revert.revert(self.LOADBALANCER)
loadbalancer_to_error_on_revert.revert({constants.LOADBALANCER_ID:
self.LOADBALANCER_ID})
mock_loadbalancer_prov_status_error.assert_called_once_with(
self.LOADBALANCER_ID)
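
A hedged sketch of the task shape this lifecycle assertion exercises: execute is a no-op and revert marks the load balancer ERROR using the id read from the provider dict (it assumes the BaseLifecycleTask/task_utils helper the mock above patches):

    from octavia.common import constants
    from octavia.controller.worker.v2.tasks import lifecycle_tasks


    class LoadBalancerToErrorOnRevertSketch(lifecycle_tasks.BaseLifecycleTask):
        """Hypothetical copy of the pattern under test."""

        def execute(self, loadbalancer):
            pass  # nothing to do unless a later task fails

        def revert(self, loadbalancer, *args, **kwargs):
            self.task_utils.mark_loadbalancer_prov_status_error(
                loadbalancer[constants.LOADBALANCER_ID])
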


@ -19,6 +19,7 @@ from oslo_config import fixture as oslo_fixture
from oslo_utils import uuidutils
from taskflow.types import failure
from octavia.api.drivers import utils as provider_utils
from octavia.common import constants
from octavia.common import data_models as o_data_models
from octavia.controller.worker.v2.tasks import network_tasks
@ -72,11 +73,11 @@ class TestNetworkTasks(base.TestCase):
def setUp(self):
network_tasks.LOG = mock.MagicMock()
self.db_amphora_mock = mock.MagicMock()
self.load_balancer_mock = mock.MagicMock()
self.db_load_balancer_mock = mock.MagicMock()
self.vip_mock = mock.MagicMock()
self.vip_mock.subnet_id = SUBNET_ID
self.load_balancer_mock.vip = self.vip_mock
self.load_balancer_mock.amphorae = []
self.db_load_balancer_mock.vip = self.vip_mock
self.db_load_balancer_mock.amphorae = []
self.db_amphora_mock.id = AMPHORA_ID
self.db_amphora_mock.compute_id = COMPUTE_ID
self.db_amphora_mock.status = constants.AMPHORA_ALLOCATED
@ -84,14 +85,25 @@ class TestNetworkTasks(base.TestCase):
constants.COMPUTE_ID: COMPUTE_ID,
constants.LB_NETWORK_IP: IP_ADDRESS,
}
self.load_balancer_mock = {
constants.LOADBALANCER_ID: uuidutils.generate_uuid(),
constants.VIP_SUBNET_ID: VIP.subnet_id,
constants.VIP_PORT_ID: VIP.port_id,
constants.VIP_ADDRESS: VIP.ip_address,
constants.VIP_QOS_POLICY_ID: t_constants.MOCK_QOS_POLICY_ID1
}
conf = oslo_fixture.Config(cfg.CONF)
conf.config(group="controller_worker", amp_boot_network_list=['netid'])
super(TestNetworkTasks, self).setUp()
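
Most of the reworked tests that follow repeat one recipe: patch LoadBalancerRepository.get and db.api.get_session, point the repository mock at the DB-side object, then feed the task the provider dict built in setUp. A condensed, hypothetical method for this class (mock_get_net_driver and the network_tasks import are assumed to come from the class-level patch and module imports these tests already use):

    from unittest import mock


    @mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
    @mock.patch('octavia.db.api.get_session', return_value=mock.MagicMock())
    def test_example_task(self, mock_get_session, mock_get_lb,
                          mock_get_net_driver):
        mock_driver = mock.MagicMock()
        mock_get_net_driver.return_value = mock_driver
        mock_get_lb.return_value = self.db_load_balancer_mock  # DB-side object

        net = network_tasks.UpdateVIPSecurityGroup()
        net.execute(self.load_balancer_mock)                   # provider dict in

        mock_driver.update_vip_sg.assert_called_once_with(
            self.db_load_balancer_mock, self.db_load_balancer_mock.vip)
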
def test_calculate_delta(self, mock_get_net_driver):
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
@mock.patch('octavia.db.api.get_session', return_value=_session_mock)
def test_calculate_delta(self, mock_get_session, mock_get_lb,
mock_get_net_driver):
mock_driver = mock.MagicMock()
mock_get_lb.return_value = self.db_load_balancer_mock
self.db_amphora_mock.to_dict.return_value = {
constants.ID: AMPHORA_ID, constants.COMPUTE_ID: COMPUTE_ID,
@ -116,16 +128,16 @@ class TestNetworkTasks(base.TestCase):
# Delta should be empty
mock_driver.reset_mock()
self.db_amphora_mock.load_balancer = self.load_balancer_mock
self.load_balancer_mock.amphorae = [self.db_amphora_mock]
self.load_balancer_mock.pools = []
self.db_amphora_mock.load_balancer = self.db_load_balancer_mock
self.db_load_balancer_mock.amphorae = [self.db_amphora_mock]
self.db_load_balancer_mock.pools = []
self.assertEqual(empty_deltas,
calc_delta.execute(self.load_balancer_mock))
mock_driver.get_plugged_networks.assert_called_once_with(COMPUTE_ID)
# Pool mock should be configured explicitly for each test
pool_mock = mock.MagicMock()
self.load_balancer_mock.pools = [pool_mock]
self.db_load_balancer_mock.pools = [pool_mock]
# Test with one amp and one pool but no members, nothing plugged
# Delta should be empty
@ -322,7 +334,7 @@ class TestNetworkTasks(base.TestCase):
return [data_models.Interface(port_id=port_id)]
net_task = network_tasks.GetMemberPorts()
net_task.execute(LB, self.amphora_mock)
net_task.execute(self.load_balancer_mock, self.amphora_mock)
mock_driver.get_port.assert_called_once_with(t_constants.MOCK_PORT_ID)
mock_driver.get_plugged_networks.assert_called_once_with(COMPUTE_ID)
@ -500,47 +512,59 @@ class TestNetworkTasks(base.TestCase):
net.execute({self.db_amphora_mock.id: delta})
mock_driver.unplug_network.assert_called_once_with(COMPUTE_ID, 1)
def test_plug_vip(self, mock_get_net_driver):
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
@mock.patch('octavia.db.api.get_session', return_value=_session_mock)
def test_plug_vip(self, mock_get_session, mock_get_lb,
mock_get_net_driver):
mock_driver = mock.MagicMock()
mock_get_net_driver.return_value = mock_driver
LB.amphorae = AMPS_DATA
mock_get_lb.return_value = LB
LB.amphorae = AMPS_DATA
net = network_tasks.PlugVIP()
amp = mock.MagicMock()
amp.to_dict.return_value = 'vip'
mock_driver.plug_vip.return_value = [amp]
data = net.execute(LB)
data = net.execute(self.load_balancer_mock)
mock_driver.plug_vip.assert_called_once_with(LB, LB.vip)
self.assertEqual(["vip"], data)
# revert
net.revert(["vip"], LB)
net.revert([o_data_models.Amphora().to_dict()],
self.load_balancer_mock)
mock_driver.unplug_vip.assert_called_once_with(LB, LB.vip)
# revert with exception
mock_driver.reset_mock()
mock_driver.unplug_vip.side_effect = Exception('UnplugVipException')
net.revert(["vip"], LB)
net.revert([o_data_models.Amphora().to_dict()],
self.load_balancer_mock)
mock_driver.unplug_vip.assert_called_once_with(LB, LB.vip)
@mock.patch('octavia.controller.worker.task_utils.TaskUtils.'
'get_current_loadbalancer_from_db')
def test_apply_qos_on_creation(self, mock_get_lb_db, mock_get_net_driver):
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
@mock.patch('octavia.db.api.get_session', return_value=_session_mock)
def test_apply_qos_on_creation(self, mock_get_session, mock_get_lb,
mock_get_lb_db, mock_get_net_driver):
mock_driver = mock.MagicMock()
mock_get_net_driver.return_value = mock_driver
net = network_tasks.ApplyQos()
mock_get_lb_db.return_value = LB
mock_get_lb.return_value = LB
# execute
UPDATE_DICT[constants.TOPOLOGY] = constants.TOPOLOGY_SINGLE
update_dict = UPDATE_DICT
net.execute(LB, [AMPS_DATA[0]], update_dict)
net.execute(self.load_balancer_mock, [AMPS_DATA[0]], update_dict)
mock_driver.apply_qos_on_port.assert_called_once_with(
VIP.qos_policy_id, AMPS_DATA[0].vrrp_port_id)
self.assertEqual(1, mock_driver.apply_qos_on_port.call_count)
standby_topology = constants.TOPOLOGY_ACTIVE_STANDBY
mock_driver.reset_mock()
update_dict[constants.TOPOLOGY] = standby_topology
net.execute(LB, AMPS_DATA, update_dict)
net.execute(self.load_balancer_mock, AMPS_DATA, update_dict)
mock_driver.apply_qos_on_port.assert_called_with(
t_constants.MOCK_QOS_POLICY_ID1, mock.ANY)
self.assertEqual(2, mock_driver.apply_qos_on_port.call_count)
@ -548,16 +572,19 @@ class TestNetworkTasks(base.TestCase):
# revert
mock_driver.reset_mock()
update_dict = UPDATE_DICT
net.revert(None, LB, [AMPS_DATA[0]], update_dict)
net.revert(None, self.load_balancer_mock, [AMPS_DATA[0]], update_dict)
self.assertEqual(0, mock_driver.apply_qos_on_port.call_count)
mock_driver.reset_mock()
update_dict[constants.TOPOLOGY] = standby_topology
net.revert(None, LB, AMPS_DATA, update_dict)
net.revert(None, self.load_balancer_mock, AMPS_DATA, update_dict)
self.assertEqual(0, mock_driver.apply_qos_on_port.call_count)
@mock.patch('octavia.controller.worker.task_utils.TaskUtils.'
'get_current_loadbalancer_from_db')
def test_apply_qos_on_update(self, mock_get_lb_db, mock_get_net_driver):
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
@mock.patch('octavia.db.api.get_session', return_value=_session_mock)
def test_apply_qos_on_update(self, mock_get_session, mock_get_lb,
mock_get_lb_db, mock_get_net_driver):
mock_driver = mock.MagicMock()
mock_get_net_driver.return_value = mock_driver
net = network_tasks.ApplyQos()
@ -565,48 +592,58 @@ class TestNetworkTasks(base.TestCase):
null_qos_lb = o_data_models.LoadBalancer(
vip=null_qos_vip, topology=constants.TOPOLOGY_SINGLE,
amphorae=[AMPS_DATA[0]])
null_qos_lb_dict = (
provider_utils.db_loadbalancer_to_provider_loadbalancer(
null_qos_lb).to_dict())
tmp_vip_object = o_data_models.Vip(
qos_policy_id=t_constants.MOCK_QOS_POLICY_ID1)
tmp_lb = o_data_models.LoadBalancer(
vip=tmp_vip_object, topology=constants.TOPOLOGY_SINGLE,
amphorae=[AMPS_DATA[0]])
pr_tm_dict = provider_utils.db_loadbalancer_to_provider_loadbalancer(
tmp_lb).to_dict()
mock_get_lb.return_value = tmp_lb
# execute
update_dict = {'description': 'fool'}
net.execute(tmp_lb, update_dict=update_dict)
net.execute(pr_tm_dict, update_dict=update_dict)
mock_driver.apply_qos_on_port.assert_called_once_with(
t_constants.MOCK_QOS_POLICY_ID1, AMPS_DATA[0].vrrp_port_id)
self.assertEqual(1, mock_driver.apply_qos_on_port.call_count)
mock_driver.reset_mock()
mock_get_lb.reset_mock()
mock_get_lb.return_value = null_qos_lb
update_dict = {'vip': {'qos_policy_id': None}}
net.execute(null_qos_lb, update_dict=update_dict)
net.execute(null_qos_lb_dict, update_dict=update_dict)
mock_driver.apply_qos_on_port.assert_called_once_with(
None, AMPS_DATA[0].vrrp_port_id)
self.assertEqual(1, mock_driver.apply_qos_on_port.call_count)
mock_driver.reset_mock()
update_dict = {'name': '123'}
net.execute(null_qos_lb, update_dict=update_dict)
net.execute(null_qos_lb_dict, update_dict=update_dict)
self.assertEqual(0, mock_driver.apply_qos_on_port.call_count)
mock_driver.reset_mock()
mock_get_lb.reset_mock()
update_dict = {'description': 'fool'}
tmp_lb.amphorae = AMPS_DATA
tmp_lb.topology = constants.TOPOLOGY_ACTIVE_STANDBY
net.execute(tmp_lb, update_dict=update_dict)
mock_get_lb.return_value = tmp_lb
net.execute(pr_tm_dict, update_dict=update_dict)
mock_driver.apply_qos_on_port.assert_called_with(
t_constants.MOCK_QOS_POLICY_ID1, mock.ANY)
self.assertEqual(2, mock_driver.apply_qos_on_port.call_count)
# revert
mock_driver.reset_mock()
mock_get_lb.reset_mock()
tmp_lb.amphorae = [AMPS_DATA[0]]
tmp_lb.topology = constants.TOPOLOGY_SINGLE
update_dict = {'description': 'fool'}
mock_get_lb_db.return_value = tmp_lb
net.revert(None, tmp_lb, update_dict=update_dict)
net.revert(None, pr_tm_dict, update_dict=update_dict)
self.assertEqual(0, mock_driver.apply_qos_on_port.call_count)
mock_driver.reset_mock()
@ -614,12 +651,13 @@ class TestNetworkTasks(base.TestCase):
ori_lb_db = LB2
ori_lb_db.amphorae = [AMPS_DATA[0]]
mock_get_lb_db.return_value = ori_lb_db
net.revert(None, null_qos_lb, update_dict=update_dict)
net.revert(None, null_qos_lb_dict, update_dict=update_dict)
mock_driver.apply_qos_on_port.assert_called_once_with(
t_constants.MOCK_QOS_POLICY_ID2, AMPS_DATA[0].vrrp_port_id)
self.assertEqual(1, mock_driver.apply_qos_on_port.call_count)
mock_driver.reset_mock()
mock_get_lb.reset_mock()
update_dict = {'vip': {
'qos_policy_id': t_constants.MOCK_QOS_POLICY_ID2}}
tmp_lb.amphorae = AMPS_DATA
@ -627,49 +665,64 @@ class TestNetworkTasks(base.TestCase):
ori_lb_db = LB2
ori_lb_db.amphorae = [AMPS_DATA[0]]
mock_get_lb_db.return_value = ori_lb_db
net.revert(None, tmp_lb, update_dict=update_dict)
net.revert(None, pr_tm_dict, update_dict=update_dict)
mock_driver.apply_qos_on_port.assert_called_with(
t_constants.MOCK_QOS_POLICY_ID2, mock.ANY)
self.assertEqual(2, mock_driver.apply_qos_on_port.call_count)
def test_unplug_vip(self, mock_get_net_driver):
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
@mock.patch('octavia.db.api.get_session', return_value=_session_mock)
def test_unplug_vip(self, mock_get_session, mock_get_lb,
mock_get_net_driver):
mock_driver = mock.MagicMock()
mock_get_lb.return_value = LB
mock_get_net_driver.return_value = mock_driver
net = network_tasks.UnplugVIP()
net.execute(LB)
net.execute(self.load_balancer_mock)
mock_driver.unplug_vip.assert_called_once_with(LB, LB.vip)
def test_allocate_vip(self, mock_get_net_driver):
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
@mock.patch('octavia.db.api.get_session', return_value=_session_mock)
def test_allocate_vip(self, mock_get_session, mock_get_lb,
mock_get_net_driver):
mock_driver = mock.MagicMock()
mock_get_lb.return_value = LB
mock_get_net_driver.return_value = mock_driver
net = network_tasks.AllocateVIP()
mock_driver.allocate_vip.return_value = LB.vip
mock_driver.reset_mock()
self.assertEqual(LB.vip, net.execute(LB))
self.assertEqual(LB.vip.to_dict(),
net.execute(self.load_balancer_mock))
mock_driver.allocate_vip.assert_called_once_with(LB)
# revert
vip_mock = mock.MagicMock()
net.revert(vip_mock, LB)
mock_driver.deallocate_vip.assert_called_once_with(vip_mock)
vip_mock = VIP.to_dict()
net.revert(vip_mock, self.load_balancer_mock)
mock_driver.deallocate_vip.assert_called_once_with(
o_data_models.Vip(**vip_mock))
# revert exception
mock_driver.reset_mock()
mock_driver.deallocate_vip.side_effect = Exception('DeallVipException')
vip_mock = mock.MagicMock()
net.revert(vip_mock, LB)
mock_driver.deallocate_vip.assert_called_once_with(vip_mock)
vip_mock = VIP.to_dict()
net.revert(vip_mock, self.load_balancer_mock)
mock_driver.deallocate_vip.assert_called_once_with(o_data_models.Vip(
**vip_mock))
def test_deallocate_vip(self, mock_get_net_driver):
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
@mock.patch('octavia.db.api.get_session', return_value=_session_mock)
def test_deallocate_vip(self, mock_get_session, mock_get_lb,
mock_get_net_driver):
mock_driver = mock.MagicMock()
mock_get_net_driver.return_value = mock_driver
net = network_tasks.DeallocateVIP()
vip = o_data_models.Vip()
lb = o_data_models.LoadBalancer(vip=vip)
net.execute(lb)
mock_get_lb.return_value = lb
net.execute(self.load_balancer_mock)
mock_driver.deallocate_vip.assert_called_once_with(lb.vip)
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
@ -700,12 +753,16 @@ class TestNetworkTasks(base.TestCase):
net_task.execute(listener)
mock_driver.update_vip.assert_called_once_with(lb, for_delete=True)
def test_get_amphorae_network_configs(self, mock_get_net_driver):
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
@mock.patch('octavia.db.api.get_session', return_value=_session_mock)
def test_get_amphorae_network_configs(self, mock_session, mock_lb_get,
mock_get_net_driver):
mock_driver = mock.MagicMock()
mock_lb_get.return_value = LB
mock_get_net_driver.return_value = mock_driver
lb = o_data_models.LoadBalancer()
net_task = network_tasks.GetAmphoraeNetworkConfigs()
net_task.execute(lb)
net_task.execute(self.load_balancer_mock)
mock_driver.get_network_configs.assert_called_once_with(lb)
@mock.patch('octavia.db.repositories.AmphoraRepository.get')
@ -823,12 +880,16 @@ class TestNetworkTasks(base.TestCase):
mock_driver.wait_for_port_detach.assert_called_once_with(
self.db_amphora_mock)
def test_update_vip_sg(self, mock_get_net_driver):
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
@mock.patch('octavia.db.api.get_session', return_value=_session_mock)
def test_update_vip_sg(self, mock_session, mock_lb_get,
mock_get_net_driver):
mock_driver = mock.MagicMock()
mock_lb_get.return_value = LB
mock_get_net_driver.return_value = mock_driver
net = network_tasks.UpdateVIPSecurityGroup()
net.execute(LB)
net.execute(self.load_balancer_mock)
mock_driver.update_vip_sg.assert_called_once_with(LB, LB.vip)
def test_get_subnet_from_vip(self, mock_get_net_driver):
@ -836,35 +897,40 @@ class TestNetworkTasks(base.TestCase):
mock_get_net_driver.return_value = mock_driver
net = network_tasks.GetSubnetFromVIP()
net.execute(LB)
net.execute(self.load_balancer_mock)
mock_driver.get_subnet.assert_called_once_with(LB.vip.subnet_id)
@mock.patch('octavia.db.repositories.AmphoraRepository.get')
@mock.patch('octavia.db.api.get_session', return_value=mock.MagicMock())
def test_plug_vip_amphora(self, mock_session, mock_get,
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
@mock.patch('octavia.db.api.get_session', return_value=_session_mock)
def test_plug_vip_amphora(self, mock_session, mock_lb_get, mock_get,
mock_get_net_driver):
mock_driver = mock.MagicMock()
amphora = {constants.ID: AMPHORA_ID,
constants.LB_NETWORK_IP: IP_ADDRESS}
mock_lb_get.return_value = LB
mock_get.return_value = self.db_amphora_mock
mock_get_net_driver.return_value = mock_driver
net = network_tasks.PlugVIPAmpphora()
mockSubnet = mock_driver.get_subnet()
net.execute(LB, amphora, mockSubnet)
net.execute(self.load_balancer_mock, amphora, mockSubnet)
mock_driver.plug_aap_port.assert_called_once_with(
LB, LB.vip, self.db_amphora_mock, mockSubnet)
@mock.patch('octavia.db.repositories.AmphoraRepository.get')
@mock.patch('octavia.db.api.get_session', return_value=mock.MagicMock())
def test_revert_plug_vip_amphora(self, mock_session, mock_get,
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
@mock.patch('octavia.db.api.get_session', return_value=_session_mock)
def test_revert_plug_vip_amphora(self, mock_session, mock_lb_get, mock_get,
mock_get_net_driver):
mock_driver = mock.MagicMock()
mock_lb_get.return_value = LB
mock_get.return_value = self.db_amphora_mock
mock_get_net_driver.return_value = mock_driver
net = network_tasks.PlugVIPAmpphora()
mockSubnet = mock.MagicMock()
amphora = {constants.ID: AMPHORA_ID,
constants.LB_NETWORK_IP: IP_ADDRESS}
net.revert(AMPS_DATA[0].to_dict(), LB, amphora, mockSubnet)
net.revert(AMPS_DATA[0].to_dict(), self.load_balancer_mock,
amphora, mockSubnet)
mock_driver.unplug_aap_port.assert_called_once_with(
LB.vip, self.db_amphora_mock, mockSubnet)


@ -18,6 +18,7 @@ from oslo_config import cfg
from oslo_config import fixture as oslo_fixture
from oslo_utils import uuidutils
from octavia.api.drivers import utils as provider_utils
from octavia.common import base_taskflow
from octavia.common import constants
from octavia.common import data_models
@ -55,19 +56,22 @@ _flow_mock = mock.MagicMock()
_health_mon_mock = mock.MagicMock()
_vip_mock = mock.MagicMock()
_listener_mock = mock.MagicMock()
_load_balancer_mock = mock.MagicMock()
_load_balancer_mock.listeners = [_listener_mock]
_load_balancer_mock.project_id = PROJECT_ID
_db_load_balancer_mock = mock.MagicMock()
_load_balancer_mock = {
constants.LOADBALANCER_ID: LB_ID
}
_member_mock = mock.MagicMock()
_pool_mock = {constants.POOL_ID: POOL_ID}
_db_pool_mock = mock.MagicMock()
_db_pool_mock.load_balancer = _db_load_balancer_mock
_member_mock.pool = _db_pool_mock
_l7policy_mock = mock.MagicMock()
_l7rule_mock = mock.MagicMock()
_create_map_flow_mock = mock.MagicMock()
_db_amphora_mock.load_balancer_id = LB_ID
_db_amphora_mock.id = AMP_ID
_db_session = mock.MagicMock()
CONF = cfg.CONF
@ -85,7 +89,7 @@ class TestException(Exception):
@mock.patch('octavia.db.repositories.HealthMonitorRepository.get',
return_value=_health_mon_mock)
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get',
return_value=_load_balancer_mock)
return_value=_db_load_balancer_mock)
@mock.patch('octavia.db.repositories.ListenerRepository.get',
return_value=_listener_mock)
@mock.patch('octavia.db.repositories.L7PolicyRepository.get',
@ -107,21 +111,24 @@ class TestControllerWorker(base.TestCase):
self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
_db_pool_mock.listeners = [_listener_mock]
_db_pool_mock.load_balancer = _load_balancer_mock
_db_pool_mock.load_balancer = _db_load_balancer_mock
_health_mon_mock.pool = _db_pool_mock
_load_balancer_mock.amphorae = _db_amphora_mock
_load_balancer_mock.vip = _vip_mock
_load_balancer_mock.id = LB_ID
_load_balancer_mock.project_id = PROJECT_ID
_listener_mock.load_balancer = _load_balancer_mock
_db_load_balancer_mock.amphorae = _db_amphora_mock
_db_load_balancer_mock.vip = _vip_mock
_db_load_balancer_mock.id = LB_ID
_db_load_balancer_mock.project_id = PROJECT_ID
_listener_mock.load_balancer = _db_load_balancer_mock
_listener_mock.id = LISTENER_ID
_listener_mock.to_dict.return_value = {
constants.ID: LISTENER_ID, constants.LOAD_BALANCER_ID: LB_ID}
self.ref_listener_dict = {constants.LISTENER_ID: LISTENER_ID,
constants.LOADBALANCER_ID: LB_ID}
_member_mock.pool = _db_pool_mock
_l7policy_mock.listener = _listener_mock
_l7rule_mock.l7policy = _l7policy_mock
_db_load_balancer_mock.listeners = [_listener_mock]
_db_load_balancer_mock.to_dict.return_value = {'id': LB_ID}
fetch_mock = mock.MagicMock(return_value=AMP_ID)
_flow_mock.storage.fetch = fetch_mock
@ -265,6 +272,8 @@ class TestControllerWorker(base.TestCase):
cw = controller_worker.ControllerWorker()
cw.create_health_monitor(_health_mon_mock)
provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
_db_load_balancer_mock).to_dict()
(base_taskflow.BaseTaskFlowEngine._taskflow_load.
assert_called_once_with(_flow_mock,
@ -273,9 +282,9 @@ class TestControllerWorker(base.TestCase):
constants.LISTENERS:
[self.ref_listener_dict],
constants.LOADBALANCER_ID:
_load_balancer_mock.id,
LB_ID,
constants.LOADBALANCER:
_load_balancer_mock,
provider_lb,
constants.POOL_ID:
POOL_ID}))
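For reference, the provider_lb entries asserted in these stores come from converting the DB model with the provider-driver utility imported above; a minimal sketch of that conversion, assuming a bare DB LoadBalancer data model:

    from octavia.api.drivers import utils as provider_utils
    from octavia.common import constants
    from octavia.common import data_models

    LB_ID = 'abc-123'  # placeholder UUID
    db_lb = data_models.LoadBalancer(id=LB_ID)
    provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
        db_lb).to_dict()

    # The flow store then carries the provider dict plus the bare ID.
    store = {
        constants.LOADBALANCER: provider_lb,
        constants.LOADBALANCER_ID: LB_ID,
    }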
@ -304,6 +313,8 @@ class TestControllerWorker(base.TestCase):
cw = controller_worker.ControllerWorker()
cw.delete_health_monitor(HM_ID)
provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
_db_load_balancer_mock).to_dict()
(base_taskflow.BaseTaskFlowEngine._taskflow_load.
assert_called_once_with(_flow_mock,
@ -312,9 +323,9 @@ class TestControllerWorker(base.TestCase):
constants.LISTENERS:
[self.ref_listener_dict],
constants.LOADBALANCER_ID:
_load_balancer_mock.id,
LB_ID,
constants.LOADBALANCER:
_load_balancer_mock,
provider_lb,
constants.POOL_ID:
POOL_ID}))
@ -344,6 +355,8 @@ class TestControllerWorker(base.TestCase):
cw = controller_worker.ControllerWorker()
cw.update_health_monitor(_health_mon_mock.id,
HEALTH_UPDATE_DICT)
provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
_db_load_balancer_mock).to_dict()
(base_taskflow.BaseTaskFlowEngine._taskflow_load.
assert_called_once_with(_flow_mock,
@ -351,11 +364,11 @@ class TestControllerWorker(base.TestCase):
_health_mon_mock,
constants.POOL_ID: POOL_ID,
constants.LOADBALANCER_ID:
_load_balancer_mock.id,
LB_ID,
constants.LISTENERS:
[self.ref_listener_dict],
constants.LOADBALANCER:
_load_balancer_mock,
provider_lb,
constants.UPDATE_DICT:
HEALTH_UPDATE_DICT}))
@ -385,11 +398,13 @@ class TestControllerWorker(base.TestCase):
listener_dict = {constants.LISTENER_ID: LISTENER_ID,
constants.LOADBALANCER_ID: LB_ID}
cw.create_listener(listener_dict)
provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
_db_load_balancer_mock).to_dict()
(base_taskflow.BaseTaskFlowEngine._taskflow_load.
assert_called_once_with(
_flow_mock, store={
constants.LOADBALANCER: _load_balancer_mock,
constants.LOADBALANCER: provider_lb,
constants.LOADBALANCER_ID: LB_ID,
constants.LISTENERS: [listener_dict]}))
@ -501,7 +516,7 @@ class TestControllerWorker(base.TestCase):
mock_lb_repo_get.side_effect = [None, None, None, lb_mock]
cw = controller_worker.ControllerWorker()
cw.create_load_balancer(LB_ID)
cw.create_load_balancer(_load_balancer_mock)
mock_get_create_load_balancer_flow.assert_called_with(
topology=constants.TOPOLOGY_SINGLE, listeners=[])
@ -548,7 +563,7 @@ class TestControllerWorker(base.TestCase):
setattr(mock_lb_repo_get.return_value, 'listeners', [])
cw = controller_worker.ControllerWorker()
cw.create_load_balancer(LB_ID)
cw.create_load_balancer(_load_balancer_mock)
mock_get_create_load_balancer_flow.assert_called_with(
topology=constants.TOPOLOGY_ACTIVE_STANDBY, listeners=[])
@ -579,6 +594,9 @@ class TestControllerWorker(base.TestCase):
listeners = [data_models.Listener(id='listener1'),
data_models.Listener(id='listener2')]
dict_listeners = [listener.to_dict() for listener in
provider_utils.db_listeners_to_provider_listeners(
listeners)]
lb = data_models.LoadBalancer(id=LB_ID, listeners=listeners,
topology=constants.TOPOLOGY_SINGLE)
mock_lb_repo_get.return_value = lb
@ -593,12 +611,12 @@ class TestControllerWorker(base.TestCase):
}
cw = controller_worker.ControllerWorker()
cw.create_load_balancer(LB_ID)
cw.create_load_balancer(_load_balancer_mock)
# mock_create_single_topology.assert_called_once()
# mock_create_active_standby_topology.assert_not_called()
mock_get_create_load_balancer_flow.assert_called_with(
topology=constants.TOPOLOGY_SINGLE, listeners=lb.listeners)
topology=constants.TOPOLOGY_SINGLE, listeners=dict_listeners)
mock_taskflow_load.assert_called_with(
mock_get_create_load_balancer_flow.return_value, store=store)
mock_eng.run.assert_any_call()
@ -632,6 +650,9 @@ class TestControllerWorker(base.TestCase):
listeners = [data_models.Listener(id='listener1'),
data_models.Listener(id='listener2')]
dict_listeners = [listener.to_dict() for listener in
provider_utils.db_listeners_to_provider_listeners(
listeners)]
lb = data_models.LoadBalancer(
id=LB_ID, listeners=listeners,
topology=constants.TOPOLOGY_ACTIVE_STANDBY)
@ -647,17 +668,18 @@ class TestControllerWorker(base.TestCase):
}
cw = controller_worker.ControllerWorker()
cw.create_load_balancer(LB_ID)
cw.create_load_balancer(_load_balancer_mock)
mock_get_create_load_balancer_flow.assert_called_with(
topology=constants.TOPOLOGY_ACTIVE_STANDBY, listeners=lb.listeners)
topology=constants.TOPOLOGY_ACTIVE_STANDBY,
listeners=dict_listeners)
mock_taskflow_load.assert_called_with(
mock_get_create_load_balancer_flow.return_value, store=store)
mock_eng.run.assert_any_call()
@mock.patch('octavia.controller.worker.v2.flows.load_balancer_flows.'
'LoadBalancerFlows.get_delete_load_balancer_flow',
return_value=(_flow_mock, {'test': 'test'}))
return_value=_flow_mock)
def test_delete_load_balancer_without_cascade(self,
mock_get_delete_lb_flow,
mock_api_get_session,
@ -675,27 +697,26 @@ class TestControllerWorker(base.TestCase):
_flow_mock.reset_mock()
cw = controller_worker.ControllerWorker()
cw.delete_load_balancer(LB_ID, cascade=False)
cw.delete_load_balancer(_load_balancer_mock, cascade=False)
mock_lb_repo_get.assert_called_once_with(
_db_session,
id=LB_ID)
(base_taskflow.BaseTaskFlowEngine._taskflow_load.
assert_called_once_with(_flow_mock,
store={constants.LOADBALANCER:
_load_balancer_mock,
constants.SERVER_GROUP_ID:
_load_balancer_mock.server_group_id,
'test': 'test'
}
)
)
assert_called_once_with(
_flow_mock,
store={constants.LOADBALANCER: _load_balancer_mock,
constants.SERVER_GROUP_ID:
_db_load_balancer_mock.server_group_id,
constants.PROJECT_ID: _db_load_balancer_mock.project_id,
}))
_flow_mock.run.assert_called_once_with()
@mock.patch('octavia.controller.worker.v2.flows.load_balancer_flows.'
'LoadBalancerFlows.get_cascade_delete_load_balancer_flow',
return_value=(_flow_mock, {'test': 'test'}))
return_value=_flow_mock)
def test_delete_load_balancer_with_cascade(self,
mock_get_delete_lb_flow,
mock_api_get_session,
@ -713,21 +734,23 @@ class TestControllerWorker(base.TestCase):
_flow_mock.reset_mock()
cw = controller_worker.ControllerWorker()
cw.delete_load_balancer(LB_ID, cascade=True)
cw.delete_load_balancer(_load_balancer_mock, cascade=True)
mock_lb_repo_get.assert_called_once_with(
_db_session,
id=LB_ID)
list_name = 'listener_%s' % _listener_mock.id
(base_taskflow.BaseTaskFlowEngine._taskflow_load.
assert_called_once_with(_flow_mock,
store={constants.LOADBALANCER:
_load_balancer_mock,
constants.SERVER_GROUP_ID:
_load_balancer_mock.server_group_id,
'test': 'test'
}
)
assert_called_once_with(
_flow_mock,
store={constants.LOADBALANCER: _load_balancer_mock,
list_name: self.ref_listener_dict,
constants.LOADBALANCER_ID: LB_ID,
constants.SERVER_GROUP_ID:
_db_load_balancer_mock.server_group_id,
constants.PROJECT_ID: _db_load_balancer_mock.project_id,
})
)
_flow_mock.run.assert_called_once_with()
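Note: as the updated assertion shows, get_cascade_delete_load_balancer_flow now returns only the flow; the controller worker assembles the store itself, keying each listener dict as 'listener_<id>' and pulling the server group and project from the DB model. A rough, self-contained sketch of that store shape, with placeholder values standing in for the mocks used in the test:

    from unittest import mock

    from octavia.common import constants

    LB_ID = 'abc-123'  # placeholder UUID
    db_load_balancer = mock.MagicMock(server_group_id='sg-1', project_id='proj-1')
    listener_dict = {constants.LISTENER_ID: 'listener1',
                     constants.LOADBALANCER_ID: LB_ID}

    store = {
        constants.LOADBALANCER: {constants.LOADBALANCER_ID: LB_ID},
        'listener_%s' % listener_dict[constants.LISTENER_ID]: listener_dict,
        constants.LOADBALANCER_ID: LB_ID,
        constants.SERVER_GROUP_ID: db_load_balancer.server_group_id,
        constants.PROJECT_ID: db_load_balancer.project_id,
    }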
@ -752,15 +775,11 @@ class TestControllerWorker(base.TestCase):
mock_amp_repo_get):
_flow_mock.reset_mock()
_load_balancer_mock.provisioning_status = constants.PENDING_UPDATE
_db_load_balancer_mock.provisioning_status = constants.PENDING_UPDATE
cw = controller_worker.ControllerWorker()
change = 'TEST2'
cw.update_load_balancer(LB_ID, change)
mock_lb_repo_get.assert_called_once_with(
_db_session,
id=LB_ID)
cw.update_load_balancer(_load_balancer_mock, change)
(base_taskflow.BaseTaskFlowEngine._taskflow_load.
assert_called_once_with(_flow_mock,
@ -768,7 +787,7 @@ class TestControllerWorker(base.TestCase):
constants.LOADBALANCER:
_load_balancer_mock,
constants.LOADBALANCER_ID:
_load_balancer_mock.id,
_db_load_balancer_mock.id,
}))
_flow_mock.run.assert_called_once_with()
@ -796,15 +815,17 @@ class TestControllerWorker(base.TestCase):
cw = controller_worker.ControllerWorker()
cw.create_member(_member)
provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
_db_load_balancer_mock).to_dict()
(base_taskflow.BaseTaskFlowEngine._taskflow_load.
assert_called_once_with(_flow_mock,
store={constants.MEMBER: _member,
constants.LISTENERS:
[self.ref_listener_dict],
constants.LOADBALANCER_ID:
_load_balancer_mock.id,
LB_ID,
constants.LOADBALANCER:
_load_balancer_mock,
provider_lb,
constants.POOL_ID:
POOL_ID}))
@ -831,6 +852,8 @@ class TestControllerWorker(base.TestCase):
_member = _member_mock.to_dict()
cw = controller_worker.ControllerWorker()
cw.delete_member(_member)
provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
_db_load_balancer_mock).to_dict()
(base_taskflow.BaseTaskFlowEngine._taskflow_load.
assert_called_once_with(
@ -838,9 +861,9 @@ class TestControllerWorker(base.TestCase):
constants.LISTENERS:
[self.ref_listener_dict],
constants.LOADBALANCER_ID:
_load_balancer_mock.id,
LB_ID,
constants.LOADBALANCER:
_load_balancer_mock,
provider_lb,
constants.POOL_ID:
POOL_ID,
constants.PROJECT_ID: PROJECT_ID}))
@ -870,6 +893,8 @@ class TestControllerWorker(base.TestCase):
cw = controller_worker.ControllerWorker()
cw.update_member(_member, MEMBER_UPDATE_DICT)
provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
_db_load_balancer_mock).to_dict()
(base_taskflow.BaseTaskFlowEngine._taskflow_load.
assert_called_once_with(_flow_mock,
@ -877,11 +902,11 @@ class TestControllerWorker(base.TestCase):
constants.LISTENERS:
[self.ref_listener_dict],
constants.LOADBALANCER:
_load_balancer_mock,
provider_lb,
constants.POOL_ID:
POOL_ID,
constants.LOADBALANCER_ID:
_load_balancer_mock.id,
LB_ID,
constants.UPDATE_DICT:
MEMBER_UPDATE_DICT}))
@ -911,12 +936,14 @@ class TestControllerWorker(base.TestCase):
constants.POOL_ID: 'testtest'}],
[{constants.MEMBER_ID: 11}],
[MEMBER_UPDATE_DICT])
provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
_db_load_balancer_mock).to_dict()
(base_taskflow.BaseTaskFlowEngine._taskflow_load.
assert_called_once_with(
_flow_mock,
store={constants.LISTENERS: [self.ref_listener_dict],
constants.LOADBALANCER_ID: _load_balancer_mock.id,
constants.LOADBALANCER: _load_balancer_mock,
constants.LOADBALANCER_ID: LB_ID,
constants.LOADBALANCER: provider_lb,
constants.POOL_ID: POOL_ID,
constants.PROJECT_ID: PROJECT_ID}))
@ -944,16 +971,18 @@ class TestControllerWorker(base.TestCase):
cw = controller_worker.ControllerWorker()
cw.create_pool(_pool_mock)
provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
_db_load_balancer_mock).to_dict()
(base_taskflow.BaseTaskFlowEngine._taskflow_load.
assert_called_once_with(_flow_mock,
store={constants.POOL_ID: POOL_ID,
constants.LOADBALANCER_ID:
_load_balancer_mock.id,
LB_ID,
constants.LISTENERS:
[self.ref_listener_dict],
constants.LOADBALANCER:
_load_balancer_mock}))
provider_lb}))
_flow_mock.run.assert_called_once_with()
self.assertEqual(1, mock_pool_repo_get.call_count)
@ -980,16 +1009,17 @@ class TestControllerWorker(base.TestCase):
cw = controller_worker.ControllerWorker()
cw.delete_pool(_pool_mock)
provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
_db_load_balancer_mock).to_dict()
(base_taskflow.BaseTaskFlowEngine._taskflow_load.
assert_called_once_with(_flow_mock,
store={constants.POOL_ID: POOL_ID,
constants.LOADBALANCER_ID:
_load_balancer_mock.id,
LB_ID,
constants.LISTENERS:
[self.ref_listener_dict],
constants.LOADBALANCER:
_load_balancer_mock,
provider_lb,
constants.PROJECT_ID: PROJECT_ID}))
_flow_mock.run.assert_called_once_with()
@ -1013,19 +1043,21 @@ class TestControllerWorker(base.TestCase):
_flow_mock.reset_mock()
_db_pool_mock.provisioning_status = constants.PENDING_UPDATE
mock_pool_repo_get.return_value = _db_pool_mock
cw = controller_worker.ControllerWorker()
cw.update_pool(_pool_mock, POOL_UPDATE_DICT)
provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
_db_load_balancer_mock).to_dict()
(base_taskflow.BaseTaskFlowEngine._taskflow_load.
assert_called_once_with(_flow_mock,
store={constants.POOL_ID: POOL_ID,
constants.LISTENERS:
[self.ref_listener_dict],
constants.LOADBALANCER_ID:
_load_balancer_mock.id,
LB_ID,
constants.LOADBALANCER:
_load_balancer_mock,
provider_lb,
constants.UPDATE_DICT:
POOL_UPDATE_DICT}))
@ -1053,16 +1085,18 @@ class TestControllerWorker(base.TestCase):
cw = controller_worker.ControllerWorker()
cw.create_l7policy(L7POLICY_ID)
provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
_db_load_balancer_mock).to_dict()
(base_taskflow.BaseTaskFlowEngine._taskflow_load.
assert_called_once_with(_flow_mock,
store={constants.L7POLICY: _l7policy_mock,
constants.LOADBALANCER_ID:
_load_balancer_mock.id,
LB_ID,
constants.LISTENERS:
[self.ref_listener_dict],
constants.LOADBALANCER:
_load_balancer_mock}))
provider_lb}))
_flow_mock.run.assert_called_once_with()
self.assertEqual(2, mock_l7policy_repo_get.call_count)
@ -1088,6 +1122,8 @@ class TestControllerWorker(base.TestCase):
cw = controller_worker.ControllerWorker()
cw.delete_l7policy(L7POLICY_ID)
provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
_db_load_balancer_mock).to_dict()
(base_taskflow.BaseTaskFlowEngine._taskflow_load.
assert_called_once_with(_flow_mock,
@ -1095,9 +1131,9 @@ class TestControllerWorker(base.TestCase):
constants.LISTENERS:
[self.ref_listener_dict],
constants.LOADBALANCER_ID:
_load_balancer_mock.id,
LB_ID,
constants.LOADBALANCER:
_load_balancer_mock}))
provider_lb}))
_flow_mock.run.assert_called_once_with()
@ -1123,6 +1159,8 @@ class TestControllerWorker(base.TestCase):
cw = controller_worker.ControllerWorker()
cw.update_l7policy(L7POLICY_ID, L7POLICY_UPDATE_DICT)
provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
_db_load_balancer_mock).to_dict()
(base_taskflow.BaseTaskFlowEngine._taskflow_load.
assert_called_once_with(_flow_mock,
@ -1130,9 +1168,9 @@ class TestControllerWorker(base.TestCase):
constants.LISTENERS:
[self.ref_listener_dict],
constants.LOADBALANCER_ID:
_load_balancer_mock.id,
LB_ID,
constants.LOADBALANCER:
_load_balancer_mock,
provider_lb,
constants.UPDATE_DICT:
L7POLICY_UPDATE_DICT}))
@ -1160,17 +1198,19 @@ class TestControllerWorker(base.TestCase):
cw = controller_worker.ControllerWorker()
cw.create_l7rule(L7RULE_ID)
provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
_db_load_balancer_mock).to_dict()
(base_taskflow.BaseTaskFlowEngine._taskflow_load.
assert_called_once_with(_flow_mock,
store={constants.L7RULE: _l7rule_mock,
constants.L7POLICY: _l7policy_mock,
constants.LOADBALANCER_ID:
_load_balancer_mock.id,
LB_ID,
constants.LISTENERS:
[self.ref_listener_dict],
constants.LOADBALANCER:
_load_balancer_mock}))
provider_lb}))
_flow_mock.run.assert_called_once_with()
self.assertEqual(2, mock_l7rule_repo_get.call_count)
@ -1196,17 +1236,19 @@ class TestControllerWorker(base.TestCase):
cw = controller_worker.ControllerWorker()
cw.delete_l7rule(L7RULE_ID)
provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
_db_load_balancer_mock).to_dict()
(base_taskflow.BaseTaskFlowEngine._taskflow_load.
assert_called_once_with(_flow_mock,
store={constants.L7RULE: _l7rule_mock,
constants.LOADBALANCER_ID:
_load_balancer_mock.id,
LB_ID,
constants.L7POLICY: _l7policy_mock,
constants.LISTENERS:
[self.ref_listener_dict],
constants.LOADBALANCER:
_load_balancer_mock}))
provider_lb}))
_flow_mock.run.assert_called_once_with()
@ -1232,17 +1274,19 @@ class TestControllerWorker(base.TestCase):
cw = controller_worker.ControllerWorker()
cw.update_l7rule(L7RULE_ID, L7RULE_UPDATE_DICT)
provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
_db_load_balancer_mock).to_dict()
(base_taskflow.BaseTaskFlowEngine._taskflow_load.
assert_called_once_with(_flow_mock,
store={constants.L7RULE: _l7rule_mock,
constants.L7POLICY: _l7policy_mock,
constants.LOADBALANCER_ID:
_load_balancer_mock.id,
LB_ID,
constants.LISTENERS:
[self.ref_listener_dict],
constants.LOADBALANCER:
_load_balancer_mock,
provider_lb,
constants.UPDATE_DICT:
L7RULE_UPDATE_DICT}))
@ -1462,7 +1506,7 @@ class TestControllerWorker(base.TestCase):
_amphora_mock2 = mock.MagicMock()
_amphora_mock3 = mock.MagicMock()
_amphora_mock3.status = constants.DELETED
_load_balancer_mock.amphorae = [
_db_load_balancer_mock.amphorae = [
_db_amphora_mock, _amphora_mock2, _amphora_mock3]
cw = controller_worker.ControllerWorker()
cw.failover_loadbalancer('123')
@ -1472,7 +1516,8 @@ class TestControllerWorker(base.TestCase):
mock_update.assert_called_with(_db_session, '123',
provisioning_status=constants.ACTIVE)
_load_balancer_mock.amphorae = [
mock_perform.reset_mock()
_db_load_balancer_mock.amphorae = [
_db_amphora_mock, _amphora_mock2, _amphora_mock3]
_amphora_mock2.role = constants.ROLE_BACKUP
cw.failover_loadbalancer('123')
@ -1497,7 +1542,7 @@ class TestControllerWorker(base.TestCase):
return_value=_flow_mock)
@mock.patch(
'octavia.db.repositories.AmphoraRepository.get_lb_for_amphora',
return_value=_load_balancer_mock)
return_value=_db_load_balancer_mock)
@mock.patch('octavia.db.repositories.LoadBalancerRepository.update')
def test_failover_amphora_anti_affinity(self,
mock_update,
@ -1519,7 +1564,7 @@ class TestControllerWorker(base.TestCase):
self.conf.config(group="nova", enable_anti_affinity=True)
_flow_mock.reset_mock()
_load_balancer_mock.server_group_id = "123"
_db_load_balancer_mock.server_group_id = "123"
cw = controller_worker.ControllerWorker()
cw.failover_amphora(AMP_ID)