Merge "Update amphora v2 for the failover refactor"

Zuul 2020-09-04 14:15:37 +00:00 committed by Gerrit Code Review
commit 46de66b240
32 changed files with 3742 additions and 1386 deletions

.gitignore vendored

@ -10,6 +10,7 @@ doc/build
doc/source/configuration/_static/octavia.policy.yaml.sample
doc/source/contributor/devref/erd.svg
doc/source/contributor/devref/flow_diagrams/
doc/source/contributor/devref/flow_diagrams_v2/
doc/source/contributor/modules
api-ref/build
.idea/*


@ -29,6 +29,8 @@ from tools import create_flow_docs
# Generate our flow diagrams
create_flow_docs.generate(
'tools/flow-list.txt', 'doc/source/contributor/devref/flow_diagrams')
create_flow_docs.generate(
'tools/flow-list-v2.txt', 'doc/source/contributor/devref/flow_diagrams_v2')
# Generate entity relationship diagram
desc = sadisplay.describe(


@ -19,3 +19,17 @@ Octavia controller.
   flow_diagrams/LoadBalancerFlows.rst
   flow_diagrams/MemberFlows.rst
   flow_diagrams/PoolFlows.rst

The following are flow diagrams for the **amphora V2** driver.

.. toctree::
   :maxdepth: 1

   flow_diagrams_v2/AmphoraFlows.rst
   flow_diagrams_v2/HealthMonitorFlows.rst
   flow_diagrams_v2/L7PolicyFlows.rst
   flow_diagrams_v2/L7RuleFlows.rst
   flow_diagrams_v2/ListenerFlows.rst
   flow_diagrams_v2/LoadBalancerFlows.rst
   flow_diagrams_v2/MemberFlows.rst
   flow_diagrams_v2/PoolFlows.rst


@ -87,8 +87,11 @@ class AmphoraProviderDriver(driver_base.ProviderDriver):
try:
vip = network_driver.allocate_vip(lb_obj)
except network_base.AllocateVIPException as e:
raise exceptions.DriverError(user_fault_string=e.orig_msg,
operator_fault_string=e.orig_msg)
message = str(e)
if getattr(e, 'orig_msg', None) is not None:
message = e.orig_msg
raise exceptions.DriverError(user_fault_string=message,
operator_fault_string=message)
LOG.info('Amphora provider created VIP port %s for load balancer %s.',
vip.port_id, loadbalancer_id)
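For context, a stand-alone sketch of the message-fallback pattern the new handler uses: prefer the exception's orig_msg when it is set, otherwise fall back to str(e). The AllocateVIPException class below is a stand-in, not the Octavia network driver exception.

    class AllocateVIPException(Exception):
        # Stand-in for the network driver exception; orig_msg is optional.
        def __init__(self, message, orig_msg=None):
            super().__init__(message)
            self.orig_msg = orig_msg


    def user_facing_message(exc):
        # Use the driver-provided message when present, else the str() form.
        if getattr(exc, 'orig_msg', None) is not None:
            return exc.orig_msg
        return str(exc)


    assert user_facing_message(AllocateVIPException('boom')) == 'boom'
    assert user_facing_message(
        AllocateVIPException('boom', orig_msg='No subnet found')) == 'No subnet found'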


@ -300,6 +300,7 @@ ALLOWED_ADDRESS_PAIRS = 'allowed_address_pairs'
AMP_DATA = 'amp_data'
AMP_VRRP_INT = 'amp_vrrp_int'
AMPHORA = 'amphora'
AMPHORA_DICT = 'amphora_dict'
AMPHORA_ID = 'amphora_id'
AMPHORA_INDEX = 'amphora_index'
AMPHORA_NETWORK_CONFIG = 'amphora_network_config'


@ -122,7 +122,7 @@ class ListenersStart(BaseAmphoraTask):
class AmphoraIndexListenersReload(BaseAmphoraTask):
"""Task to reload all listeners on an amphora."""
def execute(self, loadbalancer, amphorae, amphora_index,
def execute(self, loadbalancer, amphora_index, amphorae,
timeout_dict=None):
"""Execute listener reload routines for listeners on an amphora."""
if loadbalancer.listeners:
@ -304,7 +304,7 @@ class AmphoraUpdateVRRPInterface(BaseAmphoraTask):
class AmphoraIndexUpdateVRRPInterface(BaseAmphoraTask):
"""Task to get and update the VRRP interface device name from amphora."""
def execute(self, amphorae, amphora_index, timeout_dict=None):
def execute(self, amphora_index, amphorae, timeout_dict=None):
amphora_id = amphorae[amphora_index].id
try:
interface = self.amphora_driver.get_interface_from_ip(
@ -376,15 +376,6 @@ class AmphoraIndexVRRPUpdate(BaseAmphoraTask):
LOG.debug("Uploaded VRRP configuration of amphora %s.", amphora_id) LOG.debug("Uploaded VRRP configuration of amphora %s.", amphora_id)
class AmphoraVRRPStop(BaseAmphoraTask):
"""Task to stop keepalived of all amphorae of a LB."""
def execute(self, loadbalancer):
self.amphora_driver.stop_vrrp_service(loadbalancer)
LOG.debug("Stopped VRRP of loadbalancer %s amphorae",
loadbalancer.id)
class AmphoraVRRPStart(BaseAmphoraTask):
"""Task to start keepalived on an amphora.


@ -13,6 +13,7 @@
# under the License.
#
from octavia_lib.common import constants as lib_consts
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
@ -21,10 +22,12 @@ from stevedore import driver as stevedore_driver
from taskflow.listeners import logging as tf_logging
import tenacity
from octavia.amphorae.driver_exceptions import exceptions
from octavia.amphorae.driver_exceptions import exceptions as driver_exc
from octavia.api.drivers import utils as provider_utils
from octavia.common import base_taskflow
from octavia.common import constants
from octavia.common import exceptions
from octavia.common import utils
from octavia.controller.worker.v2.flows import flow_utils
from octavia.controller.worker.v2 import taskflow_jobboard_driver as tsk_driver
from octavia.db import api as db_apis
@ -33,17 +36,12 @@ from octavia.db import repositories as repo
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
RETRY_ATTEMPTS = 15
RETRY_INITIAL_DELAY = 1
RETRY_BACKOFF = 1
RETRY_MAX = 5
# We do not need to log retry exception information. Warning "Could not connect
# to instance" will be logged as usual.
def retryMaskFilter(record):
if record.exc_info is not None and isinstance(
record.exc_info[1], exceptions.AmpConnectionRetry):
record.exc_info[1], driver_exc.AmpConnectionRetry):
return False
return True
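For context, a stand-alone sketch of how a callable filter like retryMaskFilter suppresses log records that carry a retry exception. It uses stdlib logging and a stand-in exception class rather than oslo.log and the Octavia driver exception.

    import logging


    class AmpConnectionRetry(Exception):
        """Stand-in for the Octavia driver retry exception."""


    def retry_mask_filter(record):
        # Drop records whose attached exception is the retry exception.
        if record.exc_info is not None and isinstance(
                record.exc_info[1], AmpConnectionRetry):
            return False
        return True


    LOG = logging.getLogger('worker')
    LOG.addFilter(retry_mask_filter)

    try:
        raise AmpConnectionRetry('amphora not reachable yet')
    except AmpConnectionRetry:
        # This record is filtered out; a plain warning would still be logged.
        LOG.warning('Could not connect to instance', exc_info=True)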
@ -87,8 +85,11 @@ class ControllerWorker(object):
tenacity.retry_if_result(_is_provisioning_status_pending_update) |
tenacity.retry_if_exception_type()),
wait=tenacity.wait_incrementing(
RETRY_INITIAL_DELAY, RETRY_BACKOFF, RETRY_MAX),
stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS))
CONF.haproxy_amphora.api_db_commit_retry_initial_delay,
CONF.haproxy_amphora.api_db_commit_retry_backoff,
CONF.haproxy_amphora.api_db_commit_retry_max),
stop=tenacity.stop_after_attempt(
CONF.haproxy_amphora.api_db_commit_retry_attempts))
def _get_db_obj_until_pending_update(self, repo, id):
return repo.get(db_apis.get_session(), id=id)
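For context, a minimal sketch of the tenacity pattern these decorators now share: incrementing waits capped at a maximum and a bounded number of attempts, with the values coming from the [haproxy_amphora] api_db_commit_retry_* options. The literal numbers below are illustrative, not Octavia's defaults.

    import tenacity


    class NoResultFound(Exception):
        """Stand-in for sqlalchemy.orm.exc.NoResultFound."""


    @tenacity.retry(
        retry=tenacity.retry_if_exception_type(NoResultFound),
        # Wait 1s, then 2s, 3s, ... capped at 5s between attempts.
        wait=tenacity.wait_incrementing(start=1, increment=1, max=5),
        stop=tenacity.stop_after_attempt(15))
    def get_row(repo, session, object_id):
        # Raises NoResultFound until the API's DB commit becomes visible.
        return repo.get(session, id=object_id)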
@ -117,6 +118,7 @@ class ControllerWorker(object):
store = {constants.BUILD_TYPE_PRIORITY:
constants.LB_CREATE_SPARES_POOL_PRIORITY,
constants.FLAVOR: None,
constants.SERVER_GROUP_ID: None,
constants.AVAILABILITY_ZONE: None}
if availability_zone:
store[constants.AVAILABILITY_ZONE] = (
@ -145,8 +147,11 @@ class ControllerWorker(object):
@tenacity.retry(
retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound),
wait=tenacity.wait_incrementing(
RETRY_INITIAL_DELAY, RETRY_BACKOFF, RETRY_MAX),
stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS))
CONF.haproxy_amphora.api_db_commit_retry_initial_delay,
CONF.haproxy_amphora.api_db_commit_retry_backoff,
CONF.haproxy_amphora.api_db_commit_retry_max),
stop=tenacity.stop_after_attempt(
CONF.haproxy_amphora.api_db_commit_retry_attempts))
def create_health_monitor(self, health_monitor):
"""Creates a health monitor.
@ -251,8 +256,11 @@ class ControllerWorker(object):
@tenacity.retry(
retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound),
wait=tenacity.wait_incrementing(
RETRY_INITIAL_DELAY, RETRY_BACKOFF, RETRY_MAX),
stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS))
CONF.haproxy_amphora.api_db_commit_retry_initial_delay,
CONF.haproxy_amphora.api_db_commit_retry_backoff,
CONF.haproxy_amphora.api_db_commit_retry_max),
stop=tenacity.stop_after_attempt(
CONF.haproxy_amphora.api_db_commit_retry_attempts))
def create_listener(self, listener):
"""Creates a listener.
@ -292,14 +300,10 @@ class ControllerWorker(object):
:returns: None
:raises ListenerNotFound: The referenced listener was not found
"""
# TODO(johnsom) Remove once the provider data model includes
# the project ID
lb = self._lb_repo.get(db_apis.get_session(),
id=listener[constants.LOADBALANCER_ID])
store = {constants.LISTENER: listener,
constants.LOADBALANCER_ID:
listener[constants.LOADBALANCER_ID],
constants.PROJECT_ID: lb.project_id}
constants.PROJECT_ID: listener[constants.PROJECT_ID]}
self.run_flow(
flow_utils.get_delete_listener_flow,
store=store)
@ -325,8 +329,11 @@ class ControllerWorker(object):
@tenacity.retry(
retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound),
wait=tenacity.wait_incrementing(
RETRY_INITIAL_DELAY, RETRY_BACKOFF, RETRY_MAX),
stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS))
CONF.haproxy_amphora.api_db_commit_retry_initial_delay,
CONF.haproxy_amphora.api_db_commit_retry_backoff,
CONF.haproxy_amphora.api_db_commit_retry_max),
stop=tenacity.stop_after_attempt(
CONF.haproxy_amphora.api_db_commit_retry_attempts))
def create_load_balancer(self, loadbalancer, flavor=None,
availability_zone=None):
"""Creates a load balancer by allocating Amphorae.
@ -347,16 +354,18 @@ class ControllerWorker(object):
loadbalancer[constants.LOADBALANCER_ID])
raise db_exceptions.NoResultFound
# TODO(johnsom) convert this to octavia_lib constant flavor
# once octavia is transitioned to use octavia_lib
store = {constants.LOADBALANCER_ID:
loadbalancer[constants.LOADBALANCER_ID],
constants.BUILD_TYPE_PRIORITY:
constants.LB_CREATE_NORMAL_PRIORITY,
constants.FLAVOR: flavor,
constants.AVAILABILITY_ZONE: availability_zone}
store = {lib_consts.LOADBALANCER_ID:
loadbalancer[lib_consts.LOADBALANCER_ID],
constants.BUILD_TYPE_PRIORITY:
constants.LB_CREATE_NORMAL_PRIORITY,
lib_consts.FLAVOR: flavor,
lib_consts.AVAILABILITY_ZONE: availability_zone}
topology = lb.topology
if (not CONF.nova.enable_anti_affinity or
topology == constants.TOPOLOGY_SINGLE):
store[constants.SERVER_GROUP_ID] = None
listeners_dicts = (
provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
lb.listeners)
@ -377,17 +386,19 @@ class ControllerWorker(object):
:returns: None
:raises LBNotFound: The referenced load balancer was not found
"""
db_lb = self._lb_repo.get(db_apis.get_session(),
id=load_balancer[constants.LOADBALANCER_ID])
loadbalancer_id = load_balancer[constants.LOADBALANCER_ID]
db_lb = self._lb_repo.get(db_apis.get_session(), id=loadbalancer_id)
store = {constants.LOADBALANCER: load_balancer,
constants.LOADBALANCER_ID: loadbalancer_id,
constants.SERVER_GROUP_ID: db_lb.server_group_id,
constants.PROJECT_ID: db_lb.project_id}
if cascade:
store.update(flow_utils.get_delete_pools_store(db_lb))
store.update(flow_utils.get_delete_listeners_store(db_lb))
listeners = flow_utils.get_listeners_on_lb(db_lb)
pools = flow_utils.get_pools_on_lb(db_lb)
self.run_flow(
flow_utils.get_cascade_delete_load_balancer_flow,
load_balancer, store=store)
load_balancer, listeners, pools, store=store)
else:
self.run_flow(
flow_utils.get_delete_load_balancer_flow,
@ -548,7 +559,6 @@ class ControllerWorker(object):
listeners_dicts = (
provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
pool.listeners))
store = {
constants.MEMBER: member,
constants.LISTENERS: listeners_dicts,
@ -570,8 +580,11 @@ class ControllerWorker(object):
@tenacity.retry(
retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound),
wait=tenacity.wait_incrementing(
RETRY_INITIAL_DELAY, RETRY_BACKOFF, RETRY_MAX),
stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS))
CONF.haproxy_amphora.api_db_commit_retry_initial_delay,
CONF.haproxy_amphora.api_db_commit_retry_backoff,
CONF.haproxy_amphora.api_db_commit_retry_max),
stop=tenacity.stop_after_attempt(
CONF.haproxy_amphora.api_db_commit_retry_attempts))
def create_pool(self, pool):
"""Creates a node pool.
@ -816,152 +829,249 @@ class ControllerWorker(object):
flow_utils.get_update_l7rule_flow,
store=store)
def _perform_amphora_failover(self, amp, priority):
"""Internal method to perform failover operations for an amphora.
:param amp: The amphora to failover
:param priority: The create priority
:returns: None
"""
stored_params = {constants.FAILED_AMPHORA: amp.to_dict(),
constants.LOADBALANCER_ID: amp.load_balancer_id,
constants.BUILD_TYPE_PRIORITY: priority, }
def failover_amphora(self, amphora_id):
"""Perform failover operations for an amphora.
Note: This expects the load balancer to already be in
provisioning_status=PENDING_UPDATE state.
:param amphora_id: ID for amphora to failover
:returns: None
:raises octavia.common.exceptions.NotFound: The referenced amphora was
not found
"""
amphora = None
try:
amphora = self._amphora_repo.get(db_apis.get_session(),
id=amphora_id)
if amphora is None:
LOG.error('Amphora failover for amphora %s failed because '
'there is no record of this amphora in the '
'database. Check that the [house_keeping] '
'amphora_expiry_age configuration setting is not '
'too short. Skipping failover.', amphora_id)
raise exceptions.NotFound(resource=constants.AMPHORA,
id=amphora_id)
if amp.role in (constants.ROLE_MASTER, constants.ROLE_BACKUP):
amp_role = 'master_or_backup'
elif amp.role == constants.ROLE_STANDALONE:
amp_role = 'standalone'
elif amp.role is None:
amp_role = 'spare'
else:
amp_role = 'undefined'
LOG.info("Perform failover for an amphora: %s",
{"id": amp.id,
"load_balancer_id": amp.load_balancer_id,
"lb_network_ip": amp.lb_network_ip,
"compute_id": amp.compute_id,
"role": amp_role})
if amp.status == constants.DELETED:
if amphora.status == constants.DELETED:
LOG.warning('Amphora %s is marked DELETED in the database but '
'was submitted for failover. Deleting it from the '
'amphora health table to exclude it from health '
'checks and skipping the failover.', amp.id)
'checks and skipping the failover.', amphora.id)
self._amphora_health_repo.delete(db_apis.get_session(),
amphora_id=amp.id)
amphora_id=amphora.id)
return
if (CONF.house_keeping.spare_amphora_pool_size == 0) and (
CONF.nova.enable_anti_affinity is False):
LOG.warning("Failing over amphora with no spares pool may "
"cause delays in failover times while a new "
"amphora instance boots.")
loadbalancer = None
if amphora.load_balancer_id:
loadbalancer = self._lb_repo.get(db_apis.get_session(),
id=amphora.load_balancer_id)
lb_amp_count = None
if loadbalancer:
if loadbalancer.topology == constants.TOPOLOGY_ACTIVE_STANDBY:
lb_amp_count = 2
elif loadbalancer.topology == constants.TOPOLOGY_SINGLE:
lb_amp_count = 1
# if we run with anti-affinity we need to set the server group
# as well
lb = self._amphora_repo.get_lb_for_amphora(
db_apis.get_session(), amp.id)
provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
lb).to_dict() if lb else lb
if CONF.nova.enable_anti_affinity and lb:
stored_params[constants.SERVER_GROUP_ID] = lb.server_group_id
if lb is not None and lb.flavor_id:
stored_params[constants.FLAVOR] = (
self._flavor_repo.get_flavor_metadata_dict(
db_apis.get_session(), lb.flavor_id))
else:
stored_params[constants.FLAVOR] = {}
if lb and lb.availability_zone:
az_metadata = {}
flavor_dict = {}
lb_id = None
vip_dict = {}
server_group_id = None
if loadbalancer:
lb_id = loadbalancer.id
# Even if the LB doesn't have a flavor, create one and
# pass through the topology.
if loadbalancer.flavor_id:
flavor_dict = self._flavor_repo.get_flavor_metadata_dict(
db_apis.get_session(), loadbalancer.flavor_id)
flavor_dict[constants.LOADBALANCER_TOPOLOGY] = (
loadbalancer.topology)
else:
flavor_dict = {constants.LOADBALANCER_TOPOLOGY:
loadbalancer.topology}
if loadbalancer.availability_zone:
az_metadata = (
self._az_repo.get_availability_zone_metadata_dict(
db_apis.get_session(),
loadbalancer.availability_zone))
vip_dict = loadbalancer.vip.to_dict()
server_group_id = loadbalancer.server_group_id
provider_lb_dict = (provider_utils.
db_loadbalancer_to_provider_loadbalancer)(
loadbalancer).to_dict() if loadbalancer else loadbalancer
stored_params = {constants.AVAILABILITY_ZONE: az_metadata,
constants.BUILD_TYPE_PRIORITY:
constants.LB_CREATE_FAILOVER_PRIORITY,
constants.FLAVOR: flavor_dict,
constants.LOADBALANCER: provider_lb_dict,
constants.SERVER_GROUP_ID: server_group_id,
constants.LOADBALANCER_ID: lb_id,
constants.VIP: vip_dict}
self.services_controller.run_poster(
flow_utils.get_failover_amphora_flow,
amphora.to_dict(), lb_amp_count,
store=stored_params, wait=True)
LOG.info("Successfully completed the failover for an amphora: %s",
{"id": amphora_id,
"load_balancer_id": lb_id,
"lb_network_ip": amphora.lb_network_ip,
"compute_id": amphora.compute_id,
"role": amphora.role})
except Exception as e:
with excutils.save_and_reraise_exception(reraise=False):
LOG.exception("Amphora %s failover exception: %s",
amphora_id, str(e))
self._amphora_repo.update(db_apis.get_session(),
amphora_id, status=constants.ERROR)
if amphora and amphora.load_balancer_id:
self._lb_repo.update(
db_apis.get_session(), amphora.load_balancer_id,
provisioning_status=constants.ERROR)
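The failover path above always hands the flow a flavor dictionary that carries the load balancer topology, whether or not the load balancer actually has a flavor assigned. A hypothetical helper showing just that contract; the key name and values below are illustrative, not Octavia constants.

    def build_failover_flavor(flavor_metadata, topology):
        # Start from the LB's flavor metadata when it exists, else empty.
        flavor = dict(flavor_metadata) if flavor_metadata else {}
        # Always pass the topology through to the amphora build tasks.
        flavor['loadbalancer_topology'] = topology
        return flavor


    assert build_failover_flavor(None, 'SINGLE') == {
        'loadbalancer_topology': 'SINGLE'}
    assert build_failover_flavor({'compute_flavor': 'm1.small'},
                                 'ACTIVE_STANDBY') == {
        'compute_flavor': 'm1.small',
        'loadbalancer_topology': 'ACTIVE_STANDBY'}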
@staticmethod
def _get_amphorae_for_failover(load_balancer):
"""Returns an ordered list of amphora to failover.
:param load_balancer: The load balancer being failed over.
:returns: An ordered list of amphora to failover,
first amp to failover is last in the list
:raises octavia.common.exceptions.InvalidTopology: LB has an unknown
topology.
"""
if load_balancer.topology == constants.TOPOLOGY_SINGLE:
# In SINGLE topology, amp failover order does not matter
return [a.to_dict() for a in load_balancer.amphorae
if a.status != constants.DELETED]
if load_balancer.topology == constants.TOPOLOGY_ACTIVE_STANDBY:
# In Active/Standby we should preference the standby amp
# for failover first in case the Active is still able to pass
# traffic.
# Note: The active amp can switch at any time and in less than a
# second, so this is "best effort".
amphora_driver = utils.get_amphora_driver()
timeout_dict = {
constants.CONN_MAX_RETRIES:
CONF.haproxy_amphora.failover_connection_max_retries,
constants.CONN_RETRY_INTERVAL:
CONF.haproxy_amphora.failover_connection_retry_interval}
amps = []
selected_amp = None
for amp in load_balancer.amphorae:
if amp.status == constants.DELETED:
continue
if selected_amp is None:
try:
if amphora_driver.get_interface_from_ip(
amp, load_balancer.vip.ip_address,
timeout_dict):
# This is a potential ACTIVE, add it to the list
amps.append(amp.to_dict())
else:
# This one doesn't have the VIP IP, so start
# failovers here.
selected_amp = amp
LOG.debug("Selected amphora %s as the initial "
"failover amphora.", amp.id)
except Exception:
# This amphora is broken, so start failovers here.
selected_amp = amp
else:
# We have already found a STANDBY, so add the rest to the
# list without querying them.
amps.append(amp.to_dict())
# Put the selected amphora at the end of the list so it is
# first to failover.
if selected_amp:
amps.append(selected_amp.to_dict())
return amps
LOG.error('Unknown load balancer topology found: %s, aborting '
'failover.', load_balancer.topology)
raise exceptions.InvalidTopology(topology=load_balancer.topology)
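For illustration, a simplified model of the ordering contract documented above: amphorae that appear to hold the VIP stay early in the list, and the first amphora that is unreachable or does not own the VIP is placed last so it is failed over first. It works on (name, owns_vip) tuples rather than Octavia amphora objects and ignores the driver query details.

    def order_for_failover(amphorae):
        ordered, selected = [], None
        for name, owns_vip in amphorae:
            if selected is None and not owns_vip:
                selected = name          # start failovers with this one
            else:
                ordered.append(name)
        if selected is not None:
            ordered.append(selected)     # last in list == first to fail over
        return ordered


    # The STANDBY (no VIP) ends up last, so it is rebuilt before the ACTIVE.
    assert order_for_failover([('amp-active', True), ('amp-standby', False)]) == \
        ['amp-active', 'amp-standby']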
def failover_loadbalancer(self, load_balancer_id):
"""Perform failover operations for a load balancer.
Note: This expects the load balancer to already be in
provisioning_status=PENDING_UPDATE state.
:param load_balancer_id: ID for load balancer to failover
:returns: None
:raises octavia.common.exceptions.NotFound: The load balancer was not
found.
"""
try:
lb = self._lb_repo.get(db_apis.get_session(),
id=load_balancer_id)
if lb is None:
raise exceptions.NotFound(resource=constants.LOADBALANCER,
id=load_balancer_id)
# Get the ordered list of amphorae to failover for this LB.
amps = self._get_amphorae_for_failover(lb)
if lb.topology == constants.TOPOLOGY_SINGLE:
if len(amps) != 1:
LOG.warning('%d amphorae found on load balancer %s where '
'one should exist. Repairing.', len(amps),
load_balancer_id)
elif lb.topology == constants.TOPOLOGY_ACTIVE_STANDBY:
if len(amps) != 2:
LOG.warning('%d amphorae found on load balancer %s where '
'two should exist. Repairing.', len(amps),
load_balancer_id)
else:
LOG.error('Unknown load balancer topology found: %s, aborting '
'failover!', lb.topology)
raise exceptions.InvalidTopology(topology=lb.topology)
# We must provide a topology in the flavor definition
# here for the amphora to be created with the correct
# configuration.
if lb.flavor_id:
flavor = self._flavor_repo.get_flavor_metadata_dict(
db_apis.get_session(), lb.flavor_id)
flavor[constants.LOADBALANCER_TOPOLOGY] = lb.topology
else:
flavor = {constants.LOADBALANCER_TOPOLOGY: lb.topology}
provider_lb_dict = (
provider_utils.db_loadbalancer_to_provider_loadbalancer(
lb).to_dict() if lb else lb)
provider_lb_dict[constants.FLAVOR] = flavor
stored_params = {constants.LOADBALANCER: provider_lb_dict,
constants.BUILD_TYPE_PRIORITY:
constants.LB_CREATE_FAILOVER_PRIORITY,
constants.SERVER_GROUP_ID: lb.server_group_id,
constants.LOADBALANCER_ID: lb.id,
constants.FLAVOR: flavor}
if lb.availability_zone:
stored_params[constants.AVAILABILITY_ZONE] = (
self._az_repo.get_availability_zone_metadata_dict(
db_apis.get_session(), lb.availability_zone))
else:
stored_params[constants.AVAILABILITY_ZONE] = {}
self.run_flow(
flow_utils.get_failover_flow,
role=amp.role, load_balancer=provider_lb,
store=stored_params, wait=True)
LOG.info("Successfully completed the failover for an amphora: %s",
{"id": amp.id,
"load_balancer_id": amp.load_balancer_id,
"lb_network_ip": amp.lb_network_ip,
"compute_id": amp.compute_id,
"role": amp_role})
self.services_controller.run_poster(
flow_utils.get_failover_LB_flow, amps, provider_lb_dict,
store=stored_params, wait=True)
LOG.info('Failover of load balancer %s completed successfully.',
lb.id)
def failover_amphora(self, amphora_id):
"""Perform failover operations for an amphora.
:param amphora_id: ID for amphora to failover
:returns: None
:raises AmphoraNotFound: The referenced amphora was not found
"""
try:
amp = self._amphora_repo.get(db_apis.get_session(),
id=amphora_id)
if not amp:
LOG.warning("Could not fetch Amphora %s from DB, ignoring "
"failover request.", amphora_id)
return
self._perform_amphora_failover(
amp, constants.LB_CREATE_FAILOVER_PRIORITY)
if amp.load_balancer_id:
LOG.info("Mark ACTIVE in DB for load balancer id: %s",
amp.load_balancer_id)
self._lb_repo.update(
db_apis.get_session(), amp.load_balancer_id,
provisioning_status=constants.ACTIVE)
except Exception as e:
try:
self._lb_repo.update(
db_apis.get_session(), amp.load_balancer_id,
provisioning_status=constants.ERROR)
except Exception:
LOG.error("Unable to revert LB status to ERROR.")
with excutils.save_and_reraise_exception():
LOG.error("Amphora %(id)s failover exception: %(exc)s",
{'id': amphora_id, 'exc': e})
def failover_loadbalancer(self, load_balancer_id):
"""Perform failover operations for a load balancer.
:param load_balancer_id: ID for load balancer to failover
:returns: None
:raises LBNotFound: The referenced load balancer was not found
"""
# Note: This expects that the load balancer is already in
# provisioning_status=PENDING_UPDATE state
try:
lb = self._lb_repo.get(db_apis.get_session(),
id=load_balancer_id)
# Exclude amphora already deleted
amps = [a for a in lb.amphorae if a.status != constants.DELETED]
for amp in amps:
# failover amphora in backup role
# Note: this amp may not currently be the backup
# TODO(johnsom) Change this to query the amp state
# once the amp API supports it.
if amp.role == constants.ROLE_BACKUP:
self._perform_amphora_failover(
amp, constants.LB_CREATE_ADMIN_FAILOVER_PRIORITY)
for amp in amps:
# failover everything else
if amp.role != constants.ROLE_BACKUP:
self._perform_amphora_failover(
amp, constants.LB_CREATE_ADMIN_FAILOVER_PRIORITY)
self._lb_repo.update(
db_apis.get_session(), load_balancer_id,
provisioning_status=constants.ACTIVE)
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error("LB %(lbid)s failover exception: %(exc)s",
{'lbid': load_balancer_id, 'exc': e})
with excutils.save_and_reraise_exception(reraise=False):
LOG.exception("LB %(lbid)s failover exception: %(exc)s",
{'lbid': load_balancer_id, 'exc': e})
self._lb_repo.update(
db_apis.get_session(), load_balancer_id,


@ -1,4 +1,5 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
# Copyright 2020 Red Hat, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@ -14,28 +15,27 @@
#
from oslo_config import cfg
from oslo_log import log as logging
from taskflow.patterns import graph_flow
from taskflow.patterns import linear_flow
from taskflow.patterns import unordered_flow
from octavia.common import constants
from octavia.common import utils
from octavia.controller.worker.v2.tasks import amphora_driver_tasks
from octavia.controller.worker.v2.tasks import cert_task
from octavia.controller.worker.v2.tasks import compute_tasks
from octavia.controller.worker.v2.tasks import database_tasks
from octavia.controller.worker.v2.tasks import lifecycle_tasks
from octavia.controller.worker.v2.tasks import network_tasks
from octavia.db import api as db_apis
from octavia.controller.worker.v2.tasks import retry_tasks
from octavia.db import repositories as repo
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class AmphoraFlows(object):
def __init__(self):
self.lb_repo = repo.LoadBalancerRepository()
def get_create_amphora_flow(self):
"""Creates a flow to create an amphora.
@ -46,24 +46,16 @@ class AmphoraFlows(object):
provides=constants.AMPHORA_ID))
create_amphora_flow.add(lifecycle_tasks.AmphoraIDToErrorOnRevertTask(
requires=constants.AMPHORA_ID))
if (CONF.controller_worker.amphora_driver ==
'amphora_haproxy_rest_driver'):
create_amphora_flow.add(cert_task.GenerateServerPEMTask(
provides=constants.SERVER_PEM))
create_amphora_flow.add(
database_tasks.UpdateAmphoraDBCertExpiration(
requires=(constants.AMPHORA_ID, constants.SERVER_PEM)))
create_amphora_flow.add(compute_tasks.CertComputeCreate(
requires=(constants.AMPHORA_ID, constants.SERVER_PEM,
constants.SERVER_GROUP_ID,
constants.BUILD_TYPE_PRIORITY, constants.FLAVOR),
provides=constants.COMPUTE_ID))
else:
create_amphora_flow.add(compute_tasks.ComputeCreate(
requires=(constants.AMPHORA_ID, constants.BUILD_TYPE_PRIORITY,
constants.FLAVOR),
provides=constants.COMPUTE_ID))
create_amphora_flow.add(database_tasks.MarkAmphoraBootingInDB(
requires=(constants.AMPHORA_ID, constants.COMPUTE_ID)))
create_amphora_flow.add(compute_tasks.ComputeActiveWait(
@ -97,11 +89,6 @@ class AmphoraFlows(object):
post_map_amp_to_lb = linear_flow.Flow(
sf_name)
post_map_amp_to_lb.add(database_tasks.ReloadAmphora(
name=sf_name + '-' + constants.RELOAD_AMPHORA,
requires=constants.AMPHORA,
provides=constants.AMPHORA))
post_map_amp_to_lb.add(amphora_driver_tasks.AmphoraConfigUpdate(
name=sf_name + '-' + constants.AMPHORA_CONFIG_UPDATE_TASK,
requires=(constants.AMPHORA, constants.FLAVOR)))
@ -121,7 +108,7 @@ class AmphoraFlows(object):
return post_map_amp_to_lb
def _get_create_amp_for_lb_subflow(self, prefix, role):
def _get_create_amp_for_lb_subflow(self, prefix, role, is_spare=False):
"""Create a new amphora for lb."""
sf_name = prefix + '-' + constants.CREATE_AMP_FOR_LB_SUBFLOW
@ -131,12 +118,6 @@ class AmphoraFlows(object):
requires=constants.LOADBALANCER_ID,
provides=constants.AMPHORA_ID))
require_server_group_id_condition = (
role in (constants.ROLE_BACKUP, constants.ROLE_MASTER) and
CONF.nova.enable_anti_affinity)
if (CONF.controller_worker.amphora_driver ==
'amphora_haproxy_rest_driver'):
create_amp_for_lb_subflow.add(cert_task.GenerateServerPEMTask(
name=sf_name + '-' + constants.GENERATE_SERVER_PEM,
provides=constants.SERVER_PEM))
@ -146,52 +127,13 @@ class AmphoraFlows(object):
name=sf_name + '-' + constants.UPDATE_CERT_EXPIRATION,
requires=(constants.AMPHORA_ID, constants.SERVER_PEM)))
if require_server_group_id_condition:
create_amp_for_lb_subflow.add(compute_tasks.CertComputeCreate(
name=sf_name + '-' + constants.CERT_COMPUTE_CREATE,
requires=(
constants.AMPHORA_ID,
constants.SERVER_PEM,
constants.BUILD_TYPE_PRIORITY,
constants.SERVER_GROUP_ID,
constants.FLAVOR,
constants.AVAILABILITY_ZONE,
),
requires=(constants.AMPHORA_ID, constants.SERVER_PEM,
constants.BUILD_TYPE_PRIORITY,
constants.SERVER_GROUP_ID,
constants.FLAVOR, constants.AVAILABILITY_ZONE),
provides=constants.COMPUTE_ID))
else:
create_amp_for_lb_subflow.add(compute_tasks.CertComputeCreate(
name=sf_name + '-' + constants.CERT_COMPUTE_CREATE,
requires=(
constants.AMPHORA_ID,
constants.SERVER_PEM,
constants.BUILD_TYPE_PRIORITY,
constants.FLAVOR,
constants.AVAILABILITY_ZONE,
),
provides=constants.COMPUTE_ID))
else:
if require_server_group_id_condition:
create_amp_for_lb_subflow.add(compute_tasks.ComputeCreate(
name=sf_name + '-' + constants.COMPUTE_CREATE,
requires=(
constants.AMPHORA_ID,
constants.BUILD_TYPE_PRIORITY,
constants.SERVER_GROUP_ID,
constants.FLAVOR,
constants.AVAILABILITY_ZONE,
),
provides=constants.COMPUTE_ID))
else:
create_amp_for_lb_subflow.add(compute_tasks.ComputeCreate(
name=sf_name + '-' + constants.COMPUTE_CREATE,
requires=(
constants.AMPHORA_ID,
constants.BUILD_TYPE_PRIORITY,
constants.FLAVOR,
constants.AVAILABILITY_ZONE,
),
provides=constants.COMPUTE_ID))
create_amp_for_lb_subflow.add(database_tasks.UpdateAmphoraComputeId(
name=sf_name + '-' + constants.UPDATE_AMPHORA_COMPUTEID,
requires=(constants.AMPHORA_ID, constants.COMPUTE_ID)))
@ -207,6 +149,33 @@ class AmphoraFlows(object):
name=sf_name + '-' + constants.UPDATE_AMPHORA_INFO,
requires=(constants.AMPHORA_ID, constants.COMPUTE_OBJ),
provides=constants.AMPHORA))
create_amp_for_lb_subflow.add(self._retry_flow(sf_name))
create_amp_for_lb_subflow.add(amphora_driver_tasks.AmphoraFinalize(
name=sf_name + '-' + constants.AMPHORA_FINALIZE,
requires=constants.AMPHORA))
if is_spare:
create_amp_for_lb_subflow.add(
database_tasks.MarkAmphoraReadyInDB(
name=sf_name + '-' + constants.MARK_AMPHORA_READY_INDB,
requires=constants.AMPHORA))
else:
create_amp_for_lb_subflow.add(
database_tasks.MarkAmphoraAllocatedInDB(
name=sf_name + '-' + constants.MARK_AMPHORA_ALLOCATED_INDB,
requires=(constants.AMPHORA, constants.LOADBALANCER_ID)))
if role == constants.ROLE_MASTER:
create_amp_for_lb_subflow.add(database_tasks.MarkAmphoraMasterInDB(
name=sf_name + '-' + constants.MARK_AMP_MASTER_INDB,
requires=constants.AMPHORA))
elif role == constants.ROLE_BACKUP:
create_amp_for_lb_subflow.add(database_tasks.MarkAmphoraBackupInDB(
name=sf_name + '-' + constants.MARK_AMP_BACKUP_INDB,
requires=constants.AMPHORA))
elif role == constants.ROLE_STANDALONE:
create_amp_for_lb_subflow.add(
database_tasks.MarkAmphoraStandAloneInDB(
name=sf_name + '-' + constants.MARK_AMP_STANDALONE_INDB,
requires=constants.AMPHORA))
return create_amp_for_lb_subflow
@ -268,7 +237,7 @@ class AmphoraFlows(object):
return create_amp_for_lb_subflow
def get_amphora_for_lb_subflow(
self, prefix, role=constants.ROLE_STANDALONE):
self, prefix, role=constants.ROLE_STANDALONE, is_spare=False):
"""Tries to allocate a spare amphora to a loadbalancer if none
exists, create a new amphora.
@ -276,6 +245,14 @@ class AmphoraFlows(object):
sf_name = prefix + '-' + constants.GET_AMPHORA_FOR_LB_SUBFLOW
# Don't replace a spare with another spare, just build a fresh one.
if is_spare:
get_spare_amp_flow = linear_flow.Flow(sf_name)
get_spare_amp_flow.add(self._get_create_amp_for_lb_subflow(
prefix, role, is_spare=is_spare))
return get_spare_amp_flow
# We need a graph flow here for a conditional flow
amp_for_lb_flow = graph_flow.Flow(sf_name)
@ -318,280 +295,136 @@ class AmphoraFlows(object):
decider=self._create_new_amp_for_lb_decider,
decider_depth='flow')
# Plug the network
# todo(xgerman): Rework failover flow
if prefix != constants.FAILOVER_AMPHORA_FLOW:
sf_name = prefix + '-' + constants.AMP_PLUG_NET_SUBFLOW
amp_for_lb_net_flow = linear_flow.Flow(sf_name)
amp_for_lb_net_flow.add(amp_for_lb_flow)
amp_for_lb_net_flow.add(*self._get_amp_net_subflow(sf_name))
return amp_for_lb_net_flow
return amp_for_lb_flow
def get_delete_amphora_flow(
self, amphora,
retry_attempts=CONF.controller_worker.amphora_delete_retries,
retry_interval=(
CONF.controller_worker.amphora_delete_retry_interval)):
"""Creates a subflow to delete an amphora and its port.
This flow is idempotent and safe to retry.
def _get_amp_net_subflow(self, sf_name):
flows = []
flows.append(network_tasks.PlugVIPAmpphora(
name=sf_name + '-' + constants.PLUG_VIP_AMPHORA,
requires=(constants.LOADBALANCER, constants.AMPHORA,
constants.SUBNET),
provides=constants.AMP_DATA))
flows.append(network_tasks.ApplyQosAmphora(
name=sf_name + '-' + constants.APPLY_QOS_AMP,
requires=(constants.LOADBALANCER, constants.AMP_DATA,
constants.UPDATE_DICT)))
flows.append(database_tasks.UpdateAmphoraVIPData(
name=sf_name + '-' + constants.UPDATE_AMPHORA_VIP_DATA,
requires=constants.AMP_DATA))
flows.append(database_tasks.ReloadAmphora(
name=sf_name + '-' + constants.RELOAD_AMP_AFTER_PLUG_VIP,
requires=constants.AMPHORA,
provides=constants.AMPHORA))
flows.append(database_tasks.ReloadLoadBalancer(
name=sf_name + '-' + constants.RELOAD_LB_AFTER_PLUG_VIP,
requires=constants.LOADBALANCER_ID,
provides=constants.LOADBALANCER))
flows.append(network_tasks.GetAmphoraNetworkConfigs(
name=sf_name + '-' + constants.GET_AMP_NETWORK_CONFIG,
requires=(constants.LOADBALANCER, constants.AMPHORA),
provides=constants.AMPHORA_NETWORK_CONFIG))
flows.append(amphora_driver_tasks.AmphoraPostVIPPlug(
name=sf_name + '-' + constants.AMP_POST_VIP_PLUG,
rebind={constants.AMPHORAE_NETWORK_CONFIG:
constants.AMPHORA_NETWORK_CONFIG},
requires=(constants.LOADBALANCER,
constants.AMPHORAE_NETWORK_CONFIG)))
return flows
def get_delete_amphora_flow(self):
"""Creates a flow to delete an amphora.
This should be configurable in the config file
:returns: The flow for deleting the amphora
:raises AmphoraNotFound: The referenced Amphora was not found
"""
delete_amphora_flow = linear_flow.Flow(constants.DELETE_AMPHORA_FLOW)
delete_amphora_flow.add(lifecycle_tasks.AmphoraToErrorOnRevertTask(
requires=constants.AMPHORA))
delete_amphora_flow.add(database_tasks.
MarkAmphoraPendingDeleteInDB(
requires=constants.AMPHORA))
delete_amphora_flow.add(database_tasks.
MarkAmphoraHealthBusy(
requires=constants.AMPHORA))
delete_amphora_flow.add(compute_tasks.ComputeDelete(
requires=constants.AMPHORA))
delete_amphora_flow.add(database_tasks.
DisableAmphoraHealthMonitoring(
requires=constants.AMPHORA))
delete_amphora_flow.add(database_tasks.
MarkAmphoraDeletedInDB(
requires=constants.AMPHORA))
return delete_amphora_flow
:param amphora: An amphora dict object.
:param retry_attempts: The number of times the flow is retried.
:param retry_interval: The time to wait, in seconds, between retries.
:returns: The subflow for deleting the amphora.
:raises AmphoraNotFound: The referenced Amphora was not found.
"""
amphora_id = amphora[constants.ID]
delete_amphora_flow = linear_flow.Flow(
name=constants.DELETE_AMPHORA_FLOW + '-' + amphora_id,
retry=retry_tasks.SleepingRetryTimesController(
name='retry-' + constants.DELETE_AMPHORA_FLOW + '-' +
amphora_id,
attempts=retry_attempts, interval=retry_interval))
delete_amphora_flow.add(lifecycle_tasks.AmphoraToErrorOnRevertTask(
name=constants.AMPHORA_TO_ERROR_ON_REVERT + '-' + amphora_id,
inject={constants.AMPHORA: amphora}))
delete_amphora_flow.add(
database_tasks.MarkAmphoraPendingDeleteInDB(
name=constants.MARK_AMPHORA_PENDING_DELETE + '-' + amphora_id,
inject={constants.AMPHORA: amphora}))
delete_amphora_flow.add(database_tasks.MarkAmphoraHealthBusy(
name=constants.MARK_AMPHORA_HEALTH_BUSY + '-' + amphora_id,
inject={constants.AMPHORA: amphora}))
delete_amphora_flow.add(compute_tasks.ComputeDelete(
name=constants.DELETE_AMPHORA + '-' + amphora_id,
inject={constants.AMPHORA: amphora,
constants.PASSIVE_FAILURE: True}))
delete_amphora_flow.add(database_tasks.DisableAmphoraHealthMonitoring(
name=constants.DISABLE_AMP_HEALTH_MONITORING + '-' + amphora_id,
inject={constants.AMPHORA: amphora}))
delete_amphora_flow.add(database_tasks.MarkAmphoraDeletedInDB(
name=constants.MARK_AMPHORA_DELETED + '-' + amphora_id,
inject={constants.AMPHORA: amphora}))
if amphora.get(constants.VRRP_PORT_ID):
delete_amphora_flow.add(network_tasks.DeletePort(
name=(constants.DELETE_PORT + '-' + str(amphora_id) + '-' +
str(amphora[constants.VRRP_PORT_ID])),
inject={constants.PORT_ID: amphora[constants.VRRP_PORT_ID],
constants.PASSIVE_FAILURE: True}))
# TODO(johnsom) What about cleaning up any member ports?
# maybe we should get the list of attached ports prior to delete
# and call delete on them here. Fix this as part of
# https://storyboard.openstack.org/#!/story/2007077
return delete_amphora_flow
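For background, a minimal stand-alone sketch of a taskflow flow wrapped by a retry controller, using taskflow's stock retry.Times. Octavia's SleepingRetryTimesController additionally sleeps between attempts, so this is an analogy, not the Octavia implementation, and DeletePort here is a toy task, not the Octavia one.

    from taskflow import engines
    from taskflow import retry
    from taskflow import task
    from taskflow.patterns import linear_flow


    class DeletePort(task.Task):
        def execute(self, port_id):
            # A real task would call the cloud API here and may raise,
            # causing the retry controller to re-run the whole flow.
            print('deleting port %s' % port_id)


    flow = linear_flow.Flow(
        'delete-amphora-sketch',
        retry=retry.Times(attempts=3, name='retry-delete-amphora-sketch'))
    flow.add(DeletePort(inject={'port_id': 'port-uuid'}))
    engines.run(flow)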
def get_vrrp_subflow(self, prefix, timeout_dict=None,
create_vrrp_group=True):
def get_failover_flow(self, role=constants.ROLE_STANDALONE,
load_balancer=None):
"""Creates a flow to failover a stale amphora
:returns: The flow for amphora failover
"""
failover_amphora_flow = linear_flow.Flow(
constants.FAILOVER_AMPHORA_FLOW)
failover_amphora_flow.add(lifecycle_tasks.AmphoraToErrorOnRevertTask(
rebind={constants.AMPHORA: constants.FAILED_AMPHORA},
requires=constants.AMPHORA))
failover_amphora_flow.add(network_tasks.FailoverPreparationForAmphora(
rebind={constants.AMPHORA: constants.FAILED_AMPHORA},
requires=constants.AMPHORA))
# Note: It seems intuitive to boot an amphora prior to deleting
# the old amphora, however this is a complicated issue.
# If the target host (due to anti-affinity) is resource
# constrained, this will fail where a post-delete will
# succeed. Since this is async with the API it would result
# in the LB ending in ERROR though the amps are still alive.
# Consider in the future making this a complicated
# try-on-failure-retry flow, or move upgrade failovers to be
# synchronous with the API. For now spares pool and act/stdby
# will mitigate most of this delay.
# Delete the old amphora
failover_amphora_flow.add(
database_tasks.MarkAmphoraPendingDeleteInDB(
rebind={constants.AMPHORA: constants.FAILED_AMPHORA},
requires=constants.AMPHORA))
failover_amphora_flow.add(
database_tasks.MarkAmphoraHealthBusy(
rebind={constants.AMPHORA: constants.FAILED_AMPHORA},
requires=constants.AMPHORA))
failover_amphora_flow.add(compute_tasks.ComputeDelete(
rebind={constants.AMPHORA: constants.FAILED_AMPHORA},
requires=constants.AMPHORA))
failover_amphora_flow.add(network_tasks.WaitForPortDetach(
rebind={constants.AMPHORA: constants.FAILED_AMPHORA},
requires=constants.AMPHORA))
failover_amphora_flow.add(database_tasks.MarkAmphoraDeletedInDB(
rebind={constants.AMPHORA: constants.FAILED_AMPHORA},
requires=constants.AMPHORA))
# If this is an unallocated amp (spares pool), we're done
if not load_balancer:
failover_amphora_flow.add(
database_tasks.DisableAmphoraHealthMonitoring(
rebind={constants.AMPHORA: constants.FAILED_AMPHORA},
requires=constants.AMPHORA))
return failover_amphora_flow
# Save failed amphora details for later
failover_amphora_flow.add(
database_tasks.GetAmphoraDetails(
rebind={constants.AMPHORA: constants.FAILED_AMPHORA},
requires=constants.AMPHORA,
provides=constants.AMP_DATA))
# Get a new amphora
# Note: Role doesn't matter here. We will update it later.
get_amp_subflow = self.get_amphora_for_lb_subflow(
prefix=constants.FAILOVER_AMPHORA_FLOW)
failover_amphora_flow.add(get_amp_subflow)
# Update the new amphora with the failed amphora details
failover_amphora_flow.add(database_tasks.UpdateAmpFailoverDetails(
requires=(constants.AMPHORA, constants.AMP_DATA)))
# Update the data stored in the flow from the database
failover_amphora_flow.add(database_tasks.ReloadLoadBalancer(
requires=constants.LOADBALANCER_ID,
provides=constants.LOADBALANCER))
failover_amphora_flow.add(database_tasks.ReloadAmphora(
requires=constants.AMPHORA,
provides=constants.AMPHORA))
# Prepare to reconnect the network interface(s)
failover_amphora_flow.add(network_tasks.GetAmphoraeNetworkConfigs(
requires=constants.LOADBALANCER,
provides=constants.AMPHORAE_NETWORK_CONFIG))
failover_amphora_flow.add(database_tasks.GetListenersFromLoadbalancer(
requires=constants.LOADBALANCER, provides=constants.LISTENERS))
failover_amphora_flow.add(database_tasks.GetAmphoraeFromLoadbalancer(
requires=constants.LOADBALANCER, provides=constants.AMPHORAE))
# Plug the VIP ports into the new amphora
# The reason for moving these steps here is that the UDP listeners need
# to do some kernel configuration before the listener update, to prevent
# failures while the amphora is being rebuilt.
failover_amphora_flow.add(network_tasks.PlugVIPPort(
requires=(constants.AMPHORA, constants.AMPHORAE_NETWORK_CONFIG)))
failover_amphora_flow.add(amphora_driver_tasks.AmphoraPostVIPPlug(
requires=(constants.AMPHORA, constants.LOADBALANCER,
constants.AMPHORAE_NETWORK_CONFIG)))
# Listeners update needs to be run on all amphora to update
# their peer configurations. So parallelize this with an
# unordered subflow.
update_amps_subflow = unordered_flow.Flow(
constants.UPDATE_AMPS_SUBFLOW)
timeout_dict = {
constants.CONN_MAX_RETRIES:
CONF.haproxy_amphora.active_connection_max_retries,
constants.CONN_RETRY_INTERVAL:
CONF.haproxy_amphora.active_connection_rety_interval}
# Setup parallel flows for each amp. We don't know the new amp
# details at flow creation time, so setup a subflow for each
# amp on the LB, they let the task index into a list of amps
# to find the amphora it should work on.
amp_index = 0
db_lb = self.lb_repo.get(db_apis.get_session(),
id=load_balancer[constants.LOADBALANCER_ID])
for amp in db_lb.amphorae:
if amp.status == constants.DELETED:
continue
update_amps_subflow.add(
amphora_driver_tasks.AmpListenersUpdate(
name=constants.AMP_LISTENER_UPDATE + '-' + str(amp_index),
requires=(constants.LOADBALANCER, constants.AMPHORAE),
inject={constants.AMPHORA_INDEX: amp_index,
constants.TIMEOUT_DICT: timeout_dict}))
amp_index += 1
failover_amphora_flow.add(update_amps_subflow)
# Plug the member networks into the new amphora
failover_amphora_flow.add(network_tasks.CalculateAmphoraDelta(
requires=(constants.LOADBALANCER, constants.AMPHORA,
constants.AVAILABILITY_ZONE),
provides=constants.DELTA))
failover_amphora_flow.add(network_tasks.HandleNetworkDelta(
requires=(constants.AMPHORA, constants.DELTA),
provides=constants.ADDED_PORTS))
failover_amphora_flow.add(amphora_driver_tasks.AmphoraePostNetworkPlug(
requires=(constants.LOADBALANCER, constants.ADDED_PORTS)))
failover_amphora_flow.add(database_tasks.ReloadLoadBalancer(
name='octavia-failover-LB-reload-2',
requires=constants.LOADBALANCER_ID,
provides=constants.LOADBALANCER))
# Handle the amphora role and VRRP if necessary
if role == constants.ROLE_MASTER:
failover_amphora_flow.add(database_tasks.MarkAmphoraMasterInDB(
name=constants.MARK_AMP_MASTER_INDB,
requires=constants.AMPHORA))
vrrp_subflow = self.get_vrrp_subflow(role)
failover_amphora_flow.add(vrrp_subflow)
elif role == constants.ROLE_BACKUP:
failover_amphora_flow.add(database_tasks.MarkAmphoraBackupInDB(
name=constants.MARK_AMP_BACKUP_INDB,
requires=constants.AMPHORA))
vrrp_subflow = self.get_vrrp_subflow(role)
failover_amphora_flow.add(vrrp_subflow)
elif role == constants.ROLE_STANDALONE:
failover_amphora_flow.add(
database_tasks.MarkAmphoraStandAloneInDB(
name=constants.MARK_AMP_STANDALONE_INDB,
requires=constants.AMPHORA))
failover_amphora_flow.add(amphora_driver_tasks.ListenersStart(
requires=(constants.LOADBALANCER, constants.AMPHORA)))
failover_amphora_flow.add(
database_tasks.DisableAmphoraHealthMonitoring(
rebind={constants.AMPHORA: constants.FAILED_AMPHORA},
requires=constants.AMPHORA))
return failover_amphora_flow
def get_vrrp_subflow(self, prefix):
sf_name = prefix + '-' + constants.GET_VRRP_SUBFLOW
vrrp_subflow = linear_flow.Flow(sf_name)
vrrp_subflow.add(network_tasks.GetAmphoraeNetworkConfigs(
name=sf_name + '-' + constants.GET_AMP_NETWORK_CONFIG,
requires=constants.LOADBALANCER,
provides=constants.AMPHORAE_NETWORK_CONFIG))
vrrp_subflow.add(amphora_driver_tasks.AmphoraUpdateVRRPInterface(
name=sf_name + '-' + constants.AMP_UPDATE_VRRP_INTF,
requires=constants.LOADBALANCER,
provides=constants.LOADBALANCER))
vrrp_subflow.add(database_tasks.CreateVRRPGroupForLB(
name=sf_name + '-' + constants.CREATE_VRRP_GROUP_FOR_LB,
requires=constants.LOADBALANCER,
provides=constants.LOADBALANCER))
vrrp_subflow.add(amphora_driver_tasks.AmphoraVRRPUpdate(
name=sf_name + '-' + constants.AMP_VRRP_UPDATE,
requires=(constants.LOADBALANCER,
constants.AMPHORAE_NETWORK_CONFIG)))
vrrp_subflow.add(amphora_driver_tasks.AmphoraVRRPStart(
name=sf_name + '-' + constants.AMP_VRRP_START,
requires=constants.LOADBALANCER))
# Optimization for failover flow. No reason to call this
# when configuring the secondary amphora.
if create_vrrp_group:
vrrp_subflow.add(database_tasks.CreateVRRPGroupForLB(
name=sf_name + '-' + constants.CREATE_VRRP_GROUP_FOR_LB,
requires=constants.LOADBALANCER_ID))
vrrp_subflow.add(network_tasks.GetAmphoraeNetworkConfigs(
name=sf_name + '-' + constants.GET_AMP_NETWORK_CONFIG,
requires=constants.LOADBALANCER_ID,
provides=constants.AMPHORAE_NETWORK_CONFIG))
# VRRP update needs to be run on all amphora to update
# their peer configurations. So parallelize this with an
# unordered subflow.
update_amps_subflow = unordered_flow.Flow('VRRP-update-subflow')
# We have three tasks to run in order, per amphora
amp_0_subflow = linear_flow.Flow('VRRP-amp-0-update-subflow')
amp_0_subflow.add(amphora_driver_tasks.AmphoraIndexUpdateVRRPInterface(
name=sf_name + '-0-' + constants.AMP_UPDATE_VRRP_INTF,
requires=constants.AMPHORAE,
inject={constants.AMPHORA_INDEX: 0,
constants.TIMEOUT_DICT: timeout_dict},
provides=constants.AMP_VRRP_INT))
amp_0_subflow.add(amphora_driver_tasks.AmphoraIndexVRRPUpdate(
name=sf_name + '-0-' + constants.AMP_VRRP_UPDATE,
requires=(constants.LOADBALANCER_ID,
constants.AMPHORAE_NETWORK_CONFIG, constants.AMPHORAE,
constants.AMP_VRRP_INT),
inject={constants.AMPHORA_INDEX: 0,
constants.TIMEOUT_DICT: timeout_dict}))
amp_0_subflow.add(amphora_driver_tasks.AmphoraIndexVRRPStart(
name=sf_name + '-0-' + constants.AMP_VRRP_START,
requires=constants.AMPHORAE,
inject={constants.AMPHORA_INDEX: 0,
constants.TIMEOUT_DICT: timeout_dict}))
amp_1_subflow = linear_flow.Flow('VRRP-amp-1-update-subflow')
amp_1_subflow.add(amphora_driver_tasks.AmphoraIndexUpdateVRRPInterface(
name=sf_name + '-1-' + constants.AMP_UPDATE_VRRP_INTF,
requires=constants.AMPHORAE,
inject={constants.AMPHORA_INDEX: 1,
constants.TIMEOUT_DICT: timeout_dict},
provides=constants.AMP_VRRP_INT))
amp_1_subflow.add(amphora_driver_tasks.AmphoraIndexVRRPUpdate(
name=sf_name + '-1-' + constants.AMP_VRRP_UPDATE,
requires=(constants.LOADBALANCER_ID,
constants.AMPHORAE_NETWORK_CONFIG, constants.AMPHORAE,
constants.AMP_VRRP_INT),
inject={constants.AMPHORA_INDEX: 1,
constants.TIMEOUT_DICT: timeout_dict}))
amp_1_subflow.add(amphora_driver_tasks.AmphoraIndexVRRPStart(
name=sf_name + '-1-' + constants.AMP_VRRP_START,
requires=constants.AMPHORAE,
inject={constants.AMPHORA_INDEX: 1,
constants.TIMEOUT_DICT: timeout_dict}))
update_amps_subflow.add(amp_0_subflow)
update_amps_subflow.add(amp_1_subflow)
vrrp_subflow.add(update_amps_subflow)
return vrrp_subflow
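For background, a minimal sketch of the per-index pattern used above: each amphora gets its own linear sub-flow inside an unordered (parallel) flow, and the amphora to act on is selected by injecting an index into the shared amphorae list instead of passing the amphora object itself. Plain taskflow tasks stand in for the Octavia ones.

    from taskflow import engines
    from taskflow import task
    from taskflow.patterns import linear_flow, unordered_flow


    class UpdatePeer(task.Task):
        def execute(self, amphorae, amphora_index):
            # Operate on the amphora selected by the injected index.
            print('updating %s' % amphorae[amphora_index])


    parallel = unordered_flow.Flow('VRRP-update-sketch')
    for index in (0, 1):
        sub = linear_flow.Flow('VRRP-amp-%d-sketch' % index)
        sub.add(UpdatePeer(name='update-peer-%d' % index,
                           inject={'amphora_index': index}))
        parallel.add(sub)

    engines.run(parallel, store={'amphorae': ['amp-a', 'amp-b']})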
def cert_rotate_amphora_flow(self): def cert_rotate_amphora_flow(self):
@ -644,3 +477,254 @@ class AmphoraFlows(object):
requires=(constants.AMPHORA, constants.FLAVOR)))
return update_amphora_flow
def get_amphora_for_lb_failover_subflow(
self, prefix, role=constants.ROLE_STANDALONE,
failed_amp_vrrp_port_id=None, is_vrrp_ipv6=False, is_spare=False):
"""Creates a new amphora that will be used in a failover flow.
:requires: loadbalancer_id, flavor, vip, vip_sg_id, loadbalancer
:provides: amphora_id, amphora
:param prefix: The flow name prefix to use on the flow and tasks.
:param role: The role this amphora will have in the topology.
:param failed_amp_vrrp_port_id: The base port ID of the failed amp.
:param is_vrrp_ipv6: True if the base port IP is IPv6.
:param is_spare: True if we are getting a spare amphora.
:return: A Taskflow sub-flow that will create the amphora.
"""
sf_name = prefix + '-' + constants.CREATE_AMP_FOR_FAILOVER_SUBFLOW
amp_for_failover_flow = linear_flow.Flow(sf_name)
# Try to allocate or boot an amphora instance (unconfigured)
amp_for_failover_flow.add(self.get_amphora_for_lb_subflow(
prefix=prefix + '-' + constants.FAILOVER_LOADBALANCER_FLOW,
role=role, is_spare=is_spare))
# If we are getting a spare amphora, this is all we need to do.
if is_spare:
return amp_for_failover_flow
# Create the VIP base (aka VRRP) port for the amphora.
amp_for_failover_flow.add(network_tasks.CreateVIPBasePort(
name=prefix + '-' + constants.CREATE_VIP_BASE_PORT,
requires=(constants.VIP, constants.VIP_SG_ID,
constants.AMPHORA_ID),
provides=constants.BASE_PORT))
# Attach the VIP base (aka VRRP) port to the amphora.
amp_for_failover_flow.add(compute_tasks.AttachPort(
name=prefix + '-' + constants.ATTACH_PORT,
requires=(constants.AMPHORA, constants.PORT),
rebind={constants.PORT: constants.BASE_PORT}))
# Update the amphora database record with the VIP base port info.
amp_for_failover_flow.add(database_tasks.UpdateAmpFailoverDetails(
name=prefix + '-' + constants.UPDATE_AMP_FAILOVER_DETAILS,
requires=(constants.AMPHORA, constants.VIP, constants.BASE_PORT)))
# Update the amphora networking for the plugged VIP port
amp_for_failover_flow.add(network_tasks.GetAmphoraNetworkConfigsByID(
name=prefix + '-' + constants.GET_AMPHORA_NETWORK_CONFIGS_BY_ID,
requires=(constants.LOADBALANCER_ID, constants.AMPHORA_ID),
provides=constants.AMPHORAE_NETWORK_CONFIG))
# Disable the base (vrrp) port on the failed amphora
# This prevents a DAD failure when bringing up the new amphora.
# Keepalived will handle this for act/stdby.
if (role == constants.ROLE_STANDALONE and failed_amp_vrrp_port_id and
is_vrrp_ipv6):
amp_for_failover_flow.add(network_tasks.AdminDownPort(
name=prefix + '-' + constants.ADMIN_DOWN_PORT,
inject={constants.PORT_ID: failed_amp_vrrp_port_id}))
amp_for_failover_flow.add(amphora_driver_tasks.AmphoraPostVIPPlug(
name=prefix + '-' + constants.AMPHORA_POST_VIP_PLUG,
requires=(constants.AMPHORA, constants.LOADBALANCER,
constants.AMPHORAE_NETWORK_CONFIG)))
# Plug member ports
amp_for_failover_flow.add(network_tasks.CalculateAmphoraDelta(
name=prefix + '-' + constants.CALCULATE_AMPHORA_DELTA,
requires=(constants.LOADBALANCER, constants.AMPHORA,
constants.AVAILABILITY_ZONE, constants.VRRP_PORT),
rebind={constants.VRRP_PORT: constants.BASE_PORT},
provides=constants.DELTA))
amp_for_failover_flow.add(network_tasks.HandleNetworkDelta(
name=prefix + '-' + constants.HANDLE_NETWORK_DELTA,
requires=(constants.AMPHORA, constants.DELTA),
provides=constants.ADDED_PORTS))
amp_for_failover_flow.add(amphora_driver_tasks.AmphoraePostNetworkPlug(
name=prefix + '-' + constants.AMPHORAE_POST_NETWORK_PLUG,
requires=(constants.LOADBALANCER, constants.ADDED_PORTS)))
return amp_for_failover_flow
def get_failover_amphora_flow(self, failed_amphora, lb_amp_count):
"""Get a Taskflow flow to failover an amphora.
1. Build a replacement amphora.
2. Delete the old amphora.
3. Update the amphorae listener configurations.
4. Update the VRRP configurations if needed.
:param failed_amphora: The amphora dict to failover.
:param lb_amp_count: The number of amphora on this load balancer.
:returns: The flow that will provide the failover.
"""
failover_amp_flow = linear_flow.Flow(
constants.FAILOVER_AMPHORA_FLOW)
# Revert amphora to status ERROR if this flow goes wrong
failover_amp_flow.add(lifecycle_tasks.AmphoraToErrorOnRevertTask(
requires=constants.AMPHORA,
inject={constants.AMPHORA: failed_amphora}))
if failed_amphora[constants.ROLE] in (constants.ROLE_MASTER,
constants.ROLE_BACKUP):
amp_role = 'master_or_backup'
elif failed_amphora[constants.ROLE] == constants.ROLE_STANDALONE:
amp_role = 'standalone'
elif failed_amphora[constants.ROLE] is None:
amp_role = 'spare'
else:
amp_role = 'undefined'
LOG.info("Performing failover for amphora: %s",
{"id": failed_amphora[constants.ID],
"load_balancer_id": failed_amphora.get(
constants.LOAD_BALANCER_ID),
"lb_network_ip": failed_amphora.get(constants.LB_NETWORK_IP),
"compute_id": failed_amphora.get(constants.COMPUTE_ID),
"role": amp_role})
failover_amp_flow.add(database_tasks.MarkAmphoraPendingDeleteInDB(
requires=constants.AMPHORA,
inject={constants.AMPHORA: failed_amphora}))
failover_amp_flow.add(database_tasks.MarkAmphoraHealthBusy(
requires=constants.AMPHORA,
inject={constants.AMPHORA: failed_amphora}))
failover_amp_flow.add(network_tasks.GetVIPSecurityGroupID(
requires=constants.LOADBALANCER_ID,
provides=constants.VIP_SG_ID))
is_spare = True
is_vrrp_ipv6 = False
if failed_amphora.get(constants.LOAD_BALANCER_ID):
is_spare = False
if failed_amphora.get(constants.VRRP_IP):
is_vrrp_ipv6 = utils.is_ipv6(failed_amphora[constants.VRRP_IP])
# Get a replacement amphora and plug all of the networking.
#
# Do this early as the compute services have been observed to be
# unreliable. The community decided the chance that deleting first
# would open resources for an instance is less likely than the
# compute service failing to boot an instance for other reasons.
# TODO(johnsom) Move this back out to run for spares after
# delete amphora API is available.
failover_amp_flow.add(self.get_amphora_for_lb_failover_subflow(
prefix=constants.FAILOVER_LOADBALANCER_FLOW,
role=failed_amphora[constants.ROLE],
failed_amp_vrrp_port_id=failed_amphora.get(
constants.VRRP_PORT_ID),
is_vrrp_ipv6=is_vrrp_ipv6,
is_spare=is_spare))
failover_amp_flow.add(
self.get_delete_amphora_flow(
failed_amphora,
retry_attempts=CONF.controller_worker.amphora_delete_retries,
retry_interval=(
CONF.controller_worker.amphora_delete_retry_interval)))
failover_amp_flow.add(
database_tasks.DisableAmphoraHealthMonitoring(
requires=constants.AMPHORA,
inject={constants.AMPHORA: failed_amphora}))
if not failed_amphora.get(constants.LOAD_BALANCER_ID):
# This is an unallocated amphora (spares pool), we are done.
return failover_amp_flow
failover_amp_flow.add(database_tasks.GetLoadBalancer(
requires=constants.LOADBALANCER_ID,
inject={constants.LOADBALANCER_ID:
failed_amphora[constants.LOAD_BALANCER_ID]},
provides=constants.LOADBALANCER))
failover_amp_flow.add(database_tasks.GetAmphoraeFromLoadbalancer(
name=constants.GET_AMPHORAE_FROM_LB,
requires=constants.LOADBALANCER_ID,
inject={constants.LOADBALANCER_ID:
failed_amphora[constants.LOAD_BALANCER_ID]},
provides=constants.AMPHORAE))
# Setup timeouts for our requests to the amphorae
timeout_dict = {
constants.CONN_MAX_RETRIES:
CONF.haproxy_amphora.active_connection_max_retries,
constants.CONN_RETRY_INTERVAL:
CONF.haproxy_amphora.active_connection_rety_interval}
# Listeners update needs to be run on all amphora to update
# their peer configurations. So parallelize this with an
# unordered subflow.
update_amps_subflow = unordered_flow.Flow(
constants.UPDATE_AMPS_SUBFLOW)
for amp_index in range(0, lb_amp_count):
update_amps_subflow.add(
amphora_driver_tasks.AmphoraIndexListenerUpdate(
name=str(amp_index) + '-' + constants.AMP_LISTENER_UPDATE,
requires=(constants.LOADBALANCER, constants.AMPHORAE),
inject={constants.AMPHORA_INDEX: amp_index,
constants.TIMEOUT_DICT: timeout_dict}))
failover_amp_flow.add(update_amps_subflow)
# Configure and enable keepalived in the amphora
if lb_amp_count == 2:
failover_amp_flow.add(
self.get_vrrp_subflow(constants.GET_VRRP_SUBFLOW,
timeout_dict, create_vrrp_group=False))
# Reload the listener. This needs to be done here because
# it will create the required haproxy check scripts for
# the VRRP deployed above.
# A "U" or newer amphora-agent will remove the need for this
# task here.
# TODO(johnsom) Remove this in the "W" cycle
reload_listener_subflow = unordered_flow.Flow(
constants.AMPHORA_LISTENER_RELOAD_SUBFLOW)
for amp_index in range(0, lb_amp_count):
reload_listener_subflow.add(
amphora_driver_tasks.AmphoraIndexListenersReload(
name=(str(amp_index) + '-' +
constants.AMPHORA_RELOAD_LISTENER),
requires=(constants.LOADBALANCER, constants.AMPHORAE),
inject={constants.AMPHORA_INDEX: amp_index,
constants.TIMEOUT_DICT: timeout_dict}))
failover_amp_flow.add(reload_listener_subflow)
# Remove any extraneous ports
# Note: Nova sometimes fails to delete ports attached to an instance.
# For example, if you create an LB with a listener, then
# 'openstack server delete' the amphora, you will see the vrrp
# port attached to that instance will remain after the instance
# is deleted.
# TODO(johnsom) Fix this as part of
# https://storyboard.openstack.org/#!/story/2007077
# Mark LB ACTIVE
failover_amp_flow.add(
database_tasks.MarkLBActiveInDB(mark_subobjects=True,
requires=constants.LOADBALANCER))
return failover_amp_flow
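The failover flow above is built from nested TaskFlow patterns. As a rough, hedged illustration of the composition idea only (plain TaskFlow, not Octavia code; task and flow names below are invented), an unordered sub-flow lets per-amphora work run in parallel while the enclosing linear flow keeps the overall ordering:

# Minimal sketch: linear outer flow containing an unordered (parallel) sub-flow.
from taskflow import engines, task
from taskflow.patterns import linear_flow, unordered_flow

class Step(task.Task):
    def execute(self):
        print('running %s' % self.name)

update_amps = unordered_flow.Flow('update-amps-subflow')
update_amps.add(Step(name='amp-0-listener-update'),
                Step(name='amp-1-listener-update'))

failover = linear_flow.Flow('failover-demo')
failover.add(Step(name='allocate-replacement'), update_amps,
             Step(name='mark-lb-active'))

engines.run(failover)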


@ -11,7 +11,7 @@
# License for the specific language governing permissions and limitations
# under the License.
from octavia.common import constants
from octavia.api.drivers import utils as provider_utils
from octavia.controller.worker.v2.flows import amphora_flows
from octavia.controller.worker.v2.flows import health_monitor_flows
from octavia.controller.worker.v2.flows import l7policy_flows
@ -41,16 +41,36 @@ def get_delete_load_balancer_flow(lb):
return LB_FLOWS.get_delete_load_balancer_flow(lb)
def get_delete_listeners_store(lb):
return LB_FLOWS.get_delete_listeners_store(lb)
def get_listeners_on_lb(db_lb):
"""Get a list of the listeners on a load balancer.
:param db_lb: A load balancer database model object.
:returns: A list of provider dict format listeners.
"""
listener_dicts = []
for listener in db_lb.listeners:
prov_listener = provider_utils.db_listener_to_provider_listener(
listener)
listener_dicts.append(prov_listener.to_dict())
return listener_dicts
def get_delete_pools_store(lb):
return LB_FLOWS.get_delete_pools_store(lb)
def get_pools_on_lb(db_lb):
"""Get a list of the pools on a load balancer.
:param db_lb: A load balancer database model object.
:returns: A list of provider dict format pools.
"""
pool_dicts = []
for pool in db_lb.pools:
prov_pool = provider_utils.db_pool_to_provider_pool(pool)
pool_dicts.append(prov_pool.to_dict())
return pool_dicts
def get_cascade_delete_load_balancer_flow(lb):
return LB_FLOWS.get_cascade_delete_load_balancer_flow(lb)
def get_cascade_delete_load_balancer_flow(lb, listeners=(), pools=()):
return LB_FLOWS.get_cascade_delete_load_balancer_flow(lb, listeners,
pools)
def get_update_load_balancer_flow():
@ -61,12 +81,17 @@ def get_create_amphora_flow():
return AMP_FLOWS.get_create_amphora_flow()
def get_delete_amphora_flow():
return AMP_FLOWS.get_delete_amphora_flow()
def get_delete_amphora_flow(amphora, retry_attempts=None, retry_interval=None):
return AMP_FLOWS.get_delete_amphora_flow(amphora, retry_attempts,
retry_interval)
def get_failover_flow(role=constants.ROLE_STANDALONE, load_balancer=None):
return AMP_FLOWS.get_failover_flow(role=role, load_balancer=load_balancer)
def get_failover_LB_flow(amps, lb):
return LB_FLOWS.get_failover_LB_flow(amps, lb)
def get_failover_amphora_flow(amphora_dict, lb_amp_count):
return AMP_FLOWS.get_failover_amphora_flow(amphora_dict, lb_amp_count)
def cert_rotate_amphora_flow():


@ -83,24 +83,26 @@ class ListenerFlows(object):
return delete_listener_flow
def get_delete_listener_internal_flow(self, listener_name):
def get_delete_listener_internal_flow(self, listener):
"""Create a flow to delete a listener and l7policies internally
(will skip deletion on the amp and marking LB active)
:returns: The flow for deleting a listener
"""
delete_listener_flow = linear_flow.Flow(constants.DELETE_LISTENER_FLOW)
listener_id = listener[constants.LISTENER_ID]
delete_listener_flow = linear_flow.Flow(
constants.DELETE_LISTENER_FLOW + '-' + listener_id)
# Should cascade delete all L7 policies
delete_listener_flow.add(network_tasks.UpdateVIPForDelete(
name='delete_update_vip_' + listener_name,
name='delete_update_vip_' + listener_id,
requires=constants.LOADBALANCER_ID))
delete_listener_flow.add(database_tasks.DeleteListenerInDB(
name='delete_listener_in_db_' + listener_name,
name='delete_listener_in_db_' + listener_id,
requires=constants.LISTENER,
rebind={constants.LISTENER: listener_name}))
inject={constants.LISTENER: listener}))
delete_listener_flow.add(database_tasks.DecrementListenerQuota(
name='decrement_listener_quota_' + listener_name,
name='decrement_listener_quota_' + listener_id,
requires=constants.PROJECT_ID))
return delete_listener_flow
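The change above swaps rebind (which pointed at a per-listener store key) for inject (which supplies the listener dict to just this task). A hedged, standalone sketch of the difference, using plain TaskFlow with invented names rather than the Octavia tasks:

# rebind maps a task argument to a differently named store key;
# inject hard-codes a value for this one task only.
from taskflow import engines, task
from taskflow.patterns import linear_flow

class DeleteListener(task.Task):
    def execute(self, listener):
        print('deleting listener %s' % listener['id'])

flow = linear_flow.Flow('listener-delete-demo')
flow.add(DeleteListener(name='rebound',
                        rebind={'listener': 'listener_a1b2'}))
flow.add(DeleteListener(name='injected',
                        inject={'listener': {'id': 'a1b2'}}))

engines.run(flow, store={'listener_a1b2': {'id': 'a1b2'}})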


@ -1,4 +1,5 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
# Copyright 2020 Red Hat, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@ -18,9 +19,9 @@ from oslo_log import log as logging
from taskflow.patterns import linear_flow
from taskflow.patterns import unordered_flow
from octavia.api.drivers import utils as provider_utils
from octavia.common import constants
from octavia.common import exceptions
from octavia.common import utils
from octavia.controller.worker.v2.flows import amphora_flows
from octavia.controller.worker.v2.flows import listener_flows
from octavia.controller.worker.v2.flows import member_flows
@ -30,7 +31,6 @@ from octavia.controller.worker.v2.tasks import compute_tasks
from octavia.controller.worker.v2.tasks import database_tasks
from octavia.controller.worker.v2.tasks import lifecycle_tasks
from octavia.controller.worker.v2.tasks import network_tasks
from octavia.db import api as db_apis
from octavia.db import repositories as repo
CONF = cfg.CONF
@ -72,7 +72,7 @@ class LoadBalancerFlows(object):
requires=(constants.LOADBALANCER_ID, constants.VIP),
provides=constants.LOADBALANCER))
lb_create_flow.add(network_tasks.UpdateVIPSecurityGroup(
requires=constants.LOADBALANCER))
requires=constants.LOADBALANCER_ID))
lb_create_flow.add(network_tasks.GetSubnetFromVIP(
requires=constants.LOADBALANCER,
provides=constants.SUBNET))
@ -97,9 +97,15 @@ class LoadBalancerFlows(object):
return lb_create_flow
def _create_single_topology(self):
return (self.amp_flows.get_amphora_for_lb_subflow(
prefix=constants.ROLE_STANDALONE,
role=constants.ROLE_STANDALONE), )
sf_name = (constants.ROLE_STANDALONE + '-' +
constants.AMP_PLUG_NET_SUBFLOW)
amp_for_lb_net_flow = linear_flow.Flow(sf_name)
amp_for_lb_flow = self.amp_flows.get_amphora_for_lb_subflow(
prefix=constants.ROLE_STANDALONE,
role=constants.ROLE_STANDALONE)
amp_for_lb_net_flow.add(amp_for_lb_flow)
amp_for_lb_net_flow.add(*self._get_amp_net_subflow(sf_name))
return amp_for_lb_net_flow
def _create_active_standby_topology(
self, lf_name=constants.CREATE_LOADBALANCER_FLOW):
@ -128,16 +134,52 @@ class LoadBalancerFlows(object):
f_name = constants.CREATE_LOADBALANCER_FLOW
amps_flow = unordered_flow.Flow(f_name)
master_amp_sf = self.amp_flows.get_amphora_for_lb_subflow(
prefix=constants.ROLE_MASTER, role=constants.ROLE_MASTER
)
backup_amp_sf = self.amp_flows.get_amphora_for_lb_subflow(
prefix=constants.ROLE_BACKUP, role=constants.ROLE_BACKUP)
master_sf_name = (constants.ROLE_MASTER + '-' +
constants.AMP_PLUG_NET_SUBFLOW)
master_amp_sf = linear_flow.Flow(master_sf_name)
master_amp_sf.add(self.amp_flows.get_amphora_for_lb_subflow(
prefix=constants.ROLE_MASTER, role=constants.ROLE_MASTER))
master_amp_sf.add(*self._get_amp_net_subflow(master_sf_name))
backup_sf_name = (constants.ROLE_BACKUP + '-' +
constants.AMP_PLUG_NET_SUBFLOW)
backup_amp_sf = linear_flow.Flow(backup_sf_name)
backup_amp_sf.add(self.amp_flows.get_amphora_for_lb_subflow(
prefix=constants.ROLE_BACKUP, role=constants.ROLE_BACKUP))
backup_amp_sf.add(*self._get_amp_net_subflow(backup_sf_name))
amps_flow.add(master_amp_sf, backup_amp_sf)
return flows + [amps_flow]
def _get_amp_net_subflow(self, sf_name):
flows = []
flows.append(network_tasks.PlugVIPAmphora(
name=sf_name + '-' + constants.PLUG_VIP_AMPHORA,
requires=(constants.LOADBALANCER, constants.AMPHORA,
constants.SUBNET),
provides=constants.AMP_DATA))
flows.append(network_tasks.ApplyQosAmphora(
name=sf_name + '-' + constants.APPLY_QOS_AMP,
requires=(constants.LOADBALANCER, constants.AMP_DATA,
constants.UPDATE_DICT)))
flows.append(database_tasks.UpdateAmphoraVIPData(
name=sf_name + '-' + constants.UPDATE_AMPHORA_VIP_DATA,
requires=constants.AMP_DATA))
flows.append(network_tasks.GetAmphoraNetworkConfigs(
name=sf_name + '-' + constants.GET_AMP_NETWORK_CONFIG,
requires=(constants.LOADBALANCER, constants.AMPHORA),
provides=constants.AMPHORA_NETWORK_CONFIG))
flows.append(amphora_driver_tasks.AmphoraPostVIPPlug(
name=sf_name + '-' + constants.AMP_POST_VIP_PLUG,
rebind={constants.AMPHORAE_NETWORK_CONFIG:
constants.AMPHORA_NETWORK_CONFIG},
requires=(constants.LOADBALANCER,
constants.AMPHORAE_NETWORK_CONFIG)))
return flows
def _create_listeners_flow(self):
flows = []
flows.append(
@ -181,13 +223,6 @@ class LoadBalancerFlows(object):
created/allocated amphorae.
:return: Post amphorae association subflow
"""
# Note: If any task in this flow failed, the created amphorae will be
# left ''incorrectly'' allocated to the loadbalancer. Likely,
# the get_new_LB_networking_subflow is the most prune to failure
# shall deallocate the amphora from its loadbalancer and put it in a
# READY state.
sf_name = prefix + '-' + constants.POST_LB_AMP_ASSOCIATION_SUBFLOW
post_create_LB_flow = linear_flow.Flow(sf_name)
post_create_LB_flow.add(
@ -197,6 +232,9 @@ class LoadBalancerFlows(object):
provides=constants.LOADBALANCER))
if topology == constants.TOPOLOGY_ACTIVE_STANDBY:
post_create_LB_flow.add(database_tasks.GetAmphoraeFromLoadbalancer(
requires=constants.LOADBALANCER_ID,
provides=constants.AMPHORAE))
vrrp_subflow = self.amp_flows.get_vrrp_subflow(prefix)
post_create_LB_flow.add(vrrp_subflow)
@ -208,36 +246,19 @@ class LoadBalancerFlows(object):
requires=constants.LOADBALANCER))
return post_create_LB_flow
def _get_delete_listeners_flow(self, lb):
"""Sets up an internal delete flow
Because task flow doesn't support loops we store each listener
we want to delete in the store part and then rebind
:param lb: load balancer
:return: (flow, store) -- flow for the deletion and store with all
the listeners stored properly
"""
listeners_delete_flow = unordered_flow.Flow('listener_delete_flow')
db_lb = self.lb_repo.get(db_apis.get_session(),
id=lb[constants.LOADBALANCER_ID])
for listener in db_lb.listeners:
listener_name = 'listener_' + listener.id
listeners_delete_flow.add(
self.listener_flows.get_delete_listener_internal_flow(
listener_name))
return listeners_delete_flow
def _get_delete_listeners_flow(self, listeners):
"""Sets up an internal delete flow
:param listeners: A list of listener dicts
:return: The flow for the deletion
"""
listeners_delete_flow = unordered_flow.Flow('listeners_delete_flow')
for listener in listeners:
listeners_delete_flow.add(
self.listener_flows.get_delete_listener_internal_flow(
listener))
return listeners_delete_flow
def get_delete_listeners_store(self, lb):
store = {}
for listener in lb.listeners:
listener_name = 'listener_' + listener.id
prov_listener = provider_utils.db_listener_to_provider_listener(
listener)
store[listener_name] = prov_listener.to_dict()
store.update({constants.LOADBALANCER_ID: lb.id,
constants.PROJECT_ID: lb.project_id})
return store
def get_delete_load_balancer_flow(self, lb):
"""Creates a flow to delete a load balancer.
@ -245,14 +266,7 @@ class LoadBalancerFlows(object):
"""
return self._get_delete_load_balancer_flow(lb, False)
def get_delete_pools_store(self, lb):
store = {}
for pool in lb.pools:
pool_name = 'pool' + pool.id
store[pool_name] = pool.id
return store
def _get_delete_pools_flow(self, lb):
def _get_delete_pools_flow(self, pools):
"""Sets up an internal delete flow
Because task flow doesn't support loops we store each pool
@ -262,16 +276,14 @@ class LoadBalancerFlows(object):
the listeners stored properly
"""
pools_delete_flow = unordered_flow.Flow('pool_delete_flow')
db_lb = self.lb_repo.get(db_apis.get_session(),
id=lb[constants.LOADBALANCER_ID])
for pool in db_lb.pools:
pool_name = 'pool' + pool.id
for pool in pools:
pools_delete_flow.add(
self.pool_flows.get_delete_pool_flow_internal(
pool_name))
pool[constants.POOL_ID]))
return pools_delete_flow
def _get_delete_load_balancer_flow(self, lb, cascade):
def _get_delete_load_balancer_flow(self, lb, cascade,
listeners=(), pools=()):
delete_LB_flow = linear_flow.Flow(constants.DELETE_LOADBALANCER_FLOW)
delete_LB_flow.add(lifecycle_tasks.LoadBalancerToErrorOnRevertTask(
requires=constants.LOADBALANCER))
@ -280,8 +292,8 @@ class LoadBalancerFlows(object):
delete_LB_flow.add(database_tasks.MarkLBAmphoraeHealthBusy(
requires=constants.LOADBALANCER))
if cascade:
listeners_delete = self._get_delete_listeners_flow(lb)
listeners_delete = self._get_delete_listeners_flow(listeners)
pools_delete = self._get_delete_pools_flow(lb)
pools_delete = self._get_delete_pools_flow(pools)
delete_LB_flow.add(pools_delete)
delete_LB_flow.add(listeners_delete)
delete_LB_flow.add(network_tasks.UnplugVIP(
@ -300,47 +312,14 @@ class LoadBalancerFlows(object):
requires=constants.PROJECT_ID))
return delete_LB_flow
def get_cascade_delete_load_balancer_flow(self, lb):
def get_cascade_delete_load_balancer_flow(self, lb, listeners, pools):
"""Creates a flow to delete a load balancer.
:returns: The flow for deleting a load balancer
"""
return self._get_delete_load_balancer_flow(lb, True)
return self._get_delete_load_balancer_flow(lb, True,
listeners=listeners,
pools=pools)
def get_new_LB_networking_subflow(self):
"""Create a sub-flow to setup networking.
:returns: The flow to setup networking for a new amphora
"""
new_LB_net_subflow = linear_flow.Flow(constants.
LOADBALANCER_NETWORKING_SUBFLOW)
new_LB_net_subflow.add(network_tasks.AllocateVIP(
requires=constants.LOADBALANCER,
provides=constants.VIP))
new_LB_net_subflow.add(database_tasks.UpdateVIPAfterAllocation(
requires=(constants.LOADBALANCER_ID, constants.VIP),
provides=constants.LOADBALANCER))
new_LB_net_subflow.add(network_tasks.PlugVIP(
requires=constants.LOADBALANCER,
provides=constants.AMPS_DATA))
new_LB_net_subflow.add(network_tasks.ApplyQos(
requires=(constants.LOADBALANCER, constants.AMPS_DATA,
constants.UPDATE_DICT)))
new_LB_net_subflow.add(database_tasks.UpdateAmphoraeVIPData(
requires=constants.AMPS_DATA))
new_LB_net_subflow.add(database_tasks.ReloadLoadBalancer(
name=constants.RELOAD_LB_AFTER_PLUG_VIP,
requires=constants.LOADBALANCER_ID,
provides=constants.LOADBALANCER))
new_LB_net_subflow.add(network_tasks.GetAmphoraeNetworkConfigs(
requires=constants.LOADBALANCER,
provides=constants.AMPHORAE_NETWORK_CONFIG))
new_LB_net_subflow.add(amphora_driver_tasks.AmphoraePostVIPPlug(
requires=(constants.LOADBALANCER,
constants.AMPHORAE_NETWORK_CONFIG)))
return new_LB_net_subflow
def get_update_load_balancer_flow(self):
"""Creates a flow to update a load balancer.
@ -360,3 +339,341 @@ class LoadBalancerFlows(object):
requires=constants.LOADBALANCER))
return update_LB_flow
def get_failover_LB_flow(self, amps, lb):
"""Failover a load balancer.
1. Validate the VIP port is correct and present.
2. Build a replacement amphora.
3. Delete the failed amphora.
4. Configure the replacement amphora listeners.
5. Configure VRRP for the listeners.
6. Build the second replacement amphora.
7. Delete the second failed amphora.
8. Delete any extraneous amphora.
9. Configure the listeners on the new amphorae.
10. Configure the VRRP on the new amphorae.
11. Reload the listener configurations to pick up VRRP changes.
12. Mark the load balancer back to ACTIVE.
:returns: The flow that will provide the failover.
"""
lb_topology = lb[constants.FLAVOR][constants.LOADBALANCER_TOPOLOGY]
# Pick one amphora to be failed over if any exist.
failed_amp = None
if amps:
failed_amp = amps.pop()
failover_LB_flow = linear_flow.Flow(
constants.FAILOVER_LOADBALANCER_FLOW)
# Revert LB to provisioning_status ERROR if this flow goes wrong
failover_LB_flow.add(lifecycle_tasks.LoadBalancerToErrorOnRevertTask(
requires=constants.LOADBALANCER))
# Setup timeouts for our requests to the amphorae
timeout_dict = {
constants.CONN_MAX_RETRIES:
CONF.haproxy_amphora.active_connection_max_retries,
constants.CONN_RETRY_INTERVAL:
CONF.haproxy_amphora.active_connection_rety_interval}
if failed_amp:
failed_amp_role = failed_amp.get(constants.ROLE)
if failed_amp_role in (constants.ROLE_MASTER,
constants.ROLE_BACKUP):
amp_role = 'master_or_backup'
elif failed_amp_role == constants.ROLE_STANDALONE:
amp_role = 'standalone'
elif failed_amp_role is None:
amp_role = 'spare'
else:
amp_role = 'undefined'
LOG.info("Performing failover for amphora: %s",
{"id": failed_amp.get(constants.ID),
"load_balancer_id": lb.get(constants.ID),
"lb_network_ip": failed_amp.get(constants.LB_NETWORK_IP),
"compute_id": failed_amp.get(constants.COMPUTE_ID),
"role": amp_role})
failover_LB_flow.add(database_tasks.MarkAmphoraPendingDeleteInDB(
requires=constants.AMPHORA,
inject={constants.AMPHORA: failed_amp}))
failover_LB_flow.add(database_tasks.MarkAmphoraHealthBusy(
requires=constants.AMPHORA,
inject={constants.AMPHORA: failed_amp}))
# Check that the VIP port exists and is ok
failover_LB_flow.add(
network_tasks.AllocateVIP(requires=constants.LOADBALANCER,
provides=constants.VIP))
# Update the database with the VIP information
failover_LB_flow.add(database_tasks.UpdateVIPAfterAllocation(
requires=(constants.LOADBALANCER_ID, constants.VIP),
provides=constants.LOADBALANCER))
# Make sure the SG has the correct rules and re-apply to the
# VIP port. It is not used on the VIP port, but will help lock
# the SG as in use.
failover_LB_flow.add(network_tasks.UpdateVIPSecurityGroup(
requires=constants.LOADBALANCER_ID, provides=constants.VIP_SG_ID))
new_amp_role = constants.ROLE_STANDALONE
if lb_topology == constants.TOPOLOGY_ACTIVE_STANDBY:
new_amp_role = constants.ROLE_BACKUP
# Get a replacement amphora and plug all of the networking.
#
# Do this early as the compute services have been observed to be
# unreliable. The community decided the chance that deleting first
# would open resources for an instance is less likely than the compute
# service failing to boot an instance for other reasons.
if failed_amp:
failed_vrrp_is_ipv6 = False
if failed_amp.get(constants.VRRP_IP):
failed_vrrp_is_ipv6 = utils.is_ipv6(
failed_amp[constants.VRRP_IP])
failover_LB_flow.add(
self.amp_flows.get_amphora_for_lb_failover_subflow(
prefix=constants.FAILOVER_LOADBALANCER_FLOW,
role=new_amp_role,
failed_amp_vrrp_port_id=failed_amp.get(
constants.VRRP_PORT_ID),
is_vrrp_ipv6=failed_vrrp_is_ipv6))
else:
failover_LB_flow.add(
self.amp_flows.get_amphora_for_lb_failover_subflow(
prefix=constants.FAILOVER_LOADBALANCER_FLOW,
role=new_amp_role))
if lb_topology == constants.TOPOLOGY_ACTIVE_STANDBY:
failover_LB_flow.add(database_tasks.MarkAmphoraBackupInDB(
name=constants.MARK_AMP_BACKUP_INDB,
requires=constants.AMPHORA))
# Delete the failed amp
if failed_amp:
failover_LB_flow.add(
self.amp_flows.get_delete_amphora_flow(failed_amp))
# Update the data stored in the flow from the database
failover_LB_flow.add(database_tasks.ReloadLoadBalancer(
requires=constants.LOADBALANCER_ID,
provides=constants.LOADBALANCER))
# Configure the listener(s)
# We will run update on this amphora again later if this is
# an active/standby load balancer because we want this amp
# functional as soon as possible. It must run again to update
# the configurations for the new peers.
failover_LB_flow.add(amphora_driver_tasks.AmpListenersUpdate(
name=constants.AMP_LISTENER_UPDATE,
requires=(constants.LOADBALANCER, constants.AMPHORA),
inject={constants.TIMEOUT_DICT: timeout_dict}))
# Bring up the new "backup" amphora VIP now to reduce the outage
# on the final failover. This dropped the outage from 8-9 seconds
# to less than one in my lab.
# This does mean some steps have to be repeated later to reconfigure
# for the second amphora as a peer.
if lb_topology == constants.TOPOLOGY_ACTIVE_STANDBY:
failover_LB_flow.add(database_tasks.CreateVRRPGroupForLB(
name=new_amp_role + '-' + constants.CREATE_VRRP_GROUP_FOR_LB,
requires=constants.LOADBALANCER_ID))
failover_LB_flow.add(network_tasks.GetAmphoraNetworkConfigsByID(
name=(new_amp_role + '-' +
constants.GET_AMPHORA_NETWORK_CONFIGS_BY_ID),
requires=(constants.LOADBALANCER_ID, constants.AMPHORA_ID),
provides=constants.FIRST_AMP_NETWORK_CONFIGS))
failover_LB_flow.add(
amphora_driver_tasks.AmphoraUpdateVRRPInterface(
name=new_amp_role + '-' + constants.AMP_UPDATE_VRRP_INTF,
requires=constants.AMPHORA,
inject={constants.TIMEOUT_DICT: timeout_dict},
provides=constants.FIRST_AMP_VRRP_INTERFACE))
failover_LB_flow.add(amphora_driver_tasks.AmphoraVRRPUpdate(
name=new_amp_role + '-' + constants.AMP_VRRP_UPDATE,
requires=(constants.LOADBALANCER_ID, constants.AMPHORA),
rebind={constants.AMPHORAE_NETWORK_CONFIG:
constants.FIRST_AMP_NETWORK_CONFIGS,
constants.AMP_VRRP_INT:
constants.FIRST_AMP_VRRP_INTERFACE},
inject={constants.TIMEOUT_DICT: timeout_dict}))
failover_LB_flow.add(amphora_driver_tasks.AmphoraVRRPStart(
name=new_amp_role + '-' + constants.AMP_VRRP_START,
requires=constants.AMPHORA,
inject={constants.TIMEOUT_DICT: timeout_dict}))
# Start the listener. This needs to be done here because
# it will create the required haproxy check scripts for
# the VRRP deployed above.
# A "V" or newer amphora-agent will remove the need for this
# task here.
# TODO(johnsom) Remove this in the "X" cycle
failover_LB_flow.add(amphora_driver_tasks.ListenersStart(
name=new_amp_role + '-' + constants.AMP_LISTENER_START,
requires=(constants.LOADBALANCER, constants.AMPHORA)))
# #### Work on standby amphora if needed #####
new_amp_role = constants.ROLE_MASTER
failed_amp = None
if amps:
failed_amp = amps.pop()
if failed_amp:
failed_amp_role = failed_amp.get(constants.ROLE)
if failed_amp_role in (constants.ROLE_MASTER,
constants.ROLE_BACKUP):
amp_role = 'master_or_backup'
elif failed_amp_role == constants.ROLE_STANDALONE:
amp_role = 'standalone'
elif failed_amp_role is None:
amp_role = 'spare'
else:
amp_role = 'undefined'
LOG.info("Performing failover for amphora: %s",
{"id": failed_amp.get(constants.ID),
"load_balancer_id": lb.get(constants.ID),
"lb_network_ip": failed_amp.get(
constants.LB_NETWORK_IP),
"compute_id": failed_amp.get(constants.COMPUTE_ID),
"role": amp_role})
failover_LB_flow.add(
database_tasks.MarkAmphoraPendingDeleteInDB(
name=(new_amp_role + '-' +
constants.MARK_AMPHORA_PENDING_DELETE),
requires=constants.AMPHORA,
inject={constants.AMPHORA: failed_amp}))
failover_LB_flow.add(database_tasks.MarkAmphoraHealthBusy(
name=(new_amp_role + '-' +
constants.MARK_AMPHORA_HEALTH_BUSY),
requires=constants.AMPHORA,
inject={constants.AMPHORA: failed_amp}))
# Get a replacement amphora and plug all of the networking.
#
# Do this early as the compute services have been observed to be
# unreliable. The community decided the chance that deleting first
# would open resources for an instance is less likely than the
# compute service failing to boot an instance for other reasons.
failover_LB_flow.add(
self.amp_flows.get_amphora_for_lb_failover_subflow(
prefix=(new_amp_role + '-' +
constants.FAILOVER_LOADBALANCER_FLOW),
role=new_amp_role))
failover_LB_flow.add(database_tasks.MarkAmphoraMasterInDB(
name=constants.MARK_AMP_MASTER_INDB,
requires=constants.AMPHORA))
# Delete the failed amp
if failed_amp:
failover_LB_flow.add(
self.amp_flows.get_delete_amphora_flow(
failed_amp))
failover_LB_flow.add(
database_tasks.DisableAmphoraHealthMonitoring(
name=(new_amp_role + '-' +
constants.DISABLE_AMP_HEALTH_MONITORING),
requires=constants.AMPHORA,
inject={constants.AMPHORA: failed_amp}))
# Remove any extraneous amphora
# Note: This runs in all topology situations.
# It should run before the act/stdby final listener update so
# that we don't bother attempting to update dead amphorae.
delete_extra_amps_flow = unordered_flow.Flow(
constants.DELETE_EXTRA_AMPHORAE_FLOW)
for amp in amps:
LOG.debug('Found extraneous amphora %s on load balancer %s. '
'Deleting.', amp.get(constants.ID), lb.get(constants.ID))
delete_extra_amps_flow.add(
self.amp_flows.get_delete_amphora_flow(amp))
failover_LB_flow.add(delete_extra_amps_flow)
if lb_topology == constants.TOPOLOGY_ACTIVE_STANDBY:
# Update the data stored in the flow from the database
failover_LB_flow.add(database_tasks.ReloadLoadBalancer(
name=new_amp_role + '-' + constants.RELOAD_LB_AFTER_AMP_ASSOC,
requires=constants.LOADBALANCER_ID,
provides=constants.LOADBALANCER))
failover_LB_flow.add(database_tasks.GetAmphoraeFromLoadbalancer(
name=new_amp_role + '-' + constants.GET_AMPHORAE_FROM_LB,
requires=constants.LOADBALANCER_ID,
provides=constants.AMPHORAE))
# Listeners update needs to be run on all amphora to update
# their peer configurations. So parallelize this with an
# unordered subflow.
update_amps_subflow = unordered_flow.Flow(
constants.UPDATE_AMPS_SUBFLOW)
# Setup parallel flows for each amp. We don't know the new amp
# details at flow creation time, so setup a subflow for each
# amp on the LB, they let the task index into a list of amps
# to find the amphora it should work on.
update_amps_subflow.add(
amphora_driver_tasks.AmphoraIndexListenerUpdate(
name=(constants.AMPHORA + '-0-' +
constants.AMP_LISTENER_UPDATE),
requires=(constants.LOADBALANCER, constants.AMPHORAE),
inject={constants.AMPHORA_INDEX: 0,
constants.TIMEOUT_DICT: timeout_dict}))
update_amps_subflow.add(
amphora_driver_tasks.AmphoraIndexListenerUpdate(
name=(constants.AMPHORA + '-1-' +
constants.AMP_LISTENER_UPDATE),
requires=(constants.LOADBALANCER, constants.AMPHORAE),
inject={constants.AMPHORA_INDEX: 1,
constants.TIMEOUT_DICT: timeout_dict}))
failover_LB_flow.add(update_amps_subflow)
# Configure and enable keepalived in the amphora
failover_LB_flow.add(self.amp_flows.get_vrrp_subflow(
new_amp_role + '-' + constants.GET_VRRP_SUBFLOW,
timeout_dict, create_vrrp_group=False))
# #### End of standby ####
# Reload the listener. This needs to be done here because
# it will create the required haproxy check scripts for
# the VRRP deployed above.
# A "V" or newer amphora-agent will remove the need for this
# task here.
# TODO(johnsom) Remove this in the "X" cycle
failover_LB_flow.add(
amphora_driver_tasks.AmphoraIndexListenersReload(
name=(new_amp_role + '-' +
constants.AMPHORA_RELOAD_LISTENER),
requires=(constants.LOADBALANCER, constants.AMPHORAE),
inject={constants.AMPHORA_INDEX: 1,
constants.TIMEOUT_DICT: timeout_dict}))
# Remove any extraneous ports
# Note: Nova sometimes fails to delete ports attached to an instance.
# For example, if you create an LB with a listener, then
# 'openstack server delete' the amphora, you will see the vrrp
# port attached to that instance will remain after the instance
# is deleted.
# TODO(johnsom) Fix this as part of
# https://storyboard.openstack.org/#!/story/2007077
# Mark LB ACTIVE
failover_LB_flow.add(
database_tasks.MarkLBActiveInDB(mark_subobjects=True,
requires=constants.LOADBALANCER))
return failover_LB_flow
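The flow opens with LoadBalancerToErrorOnRevertTask so that any later failure reverts the load balancer to ERROR. A hedged sketch of that revert mechanism in plain TaskFlow (invented names, not the Octavia lifecycle tasks):

# If a later task raises, earlier tasks are reverted in reverse order.
from taskflow import engines, task
from taskflow.patterns import linear_flow

class MarkPending(task.Task):
    def execute(self):
        print('load balancer -> PENDING_UPDATE')

    def revert(self, **kwargs):
        print('load balancer -> ERROR')

class FailingStep(task.Task):
    def execute(self):
        raise RuntimeError('simulated amphora boot failure')

flow = linear_flow.Flow('failover-revert-demo')
flow.add(MarkPending(name='mark'), FailingStep(name='boom'))

try:
    engines.run(flow)
except RuntimeError:
    pass  # MarkPending.revert() has already run by now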


@ -74,22 +74,23 @@ class PoolFlows(object):
:returns: The flow for deleting a pool
"""
delete_pool_flow = linear_flow.Flow(constants.DELETE_POOL_FLOW)
delete_pool_flow = linear_flow.Flow(constants.DELETE_POOL_FLOW + '-' +
pool_id)
# health monitor should cascade
# members should cascade
delete_pool_flow.add(database_tasks.MarkPoolPendingDeleteInDB(
name='mark_pool_pending_delete_in_db_' + pool_id,
requires=constants.POOL_ID,
rebind={constants.POOL_ID: pool_id}))
inject={constants.POOL_ID: pool_id}))
delete_pool_flow.add(database_tasks.CountPoolChildrenForQuota(
name='count_pool_children_for_quota_' + pool_id,
requires=constants.POOL_ID,
provides=constants.POOL_CHILD_COUNT,
rebind={constants.POOL_ID: pool_id}))
inject={constants.POOL_ID: pool_id}))
delete_pool_flow.add(database_tasks.DeletePoolInDB(
name='delete_pool_in_db_' + pool_id,
requires=constants.POOL_ID,
rebind={constants.POOL_ID: pool_id}))
inject={constants.POOL_ID: pool_id}))
delete_pool_flow.add(database_tasks.DecrementPoolQuota(
name='decrement_pool_quota_' + pool_id,
requires=[constants.PROJECT_ID, constants.POOL_CHILD_COUNT]))


@ -23,7 +23,6 @@ from taskflow.types import failure
from octavia.amphorae.backends.agent import agent_jinja_cfg
from octavia.amphorae.driver_exceptions import exceptions as driver_except
from octavia.api.drivers import utils as provider_utils
from octavia.common import constants
from octavia.common import utils
from octavia.controller.worker import task_utils as task_utilities
@ -74,21 +73,46 @@ class AmpRetry(retry.Times):
class AmpListenersUpdate(BaseAmphoraTask):
"""Task to update the listeners on one amphora."""
def execute(self, loadbalancer, amphora, timeout_dict=None):
# Note, we don't want this to cause a revert as it may be used
# in a failover flow with both amps failing. Skip it and let
# health manager fix it.
# TODO(johnsom) Optimize this to use the dicts and not need the
# DB lookups
db_amp = self.amphora_repo.get(db_apis.get_session(),
id=amphora[constants.ID])
try:
db_lb = self.loadbalancer_repo.get(
db_apis.get_session(),
id=loadbalancer[constants.LOADBALANCER_ID])
self.amphora_driver.update_amphora_listeners(
db_lb, db_amp, timeout_dict)
except Exception as e:
LOG.error('Failed to update listeners on amphora %s. Skipping '
'this amphora as it is failing to update due to: %s',
db_amp.id, str(e))
self.amphora_repo.update(db_apis.get_session(), db_amp.id,
status=constants.ERROR)
class AmphoraIndexListenerUpdate(BaseAmphoraTask):
"""Task to update the listeners on one amphora."""
def execute(self, loadbalancer, amphora_index, amphorae, timeout_dict=()):
# Note, we don't want this to cause a revert as it may be used
# in a failover flow with both amps failing. Skip it and let
# health manager fix it.
try:
db_amphorae = []
for amp in amphorae:
db_amp = self.amphora_repo.get(db_apis.get_session(),
id=amp[constants.ID])
db_amphorae.append(db_amp)
# TODO(johnsom) Optimize this to use the dicts and not need the
# DB lookups
db_amp = self.amphora_repo.get(
db_apis.get_session(),
id=amphorae[amphora_index][constants.ID])
db_lb = self.loadbalancer_repo.get(
db_apis.get_session(),
id=loadbalancer[constants.LOADBALANCER_ID])
self.amphora_driver.update_amphora_listeners(
db_lb, db_amphorae[amphora_index], timeout_dict)
db_lb, db_amp, timeout_dict)
except Exception as e:
amphora_id = amphorae[amphora_index].get(constants.ID)
LOG.error('Failed to update listeners on amphora %s. Skipping '
@ -148,6 +172,35 @@ class ListenersStart(BaseAmphoraTask):
self.task_utils.mark_listener_prov_status_error(listener.id)
class AmphoraIndexListenersReload(BaseAmphoraTask):
"""Task to reload all listeners on an amphora."""
def execute(self, loadbalancer, amphora_index, amphorae,
timeout_dict=None):
"""Execute listener reload routines for listeners on an amphora."""
if amphorae is None:
return
# TODO(johnsom) Optimize this to use the dicts and not need the
# DB lookups
db_amp = self.amphora_repo.get(
db_apis.get_session(), id=amphorae[amphora_index][constants.ID])
db_lb = self.loadbalancer_repo.get(
db_apis.get_session(),
id=loadbalancer[constants.LOADBALANCER_ID])
if db_lb.listeners:
self.amphora_driver.reload(db_lb, db_amp, timeout_dict)
def revert(self, loadbalancer, *args, **kwargs):
"""Handle failed listeners reloads."""
LOG.warning("Reverting listener reload.")
db_lb = self.loadbalancer_repo.get(
db_apis.get_session(),
id=loadbalancer[constants.LOADBALANCER_ID])
for listener in db_lb.listeners:
self.task_utils.mark_listener_prov_status_error(listener.id)
class ListenerDelete(BaseAmphoraTask):
"""Task to delete the listener on the vip."""
@ -332,96 +385,149 @@ class AmphoraCertUpload(BaseAmphoraTask):
db_amp, fer.decrypt(server_pem.encode('utf-8')))
# TODO(johnsom) REMOVE ME!
class AmphoraUpdateVRRPInterface(BaseAmphoraTask):
"""Task to get and update the VRRP interface device name from amphora."""
def execute(self, loadbalancer): def execute(self, amphora, timeout_dict=None):
"""Execute post_vip_routine."""
amps = []
db_lb = self.loadbalancer_repo.get(
db_apis.get_session(), id=loadbalancer[constants.LOADBALANCER_ID])
timeout_dict = {
constants.CONN_MAX_RETRIES:
CONF.haproxy_amphora.active_connection_max_retries,
constants.CONN_RETRY_INTERVAL:
CONF.haproxy_amphora.active_connection_rety_interval}
for amp in filter(
lambda amp: amp.status == constants.AMPHORA_ALLOCATED,
db_lb.amphorae):
try: try:
# TODO(johnsom) Optimize this to use the dicts and not need the
# DB lookups
db_amp = self.amphora_repo.get(db_apis.get_session(),
id=amphora[constants.ID])
interface = self.amphora_driver.get_interface_from_ip( interface = self.amphora_driver.get_interface_from_ip(
amp, amp.vrrp_ip, timeout_dict=timeout_dict) db_amp, db_amp.vrrp_ip, timeout_dict=timeout_dict)
except Exception as e: except Exception as e:
# This can occur when an active/standby LB has no listener # This can occur when an active/standby LB has no listener
LOG.error('Failed to get amphora VRRP interface on amphora ' LOG.error('Failed to get amphora VRRP interface on amphora '
'%s. Skipping this amphora as it is failing due to: ' '%s. Skipping this amphora as it is failing due to: '
'%s', amp.id, str(e)) '%s', amphora.get(constants.ID), str(e))
self.amphora_repo.update(db_apis.get_session(), amp.id, self.amphora_repo.update(db_apis.get_session(),
amphora.get(constants.ID),
status=constants.ERROR) status=constants.ERROR)
continue return None
self.amphora_repo.update(db_apis.get_session(), amp.id, self.amphora_repo.update(db_apis.get_session(), amphora[constants.ID],
vrrp_interface=interface) vrrp_interface=interface)
amps.append(self.amphora_repo.get(db_apis.get_session(), return interface
id=amp.id))
db_lb.amphorae = amps
return provider_utils.db_loadbalancer_to_provider_loadbalancer(
db_lb).to_dict()
def revert(self, result, loadbalancer, *args, **kwargs):
"""Handle a failed amphora vip plug notification."""
if isinstance(result, failure.Failure):
return
LOG.warning("Reverting Get Amphora VRRP Interface.")
db_lb = self.loadbalancer_repo.get(
db_apis.get_session(), id=loadbalancer[constants.LOADBALANCER_ID])
for amp in filter(
lambda amp: amp.status == constants.AMPHORA_ALLOCATED,
db_lb.amphorae):
class AmphoraIndexUpdateVRRPInterface(BaseAmphoraTask):
"""Task to get and update the VRRP interface device name from amphora."""
def execute(self, amphora_index, amphorae, timeout_dict=None):
amphora_id = amphorae[amphora_index][constants.ID]
try: try:
self.amphora_repo.update(db_apis.get_session(), amp.id, # TODO(johnsom) Optimize this to use the dicts and not need the
vrrp_interface=None) # DB lookups
db_amp = self.amphora_repo.get(db_apis.get_session(),
id=amphora_id)
interface = self.amphora_driver.get_interface_from_ip(
db_amp, db_amp.vrrp_ip, timeout_dict=timeout_dict)
except Exception as e: except Exception as e:
LOG.error("Failed to update amphora %(amp)s " # This can occur when an active/standby LB has no listener
"VRRP interface to None due to: %(except)s", LOG.error('Failed to get amphora VRRP interface on amphora '
{'amp': amp.id, 'except': e}) '%s. Skipping this amphora as it is failing due to: '
'%s', amphora_id, str(e))
self.amphora_repo.update(db_apis.get_session(), amphora_id,
status=constants.ERROR)
return None
self.amphora_repo.update(db_apis.get_session(), amphora_id,
vrrp_interface=interface)
return interface
class AmphoraVRRPUpdate(BaseAmphoraTask): class AmphoraVRRPUpdate(BaseAmphoraTask):
"""Task to update the VRRP configuration of the loadbalancer amphorae.""" """Task to update the VRRP configuration of an amphora."""
def execute(self, loadbalancer, amphorae_network_config): def execute(self, loadbalancer_id, amphorae_network_config, amphora,
amp_vrrp_int, timeout_dict=None):
"""Execute update_vrrp_conf.""" """Execute update_vrrp_conf."""
db_lb = self.loadbalancer_repo.get( # Note, we don't want this to cause a revert as it may be used
db_apis.get_session(), id=loadbalancer[constants.LOADBALANCER_ID]) # in a failover flow with both amps failing. Skip it and let
self.amphora_driver.update_vrrp_conf(db_lb, # health manager fix it.
amphorae_network_config) amphora_id = amphora[constants.ID]
LOG.debug("Uploaded VRRP configuration of loadbalancer %s amphorae", try:
loadbalancer[constants.LOADBALANCER_ID]) # TODO(johnsom) Optimize this to use the dicts and not need the
# DB lookups
db_amp = self.amphora_repo.get(db_apis.get_session(),
id=amphora_id)
loadbalancer = self.loadbalancer_repo.get(db_apis.get_session(),
id=loadbalancer_id)
db_amp.vrrp_interface = amp_vrrp_int
self.amphora_driver.update_vrrp_conf(
loadbalancer, amphorae_network_config, db_amp, timeout_dict)
except Exception as e:
LOG.error('Failed to update VRRP configuration amphora %s. '
'Skipping this amphora as it is failing to update due '
'to: %s', amphora_id, str(e))
self.amphora_repo.update(db_apis.get_session(), amphora_id,
status=constants.ERROR)
LOG.debug("Uploaded VRRP configuration of amphora %s.", amphora_id)
class AmphoraVRRPStop(BaseAmphoraTask): class AmphoraIndexVRRPUpdate(BaseAmphoraTask):
"""Task to stop keepalived of all amphorae of a LB.""" """Task to update the VRRP configuration of an amphora."""
def execute(self, loadbalancer): def execute(self, loadbalancer_id, amphorae_network_config, amphora_index,
db_lb = self.loadbalancer_repo.get( amphorae, amp_vrrp_int, timeout_dict=None):
db_apis.get_session(), id=loadbalancer[constants.LOADBALANCER_ID]) """Execute update_vrrp_conf."""
self.amphora_driver.stop_vrrp_service(db_lb) # Note, we don't want this to cause a revert as it may be used
LOG.debug("Stopped VRRP of loadbalancer %s amphorae", # in a failover flow with both amps failing. Skip it and let
loadbalancer[constants.LOADBALANCER_ID]) # health manager fix it.
amphora_id = amphorae[amphora_index][constants.ID]
try:
# TODO(johnsom) Optimize this to use the dicts and not need the
# DB lookups
db_amp = self.amphora_repo.get(db_apis.get_session(),
id=amphora_id)
loadbalancer = self.loadbalancer_repo.get(db_apis.get_session(),
id=loadbalancer_id)
db_amp.vrrp_interface = amp_vrrp_int
self.amphora_driver.update_vrrp_conf(
loadbalancer, amphorae_network_config, db_amp, timeout_dict)
except Exception as e:
LOG.error('Failed to update VRRP configuration amphora %s. '
'Skipping this amphora as it is failing to update due '
'to: %s', amphora_id, str(e))
self.amphora_repo.update(db_apis.get_session(), amphora_id,
status=constants.ERROR)
LOG.debug("Uploaded VRRP configuration of amphora %s.", amphora_id)
class AmphoraVRRPStart(BaseAmphoraTask): class AmphoraVRRPStart(BaseAmphoraTask):
"""Task to start keepalived of all amphorae of a LB.""" """Task to start keepalived on an amphora.
def execute(self, loadbalancer): This will reload keepalived if it is already running.
db_lb = self.loadbalancer_repo.get( """
db_apis.get_session(), id=loadbalancer[constants.LOADBALANCER_ID])
self.amphora_driver.start_vrrp_service(db_lb) def execute(self, amphora, timeout_dict=None):
LOG.debug("Started VRRP of loadbalancer %s amphorae", # TODO(johnsom) Optimize this to use the dicts and not need the
loadbalancer[constants.LOADBALANCER_ID]) # DB lookups
db_amp = self.amphora_repo.get(
db_apis.get_session(), id=amphora[constants.ID])
self.amphora_driver.start_vrrp_service(db_amp, timeout_dict)
LOG.debug("Started VRRP on amphora %s.", amphora[constants.ID])
class AmphoraIndexVRRPStart(BaseAmphoraTask):
"""Task to start keepalived on an amphora.
This will reload keepalived if it is already running.
"""
def execute(self, amphora_index, amphorae, timeout_dict=None):
# TODO(johnsom) Optimize this to use the dicts and not need the
# DB lookups
db_amp = self.amphora_repo.get(
db_apis.get_session(), id=amphorae[amphora_index][constants.ID])
self.amphora_driver.start_vrrp_service(db_amp, timeout_dict)
LOG.debug("Started VRRP on amphora %s.",
amphorae[amphora_index][constants.ID])
class AmphoraComputeConnectivityWait(BaseAmphoraTask): class AmphoraComputeConnectivityWait(BaseAmphoraTask):
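A recurring pattern in the tasks above is to swallow per-amphora errors during failover (rather than reverting the whole flow) and mark the amphora ERROR so the health manager can repair it later. A hedged, simplified sketch of that pattern only; the driver and repository parameters are generic stand-ins, not the Octavia classes:

# Best-effort update: log, flag the amphora, keep the failover moving.
import logging

LOG = logging.getLogger(__name__)

def update_listeners_best_effort(driver, amphora_repo, session,
                                 db_lb, db_amp, timeout_dict):
    try:
        driver.update_amphora_listeners(db_lb, db_amp, timeout_dict)
    except Exception as e:
        LOG.error('Failed to update listeners on amphora %s. Marking it '
                  'ERROR and continuing: %s', db_amp.id, str(e))
        amphora_repo.update(session, db_amp.id, status='ERROR')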


@ -21,6 +21,7 @@ from oslo_log import log as logging
from stevedore import driver as stevedore_driver from stevedore import driver as stevedore_driver
from taskflow import task from taskflow import task
from taskflow.types import failure from taskflow.types import failure
import tenacity
from octavia.amphorae.backends.agent import agent_jinja_cfg from octavia.amphorae.backends.agent import agent_jinja_cfg
from octavia.common import constants from octavia.common import constants
@ -53,10 +54,9 @@ class BaseComputeTask(task.Task):
class ComputeCreate(BaseComputeTask):
"""Create the compute instance for a new amphora."""
def execute(self, amphora_id, config_drive_files=None,
build_type_priority=constants.LB_CREATE_NORMAL_PRIORITY,
server_group_id=None, ports=None, flavor=None,
availability_zone=None):
def execute(self, amphora_id, server_group_id, config_drive_files=None,
build_type_priority=constants.LB_CREATE_NORMAL_PRIORITY,
ports=None, flavor=None, availability_zone=None):
"""Create an amphora
:param availability_zone: availability zone metadata dictionary
@ -154,10 +154,9 @@ class ComputeCreate(BaseComputeTask):
class CertComputeCreate(ComputeCreate):
def execute(self, amphora_id, server_pem,
build_type_priority=constants.LB_CREATE_NORMAL_PRIORITY,
server_group_id=None, ports=None, flavor=None,
availability_zone=None):
def execute(self, amphora_id, server_pem, server_group_id,
build_type_priority=constants.LB_CREATE_NORMAL_PRIORITY,
ports=None, flavor=None, availability_zone=None):
"""Create an amphora
:param availability_zone: availability zone metadata dictionary
@ -202,15 +201,52 @@ class DeleteAmphoraeOnLoadBalancer(BaseComputeTask):
class ComputeDelete(BaseComputeTask):
def execute(self, amphora):
LOG.debug("Compute Delete execute for amphora with id %s",
amphora.get(constants.ID))
@tenacity.retry(retry=tenacity.retry_if_exception_type(),
stop=tenacity.stop_after_attempt(CONF.compute.max_retries),
wait=tenacity.wait_exponential(
multiplier=CONF.compute.retry_backoff,
min=CONF.compute.retry_interval,
max=CONF.compute.retry_max), reraise=True)
def execute(self, amphora, passive_failure=False):
amphora_id = amphora.get(constants.ID)
compute_id = amphora[constants.COMPUTE_ID]
if self.execute.retry.statistics.get(constants.ATTEMPT_NUMBER, 1) == 1:
LOG.debug('Compute delete execute for amphora with ID %s and '
'compute ID: %s', amphora_id, compute_id)
else:
LOG.warning('Retrying compute delete of %s attempt %s of %s.',
compute_id,
self.execute.retry.statistics[
constants.ATTEMPT_NUMBER],
self.execute.retry.stop.max_attempt_number)
# Let the Taskflow engine know we are working and alive
# Don't use get with a default for 'attempt_number', we need to fail
# if that number is missing.
self.update_progress(
self.execute.retry.statistics[constants.ATTEMPT_NUMBER] /
self.execute.retry.stop.max_attempt_number)
try:
self.compute.delete(amphora[constants.COMPUTE_ID])
self.compute.delete(compute_id)
except Exception:
LOG.exception("Compute delete for amphora id: %s failed",
amphora.get(constants.ID))
if (self.execute.retry.statistics[constants.ATTEMPT_NUMBER] !=
self.execute.retry.stop.max_attempt_number):
LOG.warning('Compute delete for amphora id: %s failed. '
'Retrying.', amphora_id)
raise
if passive_failure:
LOG.exception('Compute delete for compute ID: %s on amphora '
'ID: %s failed. This resource will be abandoned '
'and should manually be cleaned up once the '
'compute service is functional.',
compute_id, amphora_id)
else:
LOG.exception('Compute delete for compute ID: %s on amphora '
'ID: %s failed. The compute service has failed. '
'Aborting and reverting.', compute_id,
amphora_id)
raise
@ -284,3 +320,33 @@ class NovaServerGroupDelete(BaseComputeTask):
self.compute.delete_server_group(server_group_id) self.compute.delete_server_group(server_group_id)
else: else:
return return
class AttachPort(BaseComputeTask):
def execute(self, amphora, port):
"""Attach a port to an amphora instance.
:param amphora: The amphora to attach the port to.
:param port: The port to attach to the amphora.
:returns: None
"""
LOG.debug('Attaching port: %s to compute: %s',
port[constants.ID], amphora[constants.COMPUTE_ID])
self.compute.attach_network_or_port(amphora[constants.COMPUTE_ID],
port_id=port[constants.ID])
def revert(self, amphora, port, *args, **kwargs):
"""Revert our port attach.
:param amphora: The amphora to detach the port from.
:param port: The port to attach to the amphora.
"""
LOG.warning('Reverting port: %s attach to compute: %s',
port[constants.ID], amphora[constants.COMPUTE_ID])
try:
self.compute.detach_port(amphora[constants.COMPUTE_ID],
port[constants.ID])
except Exception as e:
LOG.error('Failed to detach port %s from compute %s for revert '
'due to %s.', port[constants.ID],
amphora[constants.COMPUTE_ID], str(e))
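# Illustrative sketch (not part of the diff): how Taskflow drives revert().
# When a later task in a flow fails, the engine calls revert() on the tasks
# that already completed, in reverse order; that is why AttachPort above
# detaches the port it plugged. The task and flow names below are made up.
from taskflow import engines
from taskflow import task
from taskflow.patterns import linear_flow


class Attach(task.Task):
    def execute(self):
        print('attach port')

    def revert(self, *args, **kwargs):
        print('detach port (revert)')


class Boom(task.Task):
    def execute(self):
        raise RuntimeError('later task failed')


demo_flow = linear_flow.Flow('demo').add(Attach(), Boom())
try:
    engines.run(demo_flow)
except RuntimeError:
    pass  # Attach.revert() ran before the failure propagated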

View File

@ -280,6 +280,7 @@ class DeleteListenerInDB(BaseDatabaseTask):
:returns: None :returns: None
""" """
# TODO(johnsom) Fix this, it doesn't revert anything
LOG.warning("Reverting mark listener delete in DB for listener id %s", LOG.warning("Reverting mark listener delete in DB for listener id %s",
listener[constants.LISTENER_ID]) listener[constants.LISTENER_ID])
@ -490,22 +491,26 @@ class UpdateAmphoraVIPData(BaseDatabaseTask):
class UpdateAmpFailoverDetails(BaseDatabaseTask): class UpdateAmpFailoverDetails(BaseDatabaseTask):
"""Update amphora failover details in the database.""" """Update amphora failover details in the database."""
def execute(self, amphora, amp_data): def execute(self, amphora, vip, base_port):
"""Update amphora failover details in the database. """Update amphora failover details in the database.
:param amphora: The amphora to update :param amphora: The amphora to update
:param amp_data: data_models.Amphora object with update data :param vip: The VIP object associated with this amphora.
:param base_port: The base port object associated with the amphora.
:returns: None :returns: None
""" """
# role and vrrp_priority will be updated later. # role and vrrp_priority will be updated later.
self.repos.amphora.update( self.repos.amphora.update(
db_apis.get_session(), db_apis.get_session(),
amphora.get(constants.ID), amphora.get(constants.ID),
vrrp_ip=amp_data[constants.VRRP_IP], # TODO(johnsom) We should do a better job getting the fixed_ip
ha_ip=amp_data[constants.HA_IP], # as this could be a problem with dual stack.
vrrp_port_id=amp_data[constants.VRRP_PORT_ID], # Fix this during the multi-vip patch.
ha_port_id=amp_data[constants.HA_PORT_ID], vrrp_ip=base_port[constants.FIXED_IPS][0][constants.IP_ADDRESS],
vrrp_id=amp_data[constants.VRRP_ID]) ha_ip=vip[constants.IP_ADDRESS],
vrrp_port_id=base_port[constants.ID],
ha_port_id=vip[constants.PORT_ID],
vrrp_id=1)
class AssociateFailoverAmphoraWithLBID(BaseDatabaseTask): class AssociateFailoverAmphoraWithLBID(BaseDatabaseTask):
@ -595,26 +600,24 @@ class MapLoadbalancerToAmphora(BaseDatabaseTask):
class _MarkAmphoraRoleAndPriorityInDB(BaseDatabaseTask): class _MarkAmphoraRoleAndPriorityInDB(BaseDatabaseTask):
"""Alter the amphora role and priority in DB.""" """Alter the amphora role and priority in DB."""
def _execute(self, amphora, amp_role, vrrp_priority): def _execute(self, amphora_id, amp_role, vrrp_priority):
"""Alter the amphora role and priority in DB. """Alter the amphora role and priority in DB.
:param amphora: Amphora to update. :param amphora_id: Amphora ID to update.
:param amp_role: Amphora role to be set. :param amp_role: Amphora role to be set.
:param vrrp_priority: VRRP priority to set. :param vrrp_priority: VRRP priority to set.
:returns: None :returns: None
""" """
LOG.debug("Mark %(role)s in DB for amphora: %(amp)s", LOG.debug("Mark %(role)s in DB for amphora: %(amp)s",
{constants.ROLE: amp_role, 'amp': amphora[constants.ID]}) {constants.ROLE: amp_role, 'amp': amphora_id})
self.amphora_repo.update(db_apis.get_session(), self.amphora_repo.update(db_apis.get_session(), amphora_id,
amphora[constants.ID], role=amp_role, vrrp_priority=vrrp_priority)
role=amp_role,
vrrp_priority=vrrp_priority)
def _revert(self, result, amphora, *args, **kwargs): def _revert(self, result, amphora_id, *args, **kwargs):
"""Removes role and vrrp_priority association. """Removes role and vrrp_priority association.
:param result: Result of the association. :param result: Result of the association.
:param amphora: Amphora which role/vrrp_priority association :param amphora_id: Amphora ID which role/vrrp_priority association
failed. failed.
:returns: None :returns: None
""" """
@ -623,17 +626,14 @@ class _MarkAmphoraRoleAndPriorityInDB(BaseDatabaseTask):
return return
LOG.warning("Reverting amphora role in DB for amp id %(amp)s", LOG.warning("Reverting amphora role in DB for amp id %(amp)s",
{'amp': amphora[constants.ID]}) {'amp': amphora_id})
try: try:
self.amphora_repo.update(db_apis.get_session(), self.amphora_repo.update(db_apis.get_session(), amphora_id,
amphora[constants.ID], role=None, vrrp_priority=None)
role=None,
vrrp_priority=None)
except Exception as e: except Exception as e:
LOG.error("Failed to update amphora %(amp)s " LOG.error("Failed to update amphora %(amp)s "
"role and vrrp_priority to None due to: " "role and vrrp_priority to None due to: "
"%(except)s", {'amp': amphora[constants.ID], "%(except)s", {'amp': amphora_id, 'except': e})
'except': e})
class MarkAmphoraMasterInDB(_MarkAmphoraRoleAndPriorityInDB): class MarkAmphoraMasterInDB(_MarkAmphoraRoleAndPriorityInDB):
@ -646,7 +646,8 @@ class MarkAmphoraMasterInDB(_MarkAmphoraRoleAndPriorityInDB):
:returns: None :returns: None
""" """
amp_role = constants.ROLE_MASTER amp_role = constants.ROLE_MASTER
self._execute(amphora, amp_role, constants.ROLE_MASTER_PRIORITY) self._execute(amphora[constants.ID], amp_role,
constants.ROLE_MASTER_PRIORITY)
def revert(self, result, amphora, *args, **kwargs): def revert(self, result, amphora, *args, **kwargs):
"""Removes amphora role association. """Removes amphora role association.
@ -654,7 +655,7 @@ class MarkAmphoraMasterInDB(_MarkAmphoraRoleAndPriorityInDB):
:param amphora: Amphora to update role. :param amphora: Amphora to update role.
:returns: None :returns: None
""" """
self._revert(result, amphora, *args, **kwargs) self._revert(result, amphora[constants.ID], *args, **kwargs)
class MarkAmphoraBackupInDB(_MarkAmphoraRoleAndPriorityInDB): class MarkAmphoraBackupInDB(_MarkAmphoraRoleAndPriorityInDB):
@ -667,7 +668,8 @@ class MarkAmphoraBackupInDB(_MarkAmphoraRoleAndPriorityInDB):
:returns: None :returns: None
""" """
amp_role = constants.ROLE_BACKUP amp_role = constants.ROLE_BACKUP
self._execute(amphora, amp_role, constants.ROLE_BACKUP_PRIORITY) self._execute(amphora[constants.ID], amp_role,
constants.ROLE_BACKUP_PRIORITY)
def revert(self, result, amphora, *args, **kwargs): def revert(self, result, amphora, *args, **kwargs):
"""Removes amphora role association. """Removes amphora role association.
@ -675,7 +677,7 @@ class MarkAmphoraBackupInDB(_MarkAmphoraRoleAndPriorityInDB):
:param amphora: Amphora to update role. :param amphora: Amphora to update role.
:returns: None :returns: None
""" """
self._revert(result, amphora, *args, **kwargs) self._revert(result, amphora[constants.ID], *args, **kwargs)
class MarkAmphoraStandAloneInDB(_MarkAmphoraRoleAndPriorityInDB): class MarkAmphoraStandAloneInDB(_MarkAmphoraRoleAndPriorityInDB):
@ -688,7 +690,7 @@ class MarkAmphoraStandAloneInDB(_MarkAmphoraRoleAndPriorityInDB):
:returns: None :returns: None
""" """
amp_role = constants.ROLE_STANDALONE amp_role = constants.ROLE_STANDALONE
self._execute(amphora, amp_role, None) self._execute(amphora[constants.ID], amp_role, None)
def revert(self, result, amphora, *args, **kwargs): def revert(self, result, amphora, *args, **kwargs):
"""Removes amphora role association. """Removes amphora role association.
@ -696,7 +698,7 @@ class MarkAmphoraStandAloneInDB(_MarkAmphoraRoleAndPriorityInDB):
:param amphora: Amphora to update role. :param amphora: Amphora to update role.
:returns: None :returns: None
""" """
self._revert(result, amphora, *args, **kwargs) self._revert(result, amphora[constants.ID], *args, **kwargs)
class MarkAmphoraAllocatedInDB(BaseDatabaseTask): class MarkAmphoraAllocatedInDB(BaseDatabaseTask):
@ -809,10 +811,10 @@ class MarkAmphoraDeletedInDB(BaseDatabaseTask):
LOG.debug("Mark DELETED in DB for amphora: %(amp)s with " LOG.debug("Mark DELETED in DB for amphora: %(amp)s with "
"compute id %(comp)s", "compute id %(comp)s",
{'amp': amphora.get(constants.ID), {'amp': amphora[constants.ID],
'comp': amphora[constants.COMPUTE_ID]}) 'comp': amphora[constants.COMPUTE_ID]})
self.amphora_repo.update(db_apis.get_session(), self.amphora_repo.update(db_apis.get_session(),
amphora.get(constants.ID), amphora[constants.ID],
status=constants.DELETED) status=constants.DELETED)
def revert(self, amphora, *args, **kwargs): def revert(self, amphora, *args, **kwargs):
@ -824,10 +826,10 @@ class MarkAmphoraDeletedInDB(BaseDatabaseTask):
LOG.warning("Reverting mark amphora deleted in DB " LOG.warning("Reverting mark amphora deleted in DB "
"for amp id %(amp)s and compute id %(comp)s", "for amp id %(amp)s and compute id %(comp)s",
{'amp': amphora.get(constants.ID), {'amp': amphora[constants.ID],
'comp': amphora[constants.COMPUTE_ID]}) 'comp': amphora[constants.COMPUTE_ID]})
self.task_utils.mark_amphora_status_error(amphora.get(constants.ID)) self.task_utils.mark_amphora_status_error(amphora[constants.ID])
class MarkAmphoraPendingDeleteInDB(BaseDatabaseTask): class MarkAmphoraPendingDeleteInDB(BaseDatabaseTask):
@ -845,10 +847,10 @@ class MarkAmphoraPendingDeleteInDB(BaseDatabaseTask):
LOG.debug("Mark PENDING DELETE in DB for amphora: %(amp)s " LOG.debug("Mark PENDING DELETE in DB for amphora: %(amp)s "
"with compute id %(id)s", "with compute id %(id)s",
{'amp': amphora.get(constants.ID), {'amp': amphora[constants.ID],
'id': amphora[constants.COMPUTE_ID]}) 'id': amphora[constants.COMPUTE_ID]})
self.amphora_repo.update(db_apis.get_session(), self.amphora_repo.update(db_apis.get_session(),
amphora.get(constants.ID), amphora[constants.ID],
status=constants.PENDING_DELETE) status=constants.PENDING_DELETE)
def revert(self, amphora, *args, **kwargs): def revert(self, amphora, *args, **kwargs):
@ -860,9 +862,9 @@ class MarkAmphoraPendingDeleteInDB(BaseDatabaseTask):
LOG.warning("Reverting mark amphora pending delete in DB " LOG.warning("Reverting mark amphora pending delete in DB "
"for amp id %(amp)s and compute id %(comp)s", "for amp id %(amp)s and compute id %(comp)s",
{'amp': amphora.get(constants.ID), {'amp': amphora[constants.ID],
'comp': amphora[constants.COMPUTE_ID]}) 'comp': amphora[constants.COMPUTE_ID]})
self.task_utils.mark_amphora_status_error(amphora.get(constants.ID)) self.task_utils.mark_amphora_status_error(amphora[constants.ID])
class MarkAmphoraPendingUpdateInDB(BaseDatabaseTask): class MarkAmphoraPendingUpdateInDB(BaseDatabaseTask):
@ -1111,7 +1113,7 @@ class MarkLBActiveInDB(BaseDatabaseTask):
def _mark_member_status(self, member, status): def _mark_member_status(self, member, status):
self.member_repo.update( self.member_repo.update(
db_apis.get_session(), member[constants.MEMBER_ID], db_apis.get_session(), member.id,
provisioning_status=status) provisioning_status=status)
def revert(self, loadbalancer, *args, **kwargs): def revert(self, loadbalancer, *args, **kwargs):
@ -1691,17 +1693,17 @@ class GetAmphoraDetails(BaseDatabaseTask):
class GetAmphoraeFromLoadbalancer(BaseDatabaseTask): class GetAmphoraeFromLoadbalancer(BaseDatabaseTask):
"""Task to pull the listeners from a loadbalancer.""" """Task to pull the amphorae from a loadbalancer."""
def execute(self, loadbalancer): def execute(self, loadbalancer_id):
"""Pull the amphorae from a loadbalancer. """Pull the amphorae from a loadbalancer.
:param loadbalancer: Load balancer which listeners are required :param loadbalancer_id: Load balancer ID to get amphorae from
:returns: A list of Listener objects :returns: A list of Amphora objects
""" """
amphorae = [] amphorae = []
db_lb = self.repos.load_balancer.get( db_lb = self.repos.load_balancer.get(db_apis.get_session(),
db_apis.get_session(), id=loadbalancer[constants.LOADBALANCER_ID]) id=loadbalancer_id)
for amp in db_lb.amphorae: for amp in db_lb.amphorae:
a = self.amphora_repo.get(db_apis.get_session(), id=amp.id, a = self.amphora_repo.get(db_apis.get_session(), id=amp.id,
show_deleted=False) show_deleted=False)
@ -1746,29 +1748,45 @@ class GetVipFromLoadbalancer(BaseDatabaseTask):
return db_lb.vip.to_dict(recurse=True) return db_lb.vip.to_dict(recurse=True)
class GetLoadBalancer(BaseDatabaseTask):
"""Get an load balancer object from the database."""
def execute(self, loadbalancer_id, *args, **kwargs):
"""Get an load balancer object from the database.
:param loadbalancer_id: The load balancer ID to lookup
:returns: The load balancer object
"""
LOG.debug("Get load balancer from DB for load balancer id: %s",
loadbalancer_id)
db_lb = self.loadbalancer_repo.get(db_apis.get_session(),
id=loadbalancer_id)
provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
db_lb)
return provider_lb.to_dict()
class CreateVRRPGroupForLB(BaseDatabaseTask): class CreateVRRPGroupForLB(BaseDatabaseTask):
"""Create a VRRP group for a load balancer.""" """Create a VRRP group for a load balancer."""
def execute(self, loadbalancer): def execute(self, loadbalancer_id):
"""Create a VRRP group for a load balancer. """Create a VRRP group for a load balancer.
:param loadbalancer: Load balancer for which a VRRP group :param loadbalancer_id: Load balancer ID for which a VRRP group
should be created should be created
:returns: Updated load balancer
""" """
try: try:
self.repos.vrrpgroup.create( self.repos.vrrpgroup.create(
db_apis.get_session(), db_apis.get_session(),
load_balancer_id=loadbalancer[constants.LOADBALANCER_ID], load_balancer_id=loadbalancer_id,
vrrp_group_name=str( vrrp_group_name=str(loadbalancer_id).replace('-', ''),
loadbalancer[constants.LOADBALANCER_ID]).replace('-', ''),
vrrp_auth_type=constants.VRRP_AUTH_DEFAULT, vrrp_auth_type=constants.VRRP_AUTH_DEFAULT,
vrrp_auth_pass=uuidutils.generate_uuid().replace('-', '')[0:7], vrrp_auth_pass=uuidutils.generate_uuid().replace('-', '')[0:7],
advert_int=CONF.keepalived_vrrp.vrrp_advert_int) advert_int=CONF.keepalived_vrrp.vrrp_advert_int)
except odb_exceptions.DBDuplicateEntry: except odb_exceptions.DBDuplicateEntry:
LOG.debug('VRRP_GROUP entry already exists for load balancer, ' LOG.debug('VRRP_GROUP entry already exists for load balancer, '
'skipping create.') 'skipping create.')
return loadbalancer
class DisableAmphoraHealthMonitoring(BaseDatabaseTask): class DisableAmphoraHealthMonitoring(BaseDatabaseTask):
@ -1784,7 +1802,7 @@ class DisableAmphoraHealthMonitoring(BaseDatabaseTask):
:param amphora: The amphora to disable health monitoring for :param amphora: The amphora to disable health monitoring for
:returns: None :returns: None
""" """
self._delete_from_amp_health(amphora.get(constants.ID)) self._delete_from_amp_health(amphora[constants.ID])
class DisableLBAmphoraeHealthMonitoring(BaseDatabaseTask): class DisableLBAmphoraeHealthMonitoring(BaseDatabaseTask):
@ -1819,7 +1837,7 @@ class MarkAmphoraHealthBusy(BaseDatabaseTask):
:param amphora: The amphora to mark amphora health busy :param amphora: The amphora to mark amphora health busy
:returns: None :returns: None
""" """
self._mark_amp_health_busy(amphora.get(constants.ID)) self._mark_amp_health_busy(amphora[constants.ID])
class MarkLBAmphoraeHealthBusy(BaseDatabaseTask): class MarkLBAmphoraeHealthBusy(BaseDatabaseTask):

View File

@ -12,11 +12,14 @@
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
# #
import time
from oslo_config import cfg from oslo_config import cfg
from oslo_log import log as logging from oslo_log import log as logging
from oslo_utils import excutils
from taskflow import task from taskflow import task
from taskflow.types import failure from taskflow.types import failure
import tenacity
from octavia.common import constants from octavia.common import constants
from octavia.common import data_models from octavia.common import data_models
@ -52,21 +55,27 @@ class CalculateAmphoraDelta(BaseNetworkTask):
default_provides = constants.DELTA default_provides = constants.DELTA
def execute(self, loadbalancer, amphora, availability_zone): def execute(self, loadbalancer, amphora, availability_zone,
vrrp_port=None):
LOG.debug("Calculating network delta for amphora id: %s", LOG.debug("Calculating network delta for amphora id: %s",
amphora.get(constants.ID)) amphora.get(constants.ID))
# Figure out what networks we want if vrrp_port is None:
# seed with lb network(s)
vrrp_port = self.network_driver.get_port( vrrp_port = self.network_driver.get_port(
amphora[constants.VRRP_PORT_ID]) amphora[constants.VRRP_PORT_ID])
vrrp_port_network_id = vrrp_port.network_id
else:
vrrp_port_network_id = vrrp_port[constants.NETWORK_ID]
# Figure out what networks we want
# seed with lb network(s)
if availability_zone: if availability_zone:
management_nets = ( management_nets = (
[availability_zone.get(constants.MANAGEMENT_NETWORK)] or [availability_zone.get(constants.MANAGEMENT_NETWORK)] or
CONF.controller_worker.amp_boot_network_list) CONF.controller_worker.amp_boot_network_list)
else: else:
management_nets = CONF.controller_worker.amp_boot_network_list management_nets = CONF.controller_worker.amp_boot_network_list
desired_network_ids = {vrrp_port.network_id}.union(management_nets) desired_network_ids = {vrrp_port_network_id}.union(management_nets)
db_lb = self.loadbalancer_repo.get( db_lb = self.loadbalancer_repo.get(
db_apis.get_session(), id=loadbalancer[constants.LOADBALANCER_ID]) db_apis.get_session(), id=loadbalancer[constants.LOADBALANCER_ID])
for pool in db_lb.pools: for pool in db_lb.pools:
@ -84,7 +93,7 @@ class CalculateAmphoraDelta(BaseNetworkTask):
del_ids = set(actual_network_nics) - desired_network_ids del_ids = set(actual_network_nics) - desired_network_ids
delete_nics = list( delete_nics = list(
actual_network_nics[net_id] for net_id in del_ids) n_data_models.Interface(network_id=net_id) for net_id in del_ids)
add_ids = desired_network_ids - set(actual_network_nics) add_ids = desired_network_ids - set(actual_network_nics)
add_nics = list(n_data_models.Interface( add_nics = list(n_data_models.Interface(
@ -353,7 +362,8 @@ class PlugVIP(BaseNetworkTask):
LOG.debug("Plumbing VIP for loadbalancer id: %s", LOG.debug("Plumbing VIP for loadbalancer id: %s",
loadbalancer[constants.LOADBALANCER_ID]) loadbalancer[constants.LOADBALANCER_ID])
db_lb = self.loadbalancer_repo.get( db_lb = self.loadbalancer_repo.get(
db_apis.get_session(), id=loadbalancer[constants.LOADBALANCER_ID]) db_apis.get_session(),
id=loadbalancer[constants.LOADBALANCER_ID])
amps_data = self.network_driver.plug_vip(db_lb, amps_data = self.network_driver.plug_vip(db_lb,
db_lb.vip) db_lb.vip)
return [amp.to_dict() for amp in amps_data] return [amp.to_dict() for amp in amps_data]
@ -367,7 +377,8 @@ class PlugVIP(BaseNetworkTask):
loadbalancer[constants.LOADBALANCER_ID]) loadbalancer[constants.LOADBALANCER_ID])
db_lb = self.loadbalancer_repo.get( db_lb = self.loadbalancer_repo.get(
db_apis.get_session(), id=loadbalancer[constants.LOADBALANCER_ID]) db_apis.get_session(),
id=loadbalancer[constants.LOADBALANCER_ID])
try: try:
# Make sure we have the current port IDs for cleanup # Make sure we have the current port IDs for cleanup
for amp_data in result: for amp_data in result:
@ -388,13 +399,12 @@ class PlugVIP(BaseNetworkTask):
class UpdateVIPSecurityGroup(BaseNetworkTask): class UpdateVIPSecurityGroup(BaseNetworkTask):
"""Task to setup SG for LB.""" """Task to setup SG for LB."""
def execute(self, loadbalancer): def execute(self, loadbalancer_id):
"""Task to setup SG for LB.""" """Task to setup SG for LB."""
LOG.debug("Setup SG for loadbalancer id: %s", LOG.debug("Setup SG for loadbalancer id: %s", loadbalancer_id)
loadbalancer[constants.LOADBALANCER_ID])
db_lb = self.loadbalancer_repo.get( db_lb = self.loadbalancer_repo.get(
db_apis.get_session(), id=loadbalancer[constants.LOADBALANCER_ID]) db_apis.get_session(), id=loadbalancer_id)
self.network_driver.update_vip_sg(db_lb, db_lb.vip) self.network_driver.update_vip_sg(db_lb, db_lb.vip)
@ -411,7 +421,7 @@ class GetSubnetFromVIP(BaseNetworkTask):
loadbalancer['vip_subnet_id']).to_dict() loadbalancer['vip_subnet_id']).to_dict()
class PlugVIPAmpphora(BaseNetworkTask): class PlugVIPAmphora(BaseNetworkTask):
"""Task to plumb a VIP.""" """Task to plumb a VIP."""
def execute(self, loadbalancer, amphora, subnet): def execute(self, loadbalancer, amphora, subnet):
@ -561,13 +571,29 @@ class GetAmphoraNetworkConfigs(BaseNetworkTask):
return provider_dict return provider_dict
class GetAmphoraNetworkConfigsByID(BaseNetworkTask):
"""Task to retrieve amphora network details."""
def execute(self, loadbalancer_id, amphora_id=None):
LOG.debug("Retrieving vip network details.")
loadbalancer = self.loadbalancer_repo.get(db_apis.get_session(),
id=loadbalancer_id)
amphora = self.amphora_repo.get(db_apis.get_session(), id=amphora_id)
db_configs = self.network_driver.get_network_configs(loadbalancer,
amphora=amphora)
provider_dict = {}
for amp_id, amp_conf in db_configs.items():
provider_dict[amp_id] = amp_conf.to_dict(recurse=True)
return provider_dict
class GetAmphoraeNetworkConfigs(BaseNetworkTask): class GetAmphoraeNetworkConfigs(BaseNetworkTask):
"""Task to retrieve amphorae network details.""" """Task to retrieve amphorae network details."""
def execute(self, loadbalancer): def execute(self, loadbalancer_id):
LOG.debug("Retrieving vip network details.") LOG.debug("Retrieving vip network details.")
db_lb = self.loadbalancer_repo.get( db_lb = self.loadbalancer_repo.get(
db_apis.get_session(), id=loadbalancer[constants.LOADBALANCER_ID]) db_apis.get_session(), id=loadbalancer_id)
db_configs = self.network_driver.get_network_configs(db_lb) db_configs = self.network_driver.get_network_configs(db_lb)
provider_dict = {} provider_dict = {}
for amp_id, amp_conf in db_configs.items(): for amp_id, amp_conf in db_configs.items():
@ -625,56 +651,13 @@ class PlugPorts(BaseNetworkTask):
self.network_driver.plug_port(db_amp, port) self.network_driver.plug_port(db_amp, port)
class PlugVIPPort(BaseNetworkTask):
"""Task to plug a VIP into a compute instance."""
def execute(self, amphora, amphorae_network_config):
vrrp_port = amphorae_network_config.get(
amphora.get(constants.ID))[constants.VRRP_PORT]
LOG.debug('Plugging VIP VRRP port ID: %(port_id)s into compute '
'instance: %(compute_id)s.',
{constants.PORT_ID: vrrp_port.get(constants.ID),
constants.COMPUTE_ID: amphora[constants.COMPUTE_ID]})
db_vrrp_port = self.network_driver.get_port(
vrrp_port.get(constants.ID))
db_amp = self.amphora_repo.get(db_apis.get_session(),
id=amphora[constants.ID])
self.network_driver.plug_port(db_amp, db_vrrp_port)
def revert(self, result, amphora, amphorae_network_config,
*args, **kwargs):
vrrp_port = None
try:
vrrp_port = amphorae_network_config.get(
amphora.get(constants.ID))[constants.VRRP_PORT]
db_vrrp_port = self.network_driver.get_port(
vrrp_port.get(constants.ID))
db_amp = self.amphora_repo.get(db_apis.get_session(),
id=amphora[constants.ID])
self.network_driver.unplug_port(db_amp, db_vrrp_port)
except Exception:
LOG.warning('Failed to unplug vrrp port: %(port)s from amphora: '
'%(amp)s',
{'port': vrrp_port, 'amp': amphora[constants.ID]})
class WaitForPortDetach(BaseNetworkTask):
"""Task to wait for the neutron ports to detach from an amphora."""
def execute(self, amphora):
LOG.debug('Waiting for ports to detach from amphora: %(amp_id)s.',
{'amp_id': amphora.get(constants.ID)})
db_amp = self.amphora_repo.get(db_apis.get_session(),
id=amphora.get(constants.ID))
self.network_driver.wait_for_port_detach(db_amp)
class ApplyQos(BaseNetworkTask): class ApplyQos(BaseNetworkTask):
"""Apply Quality of Services to the VIP""" """Apply Quality of Services to the VIP"""
def _apply_qos_on_vrrp_ports(self, loadbalancer, amps_data, qos_policy_id, def _apply_qos_on_vrrp_ports(self, loadbalancer, amps_data, qos_policy_id,
is_revert=False, request_qos_id=None): is_revert=False, request_qos_id=None):
"""Call network driver to apply QoS Policy on the vrrp ports.""" """Call network driver to apply QoS Policy on the vrrp ports."""
if not amps_data: if not amps_data:
db_lb = self.loadbalancer_repo.get( db_lb = self.loadbalancer_repo.get(
db_apis.get_session(), db_apis.get_session(),
@ -688,12 +671,21 @@ class ApplyQos(BaseNetworkTask):
def execute(self, loadbalancer, amps_data=None, update_dict=None): def execute(self, loadbalancer, amps_data=None, update_dict=None):
"""Apply qos policy on the vrrp ports which are related with vip.""" """Apply qos policy on the vrrp ports which are related with vip."""
qos_policy_id = loadbalancer['vip_qos_policy_id'] db_lb = self.loadbalancer_repo.get(
db_apis.get_session(),
id=loadbalancer[constants.LOADBALANCER_ID])
qos_policy_id = db_lb.vip.qos_policy_id
if not qos_policy_id and ( if not qos_policy_id and (
not update_dict or ( not update_dict or (
'vip' not in update_dict or 'vip' not in update_dict or
'qos_policy_id' not in update_dict[constants.VIP])): 'qos_policy_id' not in update_dict[constants.VIP])):
return return
if update_dict and update_dict.get(constants.VIP):
vip_dict = update_dict[constants.VIP]
if vip_dict.get(constants.QOS_POLICY_ID):
qos_policy_id = vip_dict[constants.QOS_POLICY_ID]
self._apply_qos_on_vrrp_ports(loadbalancer, amps_data, qos_policy_id) self._apply_qos_on_vrrp_ports(loadbalancer, amps_data, qos_policy_id)
def revert(self, result, loadbalancer, amps_data=None, update_dict=None, def revert(self, result, loadbalancer, amps_data=None, update_dict=None,
@ -756,3 +748,147 @@ class ApplyQosAmphora(BaseNetworkTask):
LOG.error('Failed to remove QoS policy: %s from port: %s due ' LOG.error('Failed to remove QoS policy: %s from port: %s due '
'to error: %s', orig_qos_id, 'to error: %s', orig_qos_id,
amp_data[constants.VRRP_PORT_ID], e) amp_data[constants.VRRP_PORT_ID], e)
class DeletePort(BaseNetworkTask):
"""Task to delete a network port."""
@tenacity.retry(retry=tenacity.retry_if_exception_type(),
stop=tenacity.stop_after_attempt(
CONF.networking.max_retries),
wait=tenacity.wait_exponential(
multiplier=CONF.networking.retry_backoff,
min=CONF.networking.retry_interval,
max=CONF.networking.retry_max), reraise=True)
def execute(self, port_id, passive_failure=False):
"""Delete the network port."""
if port_id is None:
return
if self.execute.retry.statistics.get(constants.ATTEMPT_NUMBER, 1) == 1:
LOG.debug("Deleting network port %s", port_id)
else:
LOG.warning('Retrying network port %s delete attempt %s of %s.',
port_id,
self.execute.retry.statistics[
constants.ATTEMPT_NUMBER],
self.execute.retry.stop.max_attempt_number)
# Let the Taskflow engine know we are working and alive
# Don't use get with a default for 'attempt_number', we need to fail
# if that number is missing.
self.update_progress(
self.execute.retry.statistics[constants.ATTEMPT_NUMBER] /
self.execute.retry.stop.max_attempt_number)
try:
self.network_driver.delete_port(port_id)
except Exception:
if (self.execute.retry.statistics[constants.ATTEMPT_NUMBER] !=
self.execute.retry.stop.max_attempt_number):
LOG.warning('Network port delete for port id: %s failed. '
'Retrying.', port_id)
raise
if passive_failure:
LOG.exception('Network port delete for port ID: %s failed. '
'This resource will be abandoned and should '
'manually be cleaned up once the '
'network service is functional.', port_id)
# Let's at least attempt to disable it so if the instance
# comes back from the dead it doesn't conflict with anything.
try:
self.network_driver.admin_down_port(port_id)
LOG.info('Successfully disabled (admin down) network port '
'%s that failed to delete.', port_id)
except Exception:
LOG.warning('Attempt to disable (admin down) network port '
'%s failed. The network service has failed. '
'Continuing.', port_id)
else:
LOG.exception('Network port delete for port ID: %s failed. '
'The network service has failed. '
'Aborting and reverting.', port_id)
raise
class CreateVIPBasePort(BaseNetworkTask):
"""Task to create the VIP base port for an amphora."""
@tenacity.retry(retry=tenacity.retry_if_exception_type(),
stop=tenacity.stop_after_attempt(
CONF.networking.max_retries),
wait=tenacity.wait_exponential(
multiplier=CONF.networking.retry_backoff,
min=CONF.networking.retry_interval,
max=CONF.networking.retry_max), reraise=True)
def execute(self, vip, vip_sg_id, amphora_id):
port_name = constants.AMP_BASE_PORT_PREFIX + amphora_id
fixed_ips = [{constants.SUBNET_ID: vip[constants.SUBNET_ID]}]
sg_id = []
if vip_sg_id:
sg_id = [vip_sg_id]
port = self.network_driver.create_port(
vip[constants.NETWORK_ID], name=port_name, fixed_ips=fixed_ips,
secondary_ips=[vip[constants.IP_ADDRESS]],
security_group_ids=sg_id,
qos_policy_id=vip[constants.QOS_POLICY_ID])
LOG.info('Created port %s with ID %s for amphora %s',
port_name, port.id, amphora_id)
return port.to_dict(recurse=True)
def revert(self, result, vip, vip_sg_id, amphora_id, *args, **kwargs):
if isinstance(result, failure.Failure):
return
try:
port_name = constants.AMP_BASE_PORT_PREFIX + amphora_id
for port in result:
self.network_driver.delete_port(port.id)
LOG.info('Deleted port %s with ID %s for amphora %s due to a '
'revert.', port_name, port.id, amphora_id)
except Exception as e:
LOG.error('Failed to delete port %s. Resources may still be in '
'use for a port intended for amphora %s due to error '
'%s. Search for a port named %s',
result, amphora_id, str(e), port_name)
class AdminDownPort(BaseNetworkTask):
def execute(self, port_id):
try:
self.network_driver.set_port_admin_state_up(port_id, False)
except base.PortNotFound:
return
for i in range(CONF.networking.max_retries):
port = self.network_driver.get_port(port_id)
if port.status == constants.DOWN:
LOG.debug('Disabled port: %s', port_id)
return
LOG.debug('Port %s is %s instead of DOWN, waiting.',
port_id, port.status)
time.sleep(CONF.networking.retry_interval)
LOG.error('Port %s failed to go DOWN. Port status is still %s. '
'Ignoring and continuing.', port_id, port.status)
def revert(self, result, port_id, *args, **kwargs):
if isinstance(result, failure.Failure):
return
try:
self.network_driver.set_port_admin_state_up(port_id, True)
except Exception as e:
LOG.error('Failed to bring port %s admin up on revert due to: %s.',
port_id, str(e))
class GetVIPSecurityGroupID(BaseNetworkTask):
def execute(self, loadbalancer_id):
sg_name = utils.get_vip_security_group_name(loadbalancer_id)
try:
security_group = self.network_driver.get_security_group(sg_name)
if security_group:
return security_group.id
except base.SecurityGroupNotFound:
with excutils.save_and_reraise_exception() as ctxt:
if self.network_driver.sec_grp_enabled:
LOG.error('VIP security group %s was not found.', sg_name)
else:
ctxt.reraise = False
return None
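# Illustrative sketch (not part of the diff) of the conditional re-raise
# pattern GetVIPSecurityGroupID uses above, assuming oslo.utils is available.
# 'lookup' and 'sec_grp_enabled' are placeholder names.
from oslo_utils import excutils


def find_or_ignore(lookup, sec_grp_enabled=True):
    try:
        return lookup()
    except KeyError:
        with excutils.save_and_reraise_exception() as ctxt:
            if not sec_grp_enabled:
                # Swallow the exception on exit instead of re-raising it.
                ctxt.reraise = False
    return None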

View File

@ -0,0 +1,73 @@
# Copyright 2019 Red Hat, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from oslo_log import log as logging
from taskflow import retry
LOG = logging.getLogger(__name__)
class SleepingRetryTimesController(retry.Times):
"""A retry controller to attempt subflow retries a number of times.
This retry controller overrides the Times on_failure to inject a
sleep interval between retries.
It also adds a log message when all of the retries are exhausted.
:param attempts: number of attempts to retry the associated subflow
before giving up
:type attempts: int
:param name: Meaningful name for this atom, should be something that is
distinguishable and understandable for notification,
debugging, storing and any other similar purposes.
:param provides: A set, string or list of items that
this will be providing (or could provide) to others, used
to correlate and associate the thing/s this atom
produces, if it produces anything at all.
:param requires: A set or list of required inputs for this atom's
``execute`` method.
:param rebind: A dict of key/value pairs used to define argument
name conversions for inputs to this atom's ``execute``
method.
:param revert_all: when provided this will cause the full flow to revert
when the number of attempts that have been tried
has been reached (when false, it will only locally
revert the associated subflow)
:type revert_all: bool
:param interval: Interval, in seconds, between retry attempts.
:type interval: int
"""
def __init__(self, attempts=1, name=None, provides=None, requires=None,
auto_extract=True, rebind=None, revert_all=False, interval=1):
super().__init__(attempts, name, provides, requires, auto_extract,
rebind, revert_all)
self._interval = interval
def on_failure(self, history, *args, **kwargs):
if len(history) < self._attempts:
LOG.warning('%s attempt %s of %s failed. Sleeping %s seconds and '
'retrying.',
self.name[self.name.startswith('retry-') and
len('retry-'):], len(history),
self._attempts, self._interval)
time.sleep(self._interval)
return retry.RETRY
return self._revert_action
def revert(self, history, *args, **kwargs):
LOG.error('%s retries with interval %s seconds have failed for %s. '
'Giving up.', len(history), self._interval, self.name)
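# Illustrative usage (not part of the diff): attaching the controller to a
# subflow. The flow and retry names are made up; taskflow flows accept a
# retry controller through the 'retry' argument.
from taskflow.patterns import linear_flow

retry_ctrl = SleepingRetryTimesController(
    attempts=3, name='retry-amphora-failover', interval=5)
subflow = linear_flow.Flow('amphora-failover-subflow', retry=retry_ctrl)
# Tasks added to this subflow are re-run up to 3 times, sleeping 5 seconds
# between attempts, before the controller gives up and triggers a revert.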

View File

@ -315,21 +315,9 @@ class TestAmphoraFlows(base.TestCase):
self.assertIsInstance(amp_flow, flow.Flow) self.assertIsInstance(amp_flow, flow.Flow)
# TODO(johnsom) Uncomment after amphora failover builds a replacement
# amphora.
# self.assertIn(constants.AVAILABILITY_ZONE, amp_flow.requires)
# self.assertIn(constants.BUILD_TYPE_PRIORITY, amp_flow.requires)
# self.assertIn(constants.FLAVOR, amp_flow.requires)
# self.assertEqual(5, len(amp_flow.requires))
self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires) self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires)
# self.assertIn(constants.AMPHORA, amp_flow.provides) self.assertIn(constants.VIP_SG_ID, amp_flow.provides)
# self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
# self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
# self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides)
# self.assertIn(constants.SERVER_PEM, amp_flow.provides)
# self.assertIn(constants.VIP_SG_ID, amp_flow.provides)
# self.assertEqual(6, len(amp_flow.provides))
self.assertEqual(1, len(amp_flow.requires)) self.assertEqual(1, len(amp_flow.requires))
self.assertEqual(1, len(amp_flow.provides)) self.assertEqual(1, len(amp_flow.provides))
@ -340,14 +328,10 @@ class TestAmphoraFlows(base.TestCase):
self.assertIsInstance(amp_flow, flow.Flow) self.assertIsInstance(amp_flow, flow.Flow)
# TODO(johnsom) Uncomment after amphora failover builds a replacement
# amphora.
# self.assertIn(constants.AVAILABILITY_ZONE, amp_flow.requires)
# self.assertIn(constants.BUILD_TYPE_PRIORITY, amp_flow.requires)
# self.assertEqual(5, len(amp_flow.requires))
# self.assertEqual(6, len(amp_flow.provides))
self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires) self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires)
self.assertIn(constants.VIP_SG_ID, amp_flow.provides)
self.assertEqual(1, len(amp_flow.requires)) self.assertEqual(1, len(amp_flow.requires))
self.assertEqual(1, len(amp_flow.provides)) self.assertEqual(1, len(amp_flow.provides))

View File

@ -203,13 +203,13 @@ class TestAmphoraDriverTasks(base.TestCase):
# Test no listeners # Test no listeners
mock_lb.listeners = None mock_lb.listeners = None
listeners_reload_obj.execute(mock_lb, None, 0) listeners_reload_obj.execute(mock_lb, 0, None)
mock_driver.reload.assert_not_called() mock_driver.reload.assert_not_called()
# Test with listeners # Test with listeners
mock_driver.start.reset_mock() mock_driver.start.reset_mock()
mock_lb.listeners = [mock_listener] mock_lb.listeners = [mock_listener]
listeners_reload_obj.execute(mock_lb, [amphora_mock], 0, listeners_reload_obj.execute(mock_lb, 0, [amphora_mock],
timeout_dict=self.timeout_dict) timeout_dict=self.timeout_dict)
mock_driver.reload.assert_called_once_with(mock_lb, amphora_mock, mock_driver.reload.assert_called_once_with(mock_lb, amphora_mock,
self.timeout_dict) self.timeout_dict)
@ -620,7 +620,7 @@ class TestAmphoraDriverTasks(base.TestCase):
amphora_update_vrrp_interface_obj = ( amphora_update_vrrp_interface_obj = (
amphora_driver_tasks.AmphoraIndexUpdateVRRPInterface()) amphora_driver_tasks.AmphoraIndexUpdateVRRPInterface())
amphora_update_vrrp_interface_obj.execute( amphora_update_vrrp_interface_obj.execute(
[_amphora_mock], 0, timeout_dict) 0, [_amphora_mock], timeout_dict)
mock_driver.get_interface_from_ip.assert_called_once_with( mock_driver.get_interface_from_ip.assert_called_once_with(
_amphora_mock, _amphora_mock.vrrp_ip, timeout_dict=timeout_dict) _amphora_mock, _amphora_mock.vrrp_ip, timeout_dict=timeout_dict)
mock_amphora_repo_update.assert_called_once_with( mock_amphora_repo_update.assert_called_once_with(
@ -629,7 +629,7 @@ class TestAmphoraDriverTasks(base.TestCase):
# Test with an exception # Test with an exception
mock_amphora_repo_update.reset_mock() mock_amphora_repo_update.reset_mock()
amphora_update_vrrp_interface_obj.execute( amphora_update_vrrp_interface_obj.execute(
[_amphora_mock], 0, timeout_dict) 0, [_amphora_mock], timeout_dict)
mock_amphora_repo_update.assert_called_once_with( mock_amphora_repo_update.assert_called_once_with(
_session_mock, _amphora_mock.id, status=constants.ERROR) _session_mock, _amphora_mock.id, status=constants.ERROR)
@ -692,19 +692,6 @@ class TestAmphoraDriverTasks(base.TestCase):
mock_amphora_repo_update.assert_called_once_with( mock_amphora_repo_update.assert_called_once_with(
_session_mock, _amphora_mock.id, status=constants.ERROR) _session_mock, _amphora_mock.id, status=constants.ERROR)
def test_amphora_vrrp_stop(self,
mock_driver,
mock_generate_uuid,
mock_log,
mock_get_session,
mock_listener_repo_get,
mock_listener_repo_update,
mock_amphora_repo_update):
amphora_vrrp_stop_obj = (
amphora_driver_tasks.AmphoraVRRPStop())
amphora_vrrp_stop_obj.execute(_LB_mock)
mock_driver.stop_vrrp_service.assert_called_once_with(_LB_mock)
def test_amphora_vrrp_start(self, def test_amphora_vrrp_start(self,
mock_driver, mock_driver,
mock_generate_uuid, mock_generate_uuid,

View File

@ -1599,54 +1599,6 @@ class TestControllerWorker(base.TestCase):
assert_called_once_with(FAKE_FLOW, store=expected_stored_params)) assert_called_once_with(FAKE_FLOW, store=expected_stored_params))
_flow_mock.run.assert_called_once_with() _flow_mock.run.assert_called_once_with()
@mock.patch(
'octavia.db.repositories.AmphoraRepository.get_lb_for_amphora',
return_value=None)
@mock.patch('octavia.controller.worker.v1.flows.'
'amphora_flows.AmphoraFlows.get_failover_amphora_flow',
return_value=_flow_mock)
def test_failover_spare_amphora(self,
mock_get_failover_flow,
mock_get_lb_for_amphora,
mock_api_get_session,
mock_dyn_log_listener,
mock_taskflow_load,
mock_pool_repo_get,
mock_member_repo_get,
mock_l7rule_repo_get,
mock_l7policy_repo_get,
mock_listener_repo_get,
mock_lb_repo_get,
mock_health_mon_repo_get,
mock_amp_repo_get):
_flow_mock.reset_mock()
# simulate a spare amphora (amphora not attached to any load_balancer)
mock_amphora = mock.MagicMock()
mock_amphora.id = AMP_ID
mock_amphora.status = constants.AMPHORA_READY
mock_amphora.load_balancer_id = None
mock_amp_repo_get.return_value = mock_amphora
cw = controller_worker.ControllerWorker()
cw.failover_amphora(AMP_ID)
(base_taskflow.BaseTaskFlowEngine.taskflow_load.
assert_called_once_with(
_flow_mock,
store={constants.LOADBALANCER: None,
constants.LOADBALANCER_ID: None,
constants.BUILD_TYPE_PRIORITY:
constants.LB_CREATE_FAILOVER_PRIORITY,
constants.FLAVOR: {},
constants.SERVER_GROUP_ID: None,
constants.AVAILABILITY_ZONE: {},
constants.VIP: None
}))
_flow_mock.run.assert_called_once_with()
@mock.patch('octavia.db.repositories.AmphoraHealthRepository.delete') @mock.patch('octavia.db.repositories.AmphoraHealthRepository.delete')
def test_failover_deleted_amphora(self, def test_failover_deleted_amphora(self,
mock_delete, mock_delete,

View File

@ -16,6 +16,7 @@ from unittest import mock
from oslo_config import cfg from oslo_config import cfg
from oslo_config import fixture as oslo_fixture from oslo_config import fixture as oslo_fixture
from oslo_utils import uuidutils
from taskflow.patterns import linear_flow as flow from taskflow.patterns import linear_flow as flow
from octavia.common import constants from octavia.common import constants
@ -42,6 +43,7 @@ class TestAmphoraFlows(base.TestCase):
self.amp1 = data_models.Amphora(id=1) self.amp1 = data_models.Amphora(id=1)
self.amp2 = data_models.Amphora(id=2) self.amp2 = data_models.Amphora(id=2)
self.amp3 = data_models.Amphora(id=3, status=constants.DELETED) self.amp3 = data_models.Amphora(id=3, status=constants.DELETED)
self.amp4 = data_models.Amphora(id=uuidutils.generate_uuid())
self.lb = data_models.LoadBalancer( self.lb = data_models.LoadBalancer(
id=4, amphorae=[self.amp1, self.amp2, self.amp3]) id=4, amphorae=[self.amp1, self.amp2, self.amp3])
@ -62,7 +64,7 @@ class TestAmphoraFlows(base.TestCase):
self.assertIn(constants.AVAILABILITY_ZONE, amp_flow.requires) self.assertIn(constants.AVAILABILITY_ZONE, amp_flow.requires)
self.assertEqual(5, len(amp_flow.provides)) self.assertEqual(5, len(amp_flow.provides))
self.assertEqual(3, len(amp_flow.requires)) self.assertEqual(4, len(amp_flow.requires))
def test_get_create_amphora_flow_cert(self, mock_get_net_driver): def test_get_create_amphora_flow_cert(self, mock_get_net_driver):
self.AmpFlow = amphora_flows.AmphoraFlows() self.AmpFlow = amphora_flows.AmphoraFlows()
@ -82,7 +84,7 @@ class TestAmphoraFlows(base.TestCase):
self.assertIn(constants.AVAILABILITY_ZONE, amp_flow.requires) self.assertIn(constants.AVAILABILITY_ZONE, amp_flow.requires)
self.assertEqual(5, len(amp_flow.provides)) self.assertEqual(5, len(amp_flow.provides))
self.assertEqual(3, len(amp_flow.requires)) self.assertEqual(4, len(amp_flow.requires))
def test_get_create_amphora_for_lb_flow(self, mock_get_net_driver): def test_get_create_amphora_for_lb_flow(self, mock_get_net_driver):
@ -103,7 +105,7 @@ class TestAmphoraFlows(base.TestCase):
self.assertIn(constants.SERVER_PEM, amp_flow.provides) self.assertIn(constants.SERVER_PEM, amp_flow.provides)
self.assertEqual(5, len(amp_flow.provides)) self.assertEqual(5, len(amp_flow.provides))
self.assertEqual(4, len(amp_flow.requires)) self.assertEqual(5, len(amp_flow.requires))
def test_get_cert_create_amphora_for_lb_flow(self, mock_get_net_driver): def test_get_cert_create_amphora_for_lb_flow(self, mock_get_net_driver):
@ -126,7 +128,7 @@ class TestAmphoraFlows(base.TestCase):
self.assertIn(constants.SERVER_PEM, amp_flow.provides) self.assertIn(constants.SERVER_PEM, amp_flow.provides)
self.assertEqual(5, len(amp_flow.provides)) self.assertEqual(5, len(amp_flow.provides))
self.assertEqual(4, len(amp_flow.requires)) self.assertEqual(5, len(amp_flow.requires))
def test_get_cert_master_create_amphora_for_lb_flow( def test_get_cert_master_create_amphora_for_lb_flow(
self, mock_get_net_driver): self, mock_get_net_driver):
@ -150,7 +152,7 @@ class TestAmphoraFlows(base.TestCase):
self.assertIn(constants.SERVER_PEM, amp_flow.provides) self.assertIn(constants.SERVER_PEM, amp_flow.provides)
self.assertEqual(5, len(amp_flow.provides)) self.assertEqual(5, len(amp_flow.provides))
self.assertEqual(4, len(amp_flow.requires)) self.assertEqual(5, len(amp_flow.requires))
def test_get_cert_master_rest_anti_affinity_create_amphora_for_lb_flow( def test_get_cert_master_rest_anti_affinity_create_amphora_for_lb_flow(
self, mock_get_net_driver): self, mock_get_net_driver):
@ -200,7 +202,7 @@ class TestAmphoraFlows(base.TestCase):
self.assertIn(constants.SERVER_PEM, amp_flow.provides) self.assertIn(constants.SERVER_PEM, amp_flow.provides)
self.assertEqual(5, len(amp_flow.provides)) self.assertEqual(5, len(amp_flow.provides))
self.assertEqual(4, len(amp_flow.requires)) self.assertEqual(5, len(amp_flow.requires))
def test_get_cert_bogus_create_amphora_for_lb_flow( def test_get_cert_bogus_create_amphora_for_lb_flow(
self, mock_get_net_driver): self, mock_get_net_driver):
@ -223,7 +225,7 @@ class TestAmphoraFlows(base.TestCase):
self.assertIn(constants.SERVER_PEM, amp_flow.provides) self.assertIn(constants.SERVER_PEM, amp_flow.provides)
self.assertEqual(5, len(amp_flow.provides)) self.assertEqual(5, len(amp_flow.provides))
self.assertEqual(4, len(amp_flow.requires)) self.assertEqual(5, len(amp_flow.requires))
def test_get_cert_backup_rest_anti_affinity_create_amphora_for_lb_flow( def test_get_cert_backup_rest_anti_affinity_create_amphora_for_lb_flow(
self, mock_get_net_driver): self, mock_get_net_driver):
@ -253,14 +255,14 @@ class TestAmphoraFlows(base.TestCase):
def test_get_delete_amphora_flow(self, mock_get_net_driver): def test_get_delete_amphora_flow(self, mock_get_net_driver):
amp_flow = self.AmpFlow.get_delete_amphora_flow() amp_flow = self.AmpFlow.get_delete_amphora_flow(self.amp4.to_dict())
self.assertIsInstance(amp_flow, flow.Flow) self.assertIsInstance(amp_flow, flow.Flow)
self.assertIn(constants.AMPHORA, amp_flow.requires) # This flow injects the required data at flow compile time.
self.assertEqual(0, len(amp_flow.provides)) self.assertEqual(0, len(amp_flow.provides))
self.assertEqual(1, len(amp_flow.requires)) self.assertEqual(0, len(amp_flow.requires))
def test_allocate_amp_to_lb_decider(self, mock_get_net_driver): def test_allocate_amp_to_lb_decider(self, mock_get_net_driver):
history = mock.MagicMock() history = mock.MagicMock()
@ -282,102 +284,103 @@ class TestAmphoraFlows(base.TestCase):
result = self.AmpFlow._create_new_amp_for_lb_decider(history) result = self.AmpFlow._create_new_amp_for_lb_decider(history)
self.assertFalse(result) self.assertFalse(result)
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get') def test_get_failover_flow_act_stdby(self, mock_get_net_driver):
@mock.patch('octavia.db.api.get_session', return_value=mock.MagicMock()) failed_amphora = data_models.Amphora(
def test_get_failover_flow_allocated(self, mock_session, mock_get_lb, id=uuidutils.generate_uuid(), role=constants.ROLE_MASTER,
mock_get_net_driver): load_balancer_id=uuidutils.generate_uuid()).to_dict()
mock_get_lb.return_value = self.lb
provider_lb = {constants.LOADBALANCER_ID: '1234'} amp_flow = self.AmpFlow.get_failover_amphora_flow(
amp_flow = self.AmpFlow.get_failover_flow( failed_amphora, 2)
load_balancer=provider_lb)
self.assertIsInstance(amp_flow, flow.Flow) self.assertIsInstance(amp_flow, flow.Flow)
self.assertIn(constants.FAILED_AMPHORA, amp_flow.requires) self.assertIn(constants.AVAILABILITY_ZONE, amp_flow.requires)
self.assertIn(constants.BUILD_TYPE_PRIORITY, amp_flow.requires)
self.assertIn(constants.FLAVOR, amp_flow.requires)
self.assertIn(constants.LOADBALANCER, amp_flow.requires)
self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires) self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires)
self.assertIn(constants.VIP, amp_flow.requires)
self.assertIn(constants.AMP_DATA, amp_flow.provides) self.assertIn(constants.ADDED_PORTS, amp_flow.provides)
self.assertIn(constants.AMP_VRRP_INT, amp_flow.provides)
self.assertIn(constants.AMPHORA, amp_flow.provides) self.assertIn(constants.AMPHORA, amp_flow.provides)
self.assertIn(constants.AMPHORA_ID, amp_flow.provides) self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
self.assertIn(constants.AMPHORAE, amp_flow.provides)
self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, amp_flow.provides) self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, amp_flow.provides)
self.assertIn(constants.BASE_PORT, amp_flow.provides)
self.assertIn(constants.COMPUTE_ID, amp_flow.provides) self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides) self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides)
self.assertIn(constants.LISTENERS, amp_flow.provides) self.assertIn(constants.DELTA, amp_flow.provides)
self.assertIn(constants.LOADBALANCER, amp_flow.provides) self.assertIn(constants.LOADBALANCER, amp_flow.provides)
self.assertIn(constants.SERVER_PEM, amp_flow.provides)
self.assertIn(constants.VIP_SG_ID, amp_flow.provides)
self.assertEqual(5, len(amp_flow.requires)) self.assertEqual(7, len(amp_flow.requires))
self.assertEqual(12, len(amp_flow.provides)) self.assertEqual(13, len(amp_flow.provides))
amp_flow = self.AmpFlow.get_failover_flow( def test_get_failover_flow_standalone(self, mock_get_net_driver):
role=constants.ROLE_MASTER, load_balancer=provider_lb) failed_amphora = data_models.Amphora(
id=uuidutils.generate_uuid(), role=constants.ROLE_STANDALONE,
load_balancer_id=uuidutils.generate_uuid(),
vrrp_ip='2001:3b8::32').to_dict()
amp_flow = self.AmpFlow.get_failover_amphora_flow(
failed_amphora, 1)
self.assertIsInstance(amp_flow, flow.Flow) self.assertIsInstance(amp_flow, flow.Flow)
self.assertIn(constants.FAILED_AMPHORA, amp_flow.requires) self.assertIn(constants.AVAILABILITY_ZONE, amp_flow.requires)
self.assertIn(constants.BUILD_TYPE_PRIORITY, amp_flow.requires)
self.assertIn(constants.FLAVOR, amp_flow.requires)
self.assertIn(constants.LOADBALANCER, amp_flow.requires)
self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires) self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires)
self.assertIn(constants.VIP, amp_flow.requires)
self.assertIn(constants.AMP_DATA, amp_flow.provides) self.assertIn(constants.ADDED_PORTS, amp_flow.provides)
self.assertIn(constants.AMPHORA, amp_flow.provides) self.assertIn(constants.AMPHORA, amp_flow.provides)
self.assertIn(constants.AMPHORA_ID, amp_flow.provides) self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
self.assertIn(constants.AMPHORAE, amp_flow.provides)
self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, amp_flow.provides) self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, amp_flow.provides)
self.assertIn(constants.BASE_PORT, amp_flow.provides)
self.assertIn(constants.COMPUTE_ID, amp_flow.provides) self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides) self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides)
self.assertIn(constants.LISTENERS, amp_flow.provides) self.assertIn(constants.DELTA, amp_flow.provides)
self.assertIn(constants.LOADBALANCER, amp_flow.provides) self.assertIn(constants.LOADBALANCER, amp_flow.provides)
self.assertIn(constants.SERVER_PEM, amp_flow.provides)
self.assertIn(constants.VIP_SG_ID, amp_flow.provides)
self.assertEqual(5, len(amp_flow.requires)) self.assertEqual(7, len(amp_flow.requires))
self.assertEqual(12, len(amp_flow.provides)) self.assertEqual(12, len(amp_flow.provides))
amp_flow = self.AmpFlow.get_failover_flow( def test_get_failover_flow_bogus_role(self, mock_get_net_driver):
role=constants.ROLE_BACKUP, load_balancer=provider_lb) failed_amphora = data_models.Amphora(id=uuidutils.generate_uuid(),
role='bogus').to_dict()
amp_flow = self.AmpFlow.get_failover_amphora_flow(
failed_amphora, 1)
self.assertIsInstance(amp_flow, flow.Flow) self.assertIsInstance(amp_flow, flow.Flow)
self.assertIn(constants.FAILED_AMPHORA, amp_flow.requires)
self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires) self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires)
self.assertIn(constants.AMP_DATA, amp_flow.provides) self.assertIn(constants.VIP_SG_ID, amp_flow.provides)
self.assertIn(constants.AMPHORA, amp_flow.provides)
self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, amp_flow.provides)
self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides)
self.assertIn(constants.LISTENERS, amp_flow.provides)
self.assertIn(constants.LOADBALANCER, amp_flow.provides)
self.assertEqual(5, len(amp_flow.requires)) self.assertEqual(1, len(amp_flow.requires))
self.assertEqual(12, len(amp_flow.provides)) self.assertEqual(1, len(amp_flow.provides))
amp_flow = self.AmpFlow.get_failover_flow(
role='BOGUSROLE', load_balancer=provider_lb)
self.assertIsInstance(amp_flow, flow.Flow)
self.assertIn(constants.FAILED_AMPHORA, amp_flow.requires)
self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires)
self.assertIn(constants.AMP_DATA, amp_flow.provides)
self.assertIn(constants.AMPHORA, amp_flow.provides)
self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, amp_flow.provides)
self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides)
self.assertIn(constants.LISTENERS, amp_flow.provides)
self.assertIn(constants.LOADBALANCER, amp_flow.provides)
self.assertEqual(5, len(amp_flow.requires))
self.assertEqual(12, len(amp_flow.provides))
def test_get_failover_flow_spare(self, mock_get_net_driver): def test_get_failover_flow_spare(self, mock_get_net_driver):
amp_flow = self.AmpFlow.get_failover_flow() amp_flow = self.AmpFlow.get_failover_amphora_flow(
self.amp4.to_dict(), 0)
self.assertIsInstance(amp_flow, flow.Flow) self.assertIsInstance(amp_flow, flow.Flow)
self.assertIn(constants.FAILED_AMPHORA, amp_flow.requires) self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires)
self.assertIn(constants.VIP_SG_ID, amp_flow.provides)
self.assertEqual(1, len(amp_flow.requires)) self.assertEqual(1, len(amp_flow.requires))
self.assertEqual(0, len(amp_flow.provides)) self.assertEqual(1, len(amp_flow.provides))
def test_cert_rotate_amphora_flow(self, mock_get_net_driver): def test_cert_rotate_amphora_flow(self, mock_get_net_driver):
self.AmpFlow = amphora_flows.AmphoraFlows() self.AmpFlow = amphora_flows.AmphoraFlows()
@ -396,12 +399,30 @@ class TestAmphoraFlows(base.TestCase):
self.assertIsInstance(vrrp_subflow, flow.Flow) self.assertIsInstance(vrrp_subflow, flow.Flow)
self.assertIn(constants.LOADBALANCER, vrrp_subflow.provides) self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, vrrp_subflow.provides)
self.assertIn(constants.AMP_VRRP_INT, vrrp_subflow.provides)
self.assertIn(constants.LOADBALANCER, vrrp_subflow.requires) self.assertIn(constants.LOADBALANCER_ID, vrrp_subflow.requires)
self.assertIn(constants.AMPHORAE, vrrp_subflow.requires)
self.assertEqual(2, len(vrrp_subflow.provides)) self.assertEqual(2, len(vrrp_subflow.provides))
self.assertEqual(1, len(vrrp_subflow.requires)) self.assertEqual(2, len(vrrp_subflow.requires))
def test_get_vrrp_subflow_dont_create_vrrp_group(
self, mock_get_net_driver):
vrrp_subflow = self.AmpFlow.get_vrrp_subflow('123',
create_vrrp_group=False)
self.assertIsInstance(vrrp_subflow, flow.Flow)
self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, vrrp_subflow.provides)
self.assertIn(constants.AMP_VRRP_INT, vrrp_subflow.provides)
self.assertIn(constants.LOADBALANCER_ID, vrrp_subflow.requires)
self.assertIn(constants.AMPHORAE, vrrp_subflow.requires)
self.assertEqual(2, len(vrrp_subflow.provides))
self.assertEqual(2, len(vrrp_subflow.requires))
def test_get_post_map_lb_subflow(self, mock_get_net_driver): def test_get_post_map_lb_subflow(self, mock_get_net_driver):
@ -414,9 +435,8 @@ class TestAmphoraFlows(base.TestCase):
self.assertIn(constants.FLAVOR, amp_flow.requires) self.assertIn(constants.FLAVOR, amp_flow.requires)
self.assertIn(constants.AMPHORA, amp_flow.requires) self.assertIn(constants.AMPHORA, amp_flow.requires)
self.assertIn(constants.AMPHORA, amp_flow.provides)
self.assertEqual(1, len(amp_flow.provides)) self.assertEqual(0, len(amp_flow.provides))
self.assertEqual(2, len(amp_flow.requires)) self.assertEqual(2, len(amp_flow.requires))
amp_flow = self.AmpFlow._get_post_map_lb_subflow( amp_flow = self.AmpFlow._get_post_map_lb_subflow(
@ -426,9 +446,8 @@ class TestAmphoraFlows(base.TestCase):
self.assertIn(constants.FLAVOR, amp_flow.requires) self.assertIn(constants.FLAVOR, amp_flow.requires)
self.assertIn(constants.AMPHORA, amp_flow.requires) self.assertIn(constants.AMPHORA, amp_flow.requires)
self.assertIn(constants.AMPHORA, amp_flow.provides)
self.assertEqual(1, len(amp_flow.provides)) self.assertEqual(0, len(amp_flow.provides))
self.assertEqual(2, len(amp_flow.requires)) self.assertEqual(2, len(amp_flow.requires))
amp_flow = self.AmpFlow._get_post_map_lb_subflow( amp_flow = self.AmpFlow._get_post_map_lb_subflow(
@ -438,9 +457,8 @@ class TestAmphoraFlows(base.TestCase):
self.assertIn(constants.FLAVOR, amp_flow.requires) self.assertIn(constants.FLAVOR, amp_flow.requires)
self.assertIn(constants.AMPHORA, amp_flow.requires) self.assertIn(constants.AMPHORA, amp_flow.requires)
self.assertIn(constants.AMPHORA, amp_flow.provides)
self.assertEqual(1, len(amp_flow.provides)) self.assertEqual(0, len(amp_flow.provides))
self.assertEqual(2, len(amp_flow.requires)) self.assertEqual(2, len(amp_flow.requires))
amp_flow = self.AmpFlow._get_post_map_lb_subflow( amp_flow = self.AmpFlow._get_post_map_lb_subflow(
@ -450,9 +468,8 @@ class TestAmphoraFlows(base.TestCase):
self.assertIn(constants.FLAVOR, amp_flow.requires) self.assertIn(constants.FLAVOR, amp_flow.requires)
self.assertIn(constants.AMPHORA, amp_flow.requires) self.assertIn(constants.AMPHORA, amp_flow.requires)
self.assertIn(constants.AMPHORA, amp_flow.provides)
self.assertEqual(1, len(amp_flow.provides)) self.assertEqual(0, len(amp_flow.provides))
self.assertEqual(2, len(amp_flow.requires)) self.assertEqual(2, len(amp_flow.requires))
def test_update_amphora_config_flow(self, mock_get_net_driver): def test_update_amphora_config_flow(self, mock_get_net_driver):
View File
@ -14,6 +14,7 @@
# #
from unittest import mock from unittest import mock
from oslo_utils import uuidutils
from taskflow.patterns import linear_flow as flow from taskflow.patterns import linear_flow as flow
from octavia.common import constants from octavia.common import constants
@ -57,15 +58,16 @@ class TestListenerFlows(base.TestCase):
self.assertEqual(0, len(listener_flow.provides)) self.assertEqual(0, len(listener_flow.provides))
def test_get_delete_listener_internal_flow(self, mock_get_net_driver): def test_get_delete_listener_internal_flow(self, mock_get_net_driver):
fake_listener = {constants.LISTENER_ID: uuidutils.generate_uuid()}
listener_flow = self.ListenerFlow.get_delete_listener_internal_flow( listener_flow = self.ListenerFlow.get_delete_listener_internal_flow(
'test-listener') fake_listener)
self.assertIsInstance(listener_flow, flow.Flow) self.assertIsInstance(listener_flow, flow.Flow)
self.assertIn('test-listener', listener_flow.requires) self.assertIn(constants.LOADBALANCER_ID, listener_flow.requires)
self.assertIn(constants.PROJECT_ID, listener_flow.requires) self.assertIn(constants.PROJECT_ID, listener_flow.requires)
self.assertEqual(3, len(listener_flow.requires)) self.assertEqual(2, len(listener_flow.requires))
self.assertEqual(0, len(listener_flow.provides)) self.assertEqual(0, len(listener_flow.provides))
def test_get_update_listener_flow(self, mock_get_net_driver): def test_get_update_listener_flow(self, mock_get_net_driver):
View File
@ -16,10 +16,12 @@ from unittest import mock
from oslo_config import cfg from oslo_config import cfg
from oslo_config import fixture as oslo_fixture from oslo_config import fixture as oslo_fixture
from oslo_utils import uuidutils
from taskflow.patterns import linear_flow as flow from taskflow.patterns import linear_flow as flow
from octavia.common import constants from octavia.common import constants
from octavia.common import exceptions from octavia.common import exceptions
from octavia.controller.worker.v2.flows import flow_utils
from octavia.controller.worker.v2.flows import load_balancer_flows from octavia.controller.worker.v2.flows import load_balancer_flows
import octavia.tests.unit.base as base import octavia.tests.unit.base as base
@ -106,34 +108,38 @@ class TestLoadBalancerFlows(base.TestCase):
lb_mock = mock.Mock() lb_mock = mock.Mock()
listener_mock = mock.Mock() listener_mock = mock.Mock()
listener_mock.id = '123' listener_mock.id = '123'
listener_dict = {constants.LISTENER_ID: '123'}
listener_mock.to_dict.return_value = {'id': '123'} listener_mock.to_dict.return_value = {'id': '123'}
lb_mock.listeners = [listener_mock] lb_mock.listeners = [listener_mock]
lb_mock.id = '321' lb_mock.id = '321'
lb_mock.project_id = '876' lb_mock.project_id = '876'
pool_mock = mock.Mock() pool_mock = mock.Mock()
pool_mock.id = '345' pool_mock.id = '345'
pool_mock.to_dict.return_value = {constants.ID: pool_mock.id}
pool_mock.listeners = None
pool_mock.health_monitor = None
pool_mock.members = None
lb_mock.pools = [pool_mock] lb_mock.pools = [pool_mock]
l7_mock = mock.Mock() l7_mock = mock.Mock()
l7_mock.id = '678' l7_mock.id = '678'
listener_mock.l7policies = [l7_mock] listener_mock.l7policies = [l7_mock]
mock_get_lb.return_value = lb_mock mock_get_lb.return_value = lb_mock
lb_dict = {constants.LOADBALANCER_ID: lb_mock.id} lb_dict = {constants.LOADBALANCER_ID: lb_mock.id}
store = self.LBFlow.get_delete_listeners_store(lb_mock)
store.update(self.LBFlow.get_delete_pools_store(lb_mock)) listeners = flow_utils.get_listeners_on_lb(lb_mock)
pools = flow_utils.get_pools_on_lb(lb_mock)
lb_flow = self.LBFlow.get_cascade_delete_load_balancer_flow( lb_flow = self.LBFlow.get_cascade_delete_load_balancer_flow(
lb_dict) lb_dict, listeners, pools)
self.assertIsInstance(lb_flow, flow.Flow) self.assertIsInstance(lb_flow, flow.Flow)
self.assertEqual({'listener_123': listener_dict,
constants.LOADBALANCER_ID: lb_mock.id,
constants.PROJECT_ID: lb_mock.project_id,
'pool345': pool_mock.id}, store)
self.assertIn(constants.LOADBALANCER, lb_flow.requires) self.assertIn(constants.LOADBALANCER, lb_flow.requires)
self.assertIn(constants.LOADBALANCER_ID, lb_flow.requires)
self.assertIn(constants.PROJECT_ID, lb_flow.requires)
self.assertIn(constants.SERVER_GROUP_ID, lb_flow.requires)
self.assertEqual(1, len(lb_flow.provides)) self.assertEqual(1, len(lb_flow.provides))
self.assertEqual(6, len(lb_flow.requires)) self.assertEqual(4, len(lb_flow.requires))
def test_get_update_load_balancer_flow(self, mock_get_net_driver): def test_get_update_load_balancer_flow(self, mock_get_net_driver):
@ -168,10 +174,14 @@ class TestLoadBalancerFlows(base.TestCase):
self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires) self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires)
self.assertIn(constants.UPDATE_DICT, amp_flow.requires) self.assertIn(constants.UPDATE_DICT, amp_flow.requires)
self.assertIn(constants.AMPHORAE, amp_flow.provides)
self.assertIn(constants.AMP_VRRP_INT, amp_flow.provides)
self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, amp_flow.provides)
self.assertIn(constants.LOADBALANCER, amp_flow.provides) self.assertIn(constants.LOADBALANCER, amp_flow.provides)
self.assertEqual(2, len(amp_flow.provides)) self.assertEqual(2, len(amp_flow.requires), amp_flow.requires)
self.assertEqual(2, len(amp_flow.requires)) self.assertEqual(4, len(amp_flow.provides), amp_flow.provides)
# Test mark_active=False # Test mark_active=False
amp_flow = self.LBFlow.get_post_lb_amp_association_flow( amp_flow = self.LBFlow.get_post_lb_amp_association_flow(
@ -181,10 +191,14 @@ class TestLoadBalancerFlows(base.TestCase):
self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires) self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires)
self.assertIn(constants.UPDATE_DICT, amp_flow.requires) self.assertIn(constants.UPDATE_DICT, amp_flow.requires)
self.assertIn(constants.AMPHORAE, amp_flow.provides)
self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, amp_flow.provides)
self.assertIn(constants.AMP_VRRP_INT, amp_flow.provides)
self.assertIn(constants.LOADBALANCER, amp_flow.provides) self.assertIn(constants.LOADBALANCER, amp_flow.provides)
self.assertEqual(2, len(amp_flow.provides)) self.assertEqual(2, len(amp_flow.requires), amp_flow.requires)
self.assertEqual(2, len(amp_flow.requires)) self.assertEqual(4, len(amp_flow.provides), amp_flow.provides)
def test_get_create_load_balancer_flows_single_listeners( def test_get_create_load_balancer_flows_single_listeners(
self, mock_get_net_driver): self, mock_get_net_driver):
@ -200,16 +214,18 @@ class TestLoadBalancerFlows(base.TestCase):
self.assertIn(constants.LISTENERS, create_flow.provides) self.assertIn(constants.LISTENERS, create_flow.provides)
self.assertIn(constants.AMPHORA, create_flow.provides) self.assertIn(constants.AMPHORA, create_flow.provides)
self.assertIn(constants.AMPHORA_ID, create_flow.provides) self.assertIn(constants.AMPHORA_ID, create_flow.provides)
self.assertIn(constants.AMPHORA_NETWORK_CONFIG, create_flow.provides)
self.assertIn(constants.AMP_DATA, create_flow.provides)
self.assertIn(constants.COMPUTE_ID, create_flow.provides) self.assertIn(constants.COMPUTE_ID, create_flow.provides)
self.assertIn(constants.COMPUTE_OBJ, create_flow.provides) self.assertIn(constants.COMPUTE_OBJ, create_flow.provides)
self.assertIn(constants.LOADBALANCER, create_flow.provides) self.assertIn(constants.LOADBALANCER, create_flow.provides)
self.assertIn(constants.DELTAS, create_flow.provides) self.assertIn(constants.DELTAS, create_flow.provides)
self.assertIn(constants.ADDED_PORTS, create_flow.provides) self.assertIn(constants.ADDED_PORTS, create_flow.provides)
self.assertIn(constants.SERVER_PEM, create_flow.provides)
self.assertIn(constants.SUBNET, create_flow.provides)
self.assertIn(constants.VIP, create_flow.provides) self.assertIn(constants.VIP, create_flow.provides)
self.assertIn(constants.AMP_DATA, create_flow.provides)
self.assertIn(constants.AMPHORA_NETWORK_CONFIG, create_flow.provides)
self.assertEqual(5, len(create_flow.requires)) self.assertEqual(6, len(create_flow.requires))
self.assertEqual(13, len(create_flow.provides), self.assertEqual(13, len(create_flow.provides),
create_flow.provides) create_flow.provides)
@ -221,22 +237,232 @@ class TestLoadBalancerFlows(base.TestCase):
) )
) )
self.assertIsInstance(create_flow, flow.Flow) self.assertIsInstance(create_flow, flow.Flow)
self.assertIn(constants.AVAILABILITY_ZONE, create_flow.requires)
self.assertIn(constants.BUILD_TYPE_PRIORITY, create_flow.requires)
self.assertIn(constants.FLAVOR, create_flow.requires)
self.assertIn(constants.LOADBALANCER_ID, create_flow.requires) self.assertIn(constants.LOADBALANCER_ID, create_flow.requires)
self.assertIn(constants.SERVER_GROUP_ID, create_flow.requires)
self.assertIn(constants.UPDATE_DICT, create_flow.requires) self.assertIn(constants.UPDATE_DICT, create_flow.requires)
self.assertIn(constants.LISTENERS, create_flow.provides) self.assertIn(constants.ADDED_PORTS, create_flow.provides)
self.assertIn(constants.AMP_DATA, create_flow.provides)
self.assertIn(constants.AMP_VRRP_INT, create_flow.provides)
self.assertIn(constants.AMPHORA, create_flow.provides) self.assertIn(constants.AMPHORA, create_flow.provides)
self.assertIn(constants.AMPHORAE, create_flow.provides)
self.assertIn(constants.AMPHORA_ID, create_flow.provides) self.assertIn(constants.AMPHORA_ID, create_flow.provides)
self.assertIn(constants.AMPHORA_NETWORK_CONFIG, create_flow.provides)
self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, create_flow.provides)
self.assertIn(constants.COMPUTE_ID, create_flow.provides) self.assertIn(constants.COMPUTE_ID, create_flow.provides)
self.assertIn(constants.COMPUTE_OBJ, create_flow.provides) self.assertIn(constants.COMPUTE_OBJ, create_flow.provides)
self.assertIn(constants.LOADBALANCER, create_flow.provides)
self.assertIn(constants.DELTAS, create_flow.provides) self.assertIn(constants.DELTAS, create_flow.provides)
self.assertIn(constants.ADDED_PORTS, create_flow.provides) self.assertIn(constants.LOADBALANCER, create_flow.provides)
self.assertIn(constants.LISTENERS, create_flow.provides)
self.assertIn(constants.SERVER_PEM, create_flow.provides)
self.assertIn(constants.SUBNET, create_flow.provides)
self.assertIn(constants.VIP, create_flow.provides) self.assertIn(constants.VIP, create_flow.provides)
self.assertIn(constants.AMP_DATA, create_flow.provides)
self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, self.assertEqual(6, len(create_flow.requires), create_flow.requires)
self.assertEqual(16, len(create_flow.provides),
create_flow.provides) create_flow.provides)
self.assertEqual(5, len(create_flow.requires)) def _test_get_failover_LB_flow_single(self, amphorae):
self.assertEqual(14, len(create_flow.provides), lb_mock = mock.MagicMock()
create_flow.provides) lb_mock.id = uuidutils.generate_uuid()
lb_mock.topology = constants.TOPOLOGY_SINGLE
failover_flow = self.LBFlow.get_failover_LB_flow(amphorae, lb_mock)
self.assertIsInstance(failover_flow, flow.Flow)
self.assertIn(constants.AVAILABILITY_ZONE, failover_flow.requires)
self.assertIn(constants.BUILD_TYPE_PRIORITY, failover_flow.requires)
self.assertIn(constants.FLAVOR, failover_flow.requires)
self.assertIn(constants.LOADBALANCER, failover_flow.requires)
self.assertIn(constants.LOADBALANCER_ID, failover_flow.requires)
self.assertIn(constants.ADDED_PORTS, failover_flow.provides)
self.assertIn(constants.AMPHORA, failover_flow.provides)
self.assertIn(constants.AMPHORA_ID, failover_flow.provides)
self.assertIn(constants.AMPHORAE_NETWORK_CONFIG,
failover_flow.provides)
self.assertIn(constants.BASE_PORT, failover_flow.provides)
self.assertIn(constants.COMPUTE_ID, failover_flow.provides)
self.assertIn(constants.COMPUTE_OBJ, failover_flow.provides)
self.assertIn(constants.DELTA, failover_flow.provides)
self.assertIn(constants.LOADBALANCER, failover_flow.provides)
self.assertIn(constants.SERVER_PEM, failover_flow.provides)
self.assertIn(constants.VIP, failover_flow.provides)
self.assertIn(constants.VIP_SG_ID, failover_flow.provides)
self.assertEqual(6, len(failover_flow.requires),
failover_flow.requires)
self.assertEqual(12, len(failover_flow.provides),
failover_flow.provides)
def test_get_failover_LB_flow_no_amps_single(self, mock_get_net_driver):
self._test_get_failover_LB_flow_single([])
def test_get_failover_LB_flow_one_amp_single(self, mock_get_net_driver):
amphora_dict = {constants.ID: uuidutils.generate_uuid(),
constants.ROLE: constants.ROLE_STANDALONE,
constants.COMPUTE_ID: uuidutils.generate_uuid(),
constants.VRRP_PORT_ID: None, constants.VRRP_IP: None}
self._test_get_failover_LB_flow_single([amphora_dict])
def test_get_failover_LB_flow_one_spare_amp_single(self,
mock_get_net_driver):
amphora_dict = {constants.ID: uuidutils.generate_uuid(),
constants.ROLE: 'bogus',
constants.COMPUTE_ID: uuidutils.generate_uuid(),
constants.VRRP_PORT_ID: None, constants.VRRP_IP: None}
self._test_get_failover_LB_flow_single([amphora_dict])
def test_get_failover_LB_flow_one_bogus_amp_single(self,
mock_get_net_driver):
amphora_dict = {constants.ID: uuidutils.generate_uuid(),
constants.ROLE: 'bogus',
constants.COMPUTE_ID: uuidutils.generate_uuid(),
constants.VRRP_PORT_ID: None, constants.VRRP_IP: None}
self._test_get_failover_LB_flow_single([amphora_dict])
def test_get_failover_LB_flow_two_amp_single(self, mock_get_net_driver):
amphora_dict = {constants.ID: uuidutils.generate_uuid()}
amphora2_dict = {constants.ID: uuidutils.generate_uuid(),
constants.ROLE: constants.ROLE_STANDALONE,
constants.COMPUTE_ID: uuidutils.generate_uuid(),
constants.VRRP_PORT_ID: None, constants.VRRP_IP: None}
self._test_get_failover_LB_flow_single([amphora_dict, amphora2_dict])
def _test_get_failover_LB_flow_no_amps_act_stdby(self, amphorae):
lb_mock = mock.MagicMock()
lb_mock.id = uuidutils.generate_uuid()
lb_mock.topology = constants.TOPOLOGY_ACTIVE_STANDBY
failover_flow = self.LBFlow.get_failover_LB_flow(amphorae, lb_mock)
self.assertIsInstance(failover_flow, flow.Flow)
self.assertIn(constants.AVAILABILITY_ZONE, failover_flow.requires)
self.assertIn(constants.BUILD_TYPE_PRIORITY, failover_flow.requires)
self.assertIn(constants.FLAVOR, failover_flow.requires)
self.assertIn(constants.LOADBALANCER, failover_flow.requires)
self.assertIn(constants.LOADBALANCER_ID, failover_flow.requires)
self.assertIn(constants.ADDED_PORTS, failover_flow.provides)
self.assertIn(constants.AMPHORA, failover_flow.provides)
self.assertIn(constants.AMPHORA_ID, failover_flow.provides)
self.assertIn(constants.AMPHORAE_NETWORK_CONFIG,
failover_flow.provides)
self.assertIn(constants.BASE_PORT, failover_flow.provides)
self.assertIn(constants.COMPUTE_ID, failover_flow.provides)
self.assertIn(constants.COMPUTE_OBJ, failover_flow.provides)
self.assertIn(constants.DELTA, failover_flow.provides)
self.assertIn(constants.LOADBALANCER, failover_flow.provides)
self.assertIn(constants.SERVER_PEM, failover_flow.provides)
self.assertIn(constants.VIP, failover_flow.provides)
self.assertIn(constants.VIP_SG_ID, failover_flow.provides)
self.assertEqual(6, len(failover_flow.requires),
failover_flow.requires)
self.assertEqual(12, len(failover_flow.provides),
failover_flow.provides)
def test_get_failover_LB_flow_no_amps_act_stdby(self, mock_get_net_driver):
self._test_get_failover_LB_flow_no_amps_act_stdby([])
def test_get_failover_LB_flow_one_amp_act_stdby(self, mock_get_net_driver):
amphora_dict = {constants.ID: uuidutils.generate_uuid(),
constants.ROLE: constants.ROLE_MASTER,
constants.COMPUTE_ID: uuidutils.generate_uuid(),
constants.VRRP_PORT_ID: None, constants.VRRP_IP: None}
self._test_get_failover_LB_flow_no_amps_act_stdby([amphora_dict])
def test_get_failover_LB_flow_two_amps_act_stdby(self,
mock_get_net_driver):
amphora_dict = {constants.ID: uuidutils.generate_uuid(),
constants.ROLE: constants.ROLE_MASTER,
constants.COMPUTE_ID: uuidutils.generate_uuid(),
constants.VRRP_PORT_ID: uuidutils.generate_uuid(),
constants.VRRP_IP: '192.0.2.46'}
amphora2_dict = {constants.ID: uuidutils.generate_uuid(),
constants.ROLE: constants.ROLE_BACKUP,
constants.COMPUTE_ID: uuidutils.generate_uuid(),
constants.VRRP_PORT_ID: uuidutils.generate_uuid(),
constants.VRRP_IP: '2001:db8::46'}
self._test_get_failover_LB_flow_no_amps_act_stdby([amphora_dict,
amphora2_dict])
def test_get_failover_LB_flow_three_amps_act_stdby(self,
mock_get_net_driver):
amphora_dict = {constants.ID: uuidutils.generate_uuid(),
constants.ROLE: constants.ROLE_MASTER,
constants.COMPUTE_ID: uuidutils.generate_uuid(),
constants.VRRP_PORT_ID: uuidutils.generate_uuid(),
constants.VRRP_IP: '192.0.2.46'}
amphora2_dict = {constants.ID: uuidutils.generate_uuid(),
constants.ROLE: constants.ROLE_BACKUP,
constants.COMPUTE_ID: uuidutils.generate_uuid(),
constants.VRRP_PORT_ID: uuidutils.generate_uuid(),
constants.VRRP_IP: '2001:db8::46'}
amphora3_dict = {constants.ID: uuidutils.generate_uuid(),
constants.ROLE: 'bogus',
constants.COMPUTE_ID: uuidutils.generate_uuid(),
constants.VRRP_PORT_ID: None, constants.VRRP_IP: None}
self._test_get_failover_LB_flow_no_amps_act_stdby(
[amphora_dict, amphora2_dict, amphora3_dict])
def test_get_failover_LB_flow_two_amps_bogus_act_stdby(
self, mock_get_net_driver):
amphora_dict = {constants.ID: uuidutils.generate_uuid(),
constants.ROLE: 'bogus',
constants.COMPUTE_ID: uuidutils.generate_uuid(),
constants.VRRP_PORT_ID: uuidutils.generate_uuid(),
constants.VRRP_IP: '192.0.2.46'}
amphora2_dict = {constants.ID: uuidutils.generate_uuid(),
constants.ROLE: constants.ROLE_MASTER,
constants.COMPUTE_ID: uuidutils.generate_uuid(),
constants.VRRP_PORT_ID: uuidutils.generate_uuid(),
constants.VRRP_IP: '2001:db8::46'}
self._test_get_failover_LB_flow_no_amps_act_stdby([amphora_dict,
amphora2_dict])
def test_get_failover_LB_flow_two_amps_spare_act_stdby(
self, mock_get_net_driver):
amphora_dict = {constants.ID: uuidutils.generate_uuid(),
constants.ROLE: None,
constants.COMPUTE_ID: uuidutils.generate_uuid(),
constants.VRRP_PORT_ID: uuidutils.generate_uuid(),
constants.VRRP_IP: '192.0.2.46'}
amphora2_dict = {constants.ID: uuidutils.generate_uuid(),
constants.ROLE: constants.ROLE_MASTER,
constants.COMPUTE_ID: uuidutils.generate_uuid(),
constants.VRRP_PORT_ID: uuidutils.generate_uuid(),
constants.VRRP_IP: '2001:db8::46'}
self._test_get_failover_LB_flow_no_amps_act_stdby([amphora_dict,
amphora2_dict])
def test_get_failover_LB_flow_two_amps_standalone_act_stdby(
self, mock_get_net_driver):
amphora_dict = {constants.ID: uuidutils.generate_uuid(),
constants.ROLE: constants.ROLE_STANDALONE,
constants.COMPUTE_ID: uuidutils.generate_uuid(),
constants.VRRP_PORT_ID: uuidutils.generate_uuid(),
constants.VRRP_IP: '192.0.2.46'}
amphora2_dict = {constants.ID: uuidutils.generate_uuid(),
constants.ROLE: constants.ROLE_MASTER,
constants.COMPUTE_ID: uuidutils.generate_uuid(),
constants.VRRP_PORT_ID: uuidutils.generate_uuid(),
constants.VRRP_IP: '2001:db8::46'}
self._test_get_failover_LB_flow_no_amps_act_stdby([amphora_dict,
amphora2_dict])
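The active-standby cases above repeat the same five-key amphora dictionaries. A small helper along these lines (illustrative only, not part of the change) keeps additional cases consistent:

from oslo_utils import uuidutils

from octavia.common import constants


def make_amphora_dict(role=constants.ROLE_MASTER, vrrp_ip=None,
                      vrrp_port_id=None):
    # Minimal amphora dict of the shape these tests pass to
    # get_failover_LB_flow.
    return {constants.ID: uuidutils.generate_uuid(),
            constants.ROLE: role,
            constants.COMPUTE_ID: uuidutils.generate_uuid(),
            constants.VRRP_PORT_ID: vrrp_port_id,
            constants.VRRP_IP: vrrp_ip}


# The MASTER/BACKUP pair exercised by the two-amphora cases.
master = make_amphora_dict(vrrp_ip='192.0.2.46',
                           vrrp_port_id=uuidutils.generate_uuid())
backup = make_amphora_dict(role=constants.ROLE_BACKUP,
                           vrrp_ip='2001:db8::46',
                           vrrp_port_id=uuidutils.generate_uuid())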
View File
@ -60,10 +60,9 @@ class TestPoolFlows(base.TestCase):
pool_flow = self.PoolFlow.get_delete_pool_flow_internal('test') pool_flow = self.PoolFlow.get_delete_pool_flow_internal('test')
self.assertIsInstance(pool_flow, flow.Flow) self.assertIsInstance(pool_flow, flow.Flow)
self.assertIn('test', pool_flow.requires)
self.assertIn(constants.PROJECT_ID, pool_flow.requires) self.assertIn(constants.PROJECT_ID, pool_flow.requires)
self.assertEqual(2, len(pool_flow.requires)) self.assertEqual(1, len(pool_flow.requires))
self.assertEqual(1, len(pool_flow.provides)) self.assertEqual(1, len(pool_flow.provides))
def test_get_update_pool_flow(self): def test_get_update_pool_flow(self):
View File
@ -41,6 +41,7 @@ FAKE_CONFIG_FILE = 'fake config file'
_db_amphora_mock = mock.MagicMock() _db_amphora_mock = mock.MagicMock()
_db_amphora_mock.id = AMP_ID _db_amphora_mock.id = AMP_ID
_db_amphora_mock.status = constants.AMPHORA_ALLOCATED _db_amphora_mock.status = constants.AMPHORA_ALLOCATED
_db_amphora_mock.vrrp_ip = '198.51.100.65'
_amphora_mock = { _amphora_mock = {
constants.ID: AMP_ID, constants.ID: AMP_ID,
constants.STATUS: constants.AMPHORA_ALLOCATED, constants.STATUS: constants.AMPHORA_ALLOCATED,
@ -81,10 +82,14 @@ class TestAmphoraDriverTasks(base.TestCase):
active_connection_rety_interval=CONN_RETRY_INTERVAL) active_connection_rety_interval=CONN_RETRY_INTERVAL)
conf.config(group="controller_worker", conf.config(group="controller_worker",
loadbalancer_topology=constants.TOPOLOGY_SINGLE) loadbalancer_topology=constants.TOPOLOGY_SINGLE)
self.timeout_dict = {constants.REQ_CONN_TIMEOUT: 1,
constants.REQ_READ_TIMEOUT: 2,
constants.CONN_MAX_RETRIES: 3,
constants.CONN_RETRY_INTERVAL: 4}
super().setUp() super().setUp()
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get') @mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
def test_amp_listener_update(self, def test_amp_listeners_update(self,
mock_lb_get, mock_lb_get,
mock_driver, mock_driver,
mock_generate_uuid, mock_generate_uuid,
@ -95,23 +100,46 @@ class TestAmphoraDriverTasks(base.TestCase):
mock_amphora_repo_get, mock_amphora_repo_get,
mock_amphora_repo_update): mock_amphora_repo_update):
timeout_dict = {constants.REQ_CONN_TIMEOUT: 1,
constants.REQ_READ_TIMEOUT: 2,
constants.CONN_MAX_RETRIES: 3,
constants.CONN_RETRY_INTERVAL: 4}
mock_amphora_repo_get.return_value = _db_amphora_mock mock_amphora_repo_get.return_value = _db_amphora_mock
mock_lb_get.return_value = _db_load_balancer_mock mock_lb_get.return_value = _db_load_balancer_mock
amp_list_update_obj = amphora_driver_tasks.AmpListenersUpdate() amp_list_update_obj = amphora_driver_tasks.AmpListenersUpdate()
amp_list_update_obj.execute(_LB_mock, 0, amp_list_update_obj.execute(_LB_mock, _amphora_mock, self.timeout_dict)
[_amphora_mock], timeout_dict)
mock_driver.update_amphora_listeners.assert_called_once_with( mock_driver.update_amphora_listeners.assert_called_once_with(
_db_load_balancer_mock, _db_amphora_mock, timeout_dict) _db_load_balancer_mock, _db_amphora_mock, self.timeout_dict)
mock_driver.update_amphora_listeners.side_effect = Exception('boom')
amp_list_update_obj.execute(_LB_mock, _amphora_mock, self.timeout_dict)
mock_amphora_repo_update.assert_called_once_with(
_session_mock, AMP_ID, status=constants.ERROR)
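Assigning side_effect is what makes the mocked driver raise on the follow-up call here. A small standalone sketch of the pattern, including the per-call list form used elsewhere in these tests:

from unittest import mock

driver = mock.MagicMock()
# Each call consumes the next item: mock.DEFAULT yields the normal
# return value, while an exception instance is raised instead.
driver.update_amphora_listeners.side_effect = [mock.DEFAULT,
                                               Exception('boom')]

driver.update_amphora_listeners('lb', 'amp')      # first call succeeds
try:
    driver.update_amphora_listeners('lb', 'amp')  # second call raises
except Exception as err:
    assert str(err) == 'boom'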
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
def test_amp_index_listener_update(self,
mock_lb_get,
mock_driver,
mock_generate_uuid,
mock_log,
mock_get_session,
mock_listener_repo_get,
mock_listener_repo_update,
mock_amphora_repo_get,
mock_amphora_repo_update):
mock_amphora_repo_get.return_value = _db_amphora_mock
mock_lb_get.return_value = _db_load_balancer_mock
amp_list_update_obj = amphora_driver_tasks.AmphoraIndexListenerUpdate()
amp_list_update_obj.execute(_LB_mock, 0, [_amphora_mock],
self.timeout_dict)
mock_driver.update_amphora_listeners.assert_called_once_with(
_db_load_balancer_mock, _db_amphora_mock, self.timeout_dict)
mock_driver.update_amphora_listeners.side_effect = Exception('boom') mock_driver.update_amphora_listeners.side_effect = Exception('boom')
amp_list_update_obj.execute(_LB_mock, 0, amp_list_update_obj.execute(_LB_mock, 0,
[_amphora_mock], timeout_dict) [_amphora_mock], self.timeout_dict)
mock_amphora_repo_update.assert_called_once_with( mock_amphora_repo_update.assert_called_once_with(
_session_mock, AMP_ID, status=constants.ERROR) _session_mock, AMP_ID, status=constants.ERROR)
@ -155,6 +183,40 @@ class TestAmphoraDriverTasks(base.TestCase):
self.assertEqual(2, repo.ListenerRepository.update.call_count) self.assertEqual(2, repo.ListenerRepository.update.call_count)
self.assertIsNone(amp) self.assertIsNone(amp)
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
@mock.patch('octavia.controller.worker.task_utils.TaskUtils.'
'mark_listener_prov_status_error')
def test_amphora_index_listeners_reload(
self, mock_prov_status_error, mock_lb_repo_get,
mock_driver, mock_generate_uuid, mock_log, mock_get_session,
mock_listener_repo_get, mock_listener_repo_update,
mock_amphora_repo_get, mock_amphora_repo_update):
amphora_mock = mock.MagicMock()
listeners_reload_obj = (
amphora_driver_tasks.AmphoraIndexListenersReload())
mock_lb = mock.MagicMock()
mock_listener = mock.MagicMock()
mock_listener.id = '12345'
mock_amphora_repo_get.return_value = amphora_mock
mock_lb_repo_get.return_value = mock_lb
# Test no listeners
mock_lb.listeners = None
listeners_reload_obj.execute(mock_lb, 0, None)
mock_driver.reload.assert_not_called()
# Test with listeners
mock_driver.start.reset_mock()
mock_lb.listeners = [mock_listener]
listeners_reload_obj.execute(mock_lb, 0, [amphora_mock],
timeout_dict=self.timeout_dict)
mock_driver.reload.assert_called_once_with(mock_lb, amphora_mock,
self.timeout_dict)
# Test revert
mock_lb.listeners = [mock_listener]
listeners_reload_obj.revert(mock_lb)
mock_prov_status_error.assert_called_once_with('12345')
@mock.patch('octavia.controller.worker.task_utils.TaskUtils.' @mock.patch('octavia.controller.worker.task_utils.TaskUtils.'
'mark_listener_prov_status_error') 'mark_listener_prov_status_error')
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get') @mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
@ -293,6 +355,12 @@ class TestAmphoraDriverTasks(base.TestCase):
status=constants.ERROR) status=constants.ERROR)
self.assertIsNone(amp) self.assertIsNone(amp)
# Test revert when this task failed
repo.AmphoraRepository.update.reset_mock()
amp = amphora_finalize_obj.revert(
failure.Failure.from_exception(Exception('boom')), _amphora_mock)
repo.AmphoraRepository.update.assert_not_called()
def test_amphora_post_network_plug(self, def test_amphora_post_network_plug(self,
mock_driver, mock_driver,
mock_generate_uuid, mock_generate_uuid,
@ -335,6 +403,12 @@ class TestAmphoraDriverTasks(base.TestCase):
self.assertIsNone(amp) self.assertIsNone(amp)
# Test revert when this task failed
repo.AmphoraRepository.update.reset_mock()
amp = amphora_post_network_plug_obj.revert(
failure.Failure.from_exception(Exception('boom')), _amphora_mock)
repo.AmphoraRepository.update.assert_not_called()
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get') @mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
def test_amphorae_post_network_plug(self, mock_lb_get, def test_amphorae_post_network_plug(self, mock_lb_get,
mock_driver, mock_driver,
@ -365,6 +439,14 @@ class TestAmphoraDriverTasks(base.TestCase):
assert_called_once_with(_db_amphora_mock, assert_called_once_with(_db_amphora_mock,
network_data_models.Port(**port_mock))) network_data_models.Port(**port_mock)))
# Test with no ports to plug
mock_driver.post_network_plug.reset_mock()
_deltas_mock = {'0': [port_mock]}
amphora_post_network_plug_obj.execute(_LB_mock, _deltas_mock)
mock_driver.post_network_plug.assert_not_called()
# Test revert # Test revert
amp = amphora_post_network_plug_obj.revert(None, _LB_mock, amp = amphora_post_network_plug_obj.revert(None, _LB_mock,
_deltas_mock) _deltas_mock)
@ -387,6 +469,13 @@ class TestAmphoraDriverTasks(base.TestCase):
self.assertIsNone(amp) self.assertIsNone(amp)
# Test revert when this task failed
repo.AmphoraRepository.update.reset_mock()
amp = amphora_post_network_plug_obj.revert(
failure.Failure.from_exception(Exception('boom')), _amphora_mock,
None)
repo.AmphoraRepository.update.assert_not_called()
@mock.patch('octavia.db.repositories.LoadBalancerRepository.update') @mock.patch('octavia.db.repositories.LoadBalancerRepository.update')
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get') @mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
def test_amphora_post_vip_plug(self, mock_lb_get, def test_amphora_post_vip_plug(self, mock_lb_get,
@ -446,6 +535,13 @@ class TestAmphoraDriverTasks(base.TestCase):
self.assertIsNone(amp) self.assertIsNone(amp)
# Test revert when this task failed
repo.AmphoraRepository.update.reset_mock()
amp = amphora_post_vip_plug_obj.revert(
failure.Failure.from_exception(Exception('boom')), _amphora_mock,
None)
repo.AmphoraRepository.update.assert_not_called()
@mock.patch('octavia.db.repositories.LoadBalancerRepository.update') @mock.patch('octavia.db.repositories.LoadBalancerRepository.update')
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get') @mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
def test_amphorae_post_vip_plug(self, mock_lb_get, def test_amphorae_post_vip_plug(self, mock_lb_get,
@ -495,6 +591,13 @@ class TestAmphoraDriverTasks(base.TestCase):
self.assertIsNone(amp) self.assertIsNone(amp)
# Test revert when this task failed
repo.AmphoraRepository.update.reset_mock()
amp = amphora_post_vip_plug_obj.revert(
failure.Failure.from_exception(Exception('boom')), _amphora_mock,
None)
repo.AmphoraRepository.update.assert_not_called()
def test_amphora_cert_upload(self, def test_amphora_cert_upload(self,
mock_driver, mock_driver,
mock_generate_uuid, mock_generate_uuid,
@ -515,9 +618,7 @@ class TestAmphoraDriverTasks(base.TestCase):
mock_driver.upload_cert_amp.assert_called_once_with( mock_driver.upload_cert_amp.assert_called_once_with(
_db_amphora_mock, fer.decrypt(pem_file_mock.encode('utf-8'))) _db_amphora_mock, fer.decrypt(pem_file_mock.encode('utf-8')))
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
def test_amphora_update_vrrp_interface(self, def test_amphora_update_vrrp_interface(self,
mock_lb_get,
mock_driver, mock_driver,
mock_generate_uuid, mock_generate_uuid,
mock_log, mock_log,
@ -526,45 +627,57 @@ class TestAmphoraDriverTasks(base.TestCase):
mock_listener_repo_update, mock_listener_repo_update,
mock_amphora_repo_get, mock_amphora_repo_get,
mock_amphora_repo_update): mock_amphora_repo_update):
_db_load_balancer_mock.amphorae = _amphorae_mock FAKE_INTERFACE = 'fake0'
mock_lb_get.return_value = _db_load_balancer_mock mock_amphora_repo_get.return_value = _db_amphora_mock
mock_driver.get_interface_from_ip.side_effect = [FAKE_INTERFACE,
Exception('boom')]
timeout_dict = {constants.CONN_MAX_RETRIES: CONN_MAX_RETRIES, timeout_dict = {constants.CONN_MAX_RETRIES: CONN_MAX_RETRIES,
constants.CONN_RETRY_INTERVAL: CONN_RETRY_INTERVAL} constants.CONN_RETRY_INTERVAL: CONN_RETRY_INTERVAL}
amphora_update_vrrp_interface_obj = ( amphora_update_vrrp_interface_obj = (
amphora_driver_tasks.AmphoraUpdateVRRPInterface()) amphora_driver_tasks.AmphoraUpdateVRRPInterface())
amphora_update_vrrp_interface_obj.execute(_LB_mock) amphora_update_vrrp_interface_obj.execute(_amphora_mock, timeout_dict)
mock_driver.get_interface_from_ip.assert_called_once_with( mock_driver.get_interface_from_ip.assert_called_once_with(
_db_amphora_mock, _db_amphora_mock.vrrp_ip, _db_amphora_mock, _db_amphora_mock.vrrp_ip,
timeout_dict=timeout_dict) timeout_dict=timeout_dict)
mock_amphora_repo_update.assert_called_once_with(
_session_mock, _db_amphora_mock.id, vrrp_interface=FAKE_INTERFACE)
# Test revert # Test with an exception
mock_driver.reset_mock()
_db_load_balancer_mock.amphorae = _amphorae_mock
amphora_update_vrrp_interface_obj.revert("BADRESULT", _LB_mock)
mock_amphora_repo_update.assert_called_with(_session_mock,
_db_amphora_mock.id,
vrrp_interface=None)
mock_driver.reset_mock()
mock_amphora_repo_update.reset_mock() mock_amphora_repo_update.reset_mock()
_db_load_balancer_mock.amphorae = _amphorae_mock amphora_update_vrrp_interface_obj.execute(_amphora_mock, timeout_dict)
mock_amphora_repo_update.assert_called_once_with(
_session_mock, _db_amphora_mock.id, status=constants.ERROR)
failure_obj = failure.Failure.from_exception(Exception("TESTEXCEPT")) def test_amphora_index_update_vrrp_interface(
amphora_update_vrrp_interface_obj.revert(failure_obj, _LB_mock) self, mock_driver, mock_generate_uuid, mock_log, mock_get_session,
self.assertFalse(mock_amphora_repo_update.called) mock_listener_repo_get, mock_listener_repo_update,
mock_amphora_repo_get, mock_amphora_repo_update):
mock_amphora_repo_get.return_value = _db_amphora_mock
FAKE_INTERFACE = 'fake0'
mock_driver.get_interface_from_ip.side_effect = [FAKE_INTERFACE,
Exception('boom')]
# Test revert with exception timeout_dict = {constants.CONN_MAX_RETRIES: CONN_MAX_RETRIES,
mock_driver.reset_mock() constants.CONN_RETRY_INTERVAL: CONN_RETRY_INTERVAL}
amphora_update_vrrp_interface_obj = (
amphora_driver_tasks.AmphoraIndexUpdateVRRPInterface())
amphora_update_vrrp_interface_obj.execute(
0, [_amphora_mock], timeout_dict)
mock_driver.get_interface_from_ip.assert_called_once_with(
_db_amphora_mock, _db_amphora_mock.vrrp_ip,
timeout_dict=timeout_dict)
mock_amphora_repo_update.assert_called_once_with(
_session_mock, _db_amphora_mock.id, vrrp_interface=FAKE_INTERFACE)
# Test with an exception
mock_amphora_repo_update.reset_mock() mock_amphora_repo_update.reset_mock()
mock_amphora_repo_update.side_effect = Exception('fail') amphora_update_vrrp_interface_obj.execute(
0, [_amphora_mock], timeout_dict)
amphora_update_vrrp_interface_obj.revert("BADRESULT", _LB_mock) mock_amphora_repo_update.assert_called_once_with(
mock_amphora_repo_update.assert_called_with(_session_mock, _session_mock, _db_amphora_mock.id, status=constants.ERROR)
_db_amphora_mock.id,
vrrp_interface=None)
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get') @mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
def test_amphora_vrrp_update(self, def test_amphora_vrrp_update(self,
@ -578,15 +691,27 @@ class TestAmphoraDriverTasks(base.TestCase):
mock_amphora_repo_get, mock_amphora_repo_get,
mock_amphora_repo_update): mock_amphora_repo_update):
amphorae_network_config = mock.MagicMock() amphorae_network_config = mock.MagicMock()
mock_driver.update_vrrp_conf.side_effect = [mock.DEFAULT,
Exception('boom')]
mock_lb_get.return_value = _db_load_balancer_mock mock_lb_get.return_value = _db_load_balancer_mock
mock_amphora_repo_get.return_value = _db_amphora_mock
amphora_vrrp_update_obj = ( amphora_vrrp_update_obj = (
amphora_driver_tasks.AmphoraVRRPUpdate()) amphora_driver_tasks.AmphoraVRRPUpdate())
amphora_vrrp_update_obj.execute(_LB_mock, amphorae_network_config) amphora_vrrp_update_obj.execute(LB_ID, amphorae_network_config,
_amphora_mock, 'fakeint0')
mock_driver.update_vrrp_conf.assert_called_once_with( mock_driver.update_vrrp_conf.assert_called_once_with(
_db_load_balancer_mock, amphorae_network_config) _db_load_balancer_mock, amphorae_network_config,
_db_amphora_mock, None)
# Test with an exception
mock_amphora_repo_update.reset_mock()
amphora_vrrp_update_obj.execute(LB_ID, amphorae_network_config,
_amphora_mock, 'fakeint0')
mock_amphora_repo_update.assert_called_once_with(
_session_mock, _db_amphora_mock.id, status=constants.ERROR)
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get') @mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
def test_amphora_vrrp_stop(self, def test_amphora_index_vrrp_update(self,
mock_lb_get, mock_lb_get,
mock_driver, mock_driver,
mock_generate_uuid, mock_generate_uuid,
@ -596,16 +721,29 @@ class TestAmphoraDriverTasks(base.TestCase):
mock_listener_repo_update, mock_listener_repo_update,
mock_amphora_repo_get, mock_amphora_repo_get,
mock_amphora_repo_update): mock_amphora_repo_update):
amphora_vrrp_stop_obj = ( amphorae_network_config = mock.MagicMock()
amphora_driver_tasks.AmphoraVRRPStop()) mock_driver.update_vrrp_conf.side_effect = [mock.DEFAULT,
Exception('boom')]
mock_lb_get.return_value = _db_load_balancer_mock mock_lb_get.return_value = _db_load_balancer_mock
amphora_vrrp_stop_obj.execute(_LB_mock) mock_amphora_repo_get.return_value = _db_amphora_mock
mock_driver.stop_vrrp_service.assert_called_once_with( amphora_vrrp_update_obj = (
_db_load_balancer_mock) amphora_driver_tasks.AmphoraIndexVRRPUpdate())
amphora_vrrp_update_obj.execute(LB_ID, amphorae_network_config,
0, [_amphora_mock], 'fakeint0',
timeout_dict=self.timeout_dict)
mock_driver.update_vrrp_conf.assert_called_once_with(
_db_load_balancer_mock, amphorae_network_config, _db_amphora_mock,
self.timeout_dict)
# Test with an exception
mock_amphora_repo_update.reset_mock()
amphora_vrrp_update_obj.execute(LB_ID, amphorae_network_config,
0, [_amphora_mock], 'fakeint0')
mock_amphora_repo_update.assert_called_once_with(
_session_mock, _db_amphora_mock.id, status=constants.ERROR)
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
def test_amphora_vrrp_start(self, def test_amphora_vrrp_start(self,
mock_lb_get,
mock_driver, mock_driver,
mock_generate_uuid, mock_generate_uuid,
mock_log, mock_log,
@ -614,12 +752,30 @@ class TestAmphoraDriverTasks(base.TestCase):
mock_listener_repo_update, mock_listener_repo_update,
mock_amphora_repo_get, mock_amphora_repo_get,
mock_amphora_repo_update): mock_amphora_repo_update):
mock_amphora_repo_get.return_value = _db_amphora_mock
amphora_vrrp_start_obj = ( amphora_vrrp_start_obj = (
amphora_driver_tasks.AmphoraVRRPStart()) amphora_driver_tasks.AmphoraVRRPStart())
mock_lb_get.return_value = _db_load_balancer_mock amphora_vrrp_start_obj.execute(_amphora_mock,
amphora_vrrp_start_obj.execute(_LB_mock) timeout_dict=self.timeout_dict)
mock_driver.start_vrrp_service.assert_called_once_with( mock_driver.start_vrrp_service.assert_called_once_with(
_db_load_balancer_mock) _db_amphora_mock, self.timeout_dict)
def test_amphora_index_vrrp_start(self,
mock_driver,
mock_generate_uuid,
mock_log,
mock_get_session,
mock_listener_repo_get,
mock_listener_repo_update,
mock_amphora_repo_get,
mock_amphora_repo_update):
mock_amphora_repo_get.return_value = _db_amphora_mock
amphora_vrrp_start_obj = (
amphora_driver_tasks.AmphoraIndexVRRPStart())
amphora_vrrp_start_obj.execute(0, [_amphora_mock],
timeout_dict=self.timeout_dict)
mock_driver.start_vrrp_service.assert_called_once_with(
_db_amphora_mock, self.timeout_dict)
def test_amphora_compute_connectivity_wait(self, def test_amphora_compute_connectivity_wait(self,
mock_driver, mock_driver,
View File
@ -18,6 +18,7 @@ from cryptography import fernet
from oslo_config import cfg from oslo_config import cfg
from oslo_config import fixture as oslo_fixture from oslo_config import fixture as oslo_fixture
from oslo_utils import uuidutils from oslo_utils import uuidutils
import tenacity
from octavia.common import constants from octavia.common import constants
from octavia.common import exceptions from octavia.common import exceptions
@ -178,7 +179,8 @@ class TestComputeTasks(base.TestCase):
mock_driver.build.return_value = COMPUTE_ID mock_driver.build.return_value = COMPUTE_ID
# Test execute() # Test execute()
compute_id = createcompute.execute(_db_amphora_mock.id, ports=[_port]) compute_id = createcompute.execute(_db_amphora_mock.id, ports=[_port],
server_group_id=None)
# Validate that the build method was called properly # Validate that the build method was called properly
mock_driver.build.assert_called_once_with( mock_driver.build.assert_called_once_with(
@ -508,20 +510,54 @@ class TestComputeTasks(base.TestCase):
def test_delete_amphorae_on_load_balancer(self, mock_lb_get, mock_session, def test_delete_amphorae_on_load_balancer(self, mock_lb_get, mock_session,
mock_driver): mock_driver):
mock_driver.delete.side_effect = [mock.DEFAULT,
exceptions.OctaviaException('boom')]
delete_amps = compute_tasks.DeleteAmphoraeOnLoadBalancer() delete_amps = compute_tasks.DeleteAmphoraeOnLoadBalancer()
mock_lb_get.return_value = _db_load_balancer_mock mock_lb_get.return_value = _db_load_balancer_mock
delete_amps.execute(_load_balancer_mock) delete_amps.execute(_load_balancer_mock)
mock_driver.delete.assert_called_once_with(COMPUTE_ID) mock_driver.delete.assert_called_once_with(COMPUTE_ID)
# Test compute driver exception is raised
self.assertRaises(exceptions.OctaviaException, delete_amps.execute,
_load_balancer_mock)
@mock.patch('stevedore.driver.DriverManager.driver') @mock.patch('stevedore.driver.DriverManager.driver')
def test_compute_delete(self, mock_driver): def test_compute_delete(self, mock_driver):
mock_driver.delete.side_effect = [
mock.DEFAULT, exceptions.OctaviaException('boom'),
mock.DEFAULT, exceptions.OctaviaException('boom'),
exceptions.OctaviaException('boom'),
exceptions.OctaviaException('boom'),
exceptions.OctaviaException('boom')]
delete_compute = compute_tasks.ComputeDelete() delete_compute = compute_tasks.ComputeDelete()
# Limit the retry attempts for the test run to save time
delete_compute.execute.retry.stop = tenacity.stop_after_attempt(2)
delete_compute.execute(_amphora_mock) delete_compute.execute(_amphora_mock)
mock_driver.delete.assert_called_once_with(COMPUTE_ID) mock_driver.delete.assert_called_once_with(COMPUTE_ID)
# Test retry after a compute exception
mock_driver.reset_mock()
delete_compute.execute(_amphora_mock)
mock_driver.delete.assert_has_calls([mock.call(COMPUTE_ID),
mock.call(COMPUTE_ID)])
# Test passive failure
mock_driver.reset_mock()
delete_compute.execute(_amphora_mock, passive_failure=True)
mock_driver.delete.assert_has_calls([mock.call(COMPUTE_ID),
mock.call(COMPUTE_ID)])
# Test non-passive failure
mock_driver.reset_mock()
self.assertRaises(exceptions.OctaviaException, delete_compute.execute,
_amphora_mock, passive_failure=False)
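Capping delete_compute.execute.retry.stop works because tenacity exposes its retry controller on the decorated callable. A minimal standalone sketch of the same technique, with illustrative class and method names:

import tenacity


class FakeComputeDelete(object):
    def __init__(self):
        self.calls = 0

    @tenacity.retry(retry=tenacity.retry_if_exception_type(RuntimeError),
                    stop=tenacity.stop_after_attempt(5),
                    wait=tenacity.wait_fixed(0))
    def execute(self, compute_id):
        self.calls += 1
        if self.calls < 3:
            raise RuntimeError('transient failure')
        return compute_id


task_obj = FakeComputeDelete()
# The decorated callable carries its Retrying object as .retry, so a test
# can shrink the attempt budget without changing the production policy.
task_obj.execute.retry.stop = tenacity.stop_after_attempt(2)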
@mock.patch('stevedore.driver.DriverManager.driver') @mock.patch('stevedore.driver.DriverManager.driver')
def test_nova_server_group_create(self, mock_driver): def test_nova_server_group_create(self, mock_driver):
nova_sever_group_obj = compute_tasks.NovaServerGroupCreate() nova_sever_group_obj = compute_tasks.NovaServerGroupCreate()
@ -567,3 +603,32 @@ class TestComputeTasks(base.TestCase):
sg_id = None sg_id = None
nova_sever_group_obj.execute(sg_id) nova_sever_group_obj.execute(sg_id)
self.assertFalse(mock_driver.delete_server_group.called, sg_id) self.assertFalse(mock_driver.delete_server_group.called, sg_id)
@mock.patch('stevedore.driver.DriverManager.driver')
def test_attach_port(self, mock_driver):
COMPUTE_ID = uuidutils.generate_uuid()
PORT_ID = uuidutils.generate_uuid()
amphora_dict = {constants.COMPUTE_ID: COMPUTE_ID}
port_dict = {constants.ID: PORT_ID}
attach_port_obj = compute_tasks.AttachPort()
# Test execute
attach_port_obj.execute(amphora_dict, port_dict)
mock_driver.attach_network_or_port.assert_called_once_with(
COMPUTE_ID, port_id=PORT_ID)
# Test revert
mock_driver.reset_mock()
attach_port_obj.revert(amphora_dict, port_dict)
mock_driver.detach_port.assert_called_once_with(COMPUTE_ID, PORT_ID)
# Test revert exception
mock_driver.reset_mock()
mock_driver.detach_port.side_effect = [Exception('boom')]
# should not raise
attach_port_obj.revert(amphora_dict, port_dict)
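The revert above is expected to swallow the detach failure. A minimal sketch of the defensive-revert pattern this asserts, illustrative rather than the actual Octavia implementation:

class AttachPortSketch(object):
    def execute(self, compute_driver, compute_id, port_id):
        compute_driver.attach_network_or_port(compute_id, port_id=port_id)

    def revert(self, compute_driver, compute_id, port_id):
        try:
            compute_driver.detach_port(compute_id, port_id)
        except Exception:
            # Best effort only: a failed cleanup must not mask the
            # original error that triggered the revert.
            pass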
View File
@ -46,10 +46,10 @@ HA_PORT_ID = uuidutils.generate_uuid()
L7POLICY_ID = uuidutils.generate_uuid() L7POLICY_ID = uuidutils.generate_uuid()
L7RULE_ID = uuidutils.generate_uuid() L7RULE_ID = uuidutils.generate_uuid()
VIP_IP = '192.0.5.2' VIP_IP = '192.0.5.2'
VRRP_ID = 1
VRRP_IP = '192.0.5.3' VRRP_IP = '192.0.5.3'
HA_IP = '192.0.5.4' HA_IP = '192.0.5.4'
AMP_ROLE = 'FAKE_ROLE' AMP_ROLE = 'FAKE_ROLE'
VRRP_ID = random.randrange(255)
VRRP_PRIORITY = random.randrange(100) VRRP_PRIORITY = random.randrange(100)
CACHED_ZONE = 'zone1' CACHED_ZONE = 'zone1'
IMAGE_ID = uuidutils.generate_uuid() IMAGE_ID = uuidutils.generate_uuid()
@ -542,8 +542,15 @@ class TestDatabaseTasks(base.TestCase):
mock_amphora_repo_update, mock_amphora_repo_update,
mock_amphora_repo_delete): mock_amphora_repo_delete):
amphora_dict = {constants.ID: AMP_ID}
vip_dict = {constants.IP_ADDRESS: HA_IP,
constants.PORT_ID: HA_PORT_ID}
fixed_ips = [{constants.IP_ADDRESS: VRRP_IP}]
base_port_dict = {constants.ID: VRRP_PORT_ID,
constants.FIXED_IPS: fixed_ips}
update_amp_fo_details = database_tasks.UpdateAmpFailoverDetails() update_amp_fo_details = database_tasks.UpdateAmpFailoverDetails()
update_amp_fo_details.execute(self.amphora, self.amphora) update_amp_fo_details.execute(amphora_dict, vip_dict, base_port_dict)
mock_amphora_repo_update.assert_called_once_with( mock_amphora_repo_update.assert_called_once_with(
'TEST', 'TEST',
@ -1337,16 +1344,16 @@ class TestDatabaseTasks(base.TestCase):
mock_amphora_repo_update, mock_amphora_repo_update,
mock_amphora_repo_delete): mock_amphora_repo_delete):
unused_pool = data_models.Pool(id='unused_pool') unused_pool = data_models.Pool(id='unused_pool')
members1 = [{constants.MEMBER_ID: 'member1'}, members1 = [data_models.Member(id='member1'),
{constants.MEMBER_ID: 'member2'}] data_models.Member(id='member2')]
health_monitor = data_models.HealthMonitor(id='hm1') health_monitor = data_models.HealthMonitor(id='hm1')
default_pool = data_models.Pool(id='default_pool', default_pool = data_models.Pool(id='default_pool',
members=members1, members=members1,
health_monitor=health_monitor) health_monitor=health_monitor)
listener1 = data_models.Listener(id='listener1', listener1 = data_models.Listener(id='listener1',
default_pool=default_pool) default_pool=default_pool)
members2 = [{constants.MEMBER_ID: 'member3'}, members2 = [data_models.Member(id='member3'),
{constants.MEMBER_ID: 'member4'}] data_models.Member(id='member4')]
redirect_pool = data_models.Pool(id='redirect_pool', redirect_pool = data_models.Pool(id='redirect_pool',
members=members2) members=members2)
l7rules = [data_models.L7Rule(id='rule1')] l7rules = [data_models.L7Rule(id='rule1')]
@ -1954,6 +1961,22 @@ class TestDatabaseTasks(base.TestCase):
result = get_vip_from_lb_obj.execute(self.loadbalancer_mock) result = get_vip_from_lb_obj.execute(self.loadbalancer_mock)
self.assertEqual(_vip_mock.to_dict(), result) self.assertEqual(_vip_mock.to_dict(), result)
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
def test_get_loadbalancer(self, mock_lb_get, mock_generate_uuid, mock_LOG,
mock_get_session, mock_loadbalancer_repo_update,
mock_listener_repo_update,
mock_amphora_repo_update,
mock_amphora_repo_delete):
LB_ID = uuidutils.generate_uuid()
get_loadbalancer_obj = database_tasks.GetLoadBalancer()
mock_lb_get.return_value = _db_loadbalancer_mock
result = get_loadbalancer_obj.execute(LB_ID)
self.assertEqual(self.loadbalancer_mock, result)
mock_lb_get.assert_called_once_with('TEST', id=LB_ID)
@mock.patch('octavia.db.repositories.VRRPGroupRepository.create') @mock.patch('octavia.db.repositories.VRRPGroupRepository.create')
def test_create_vrrp_group_for_lb(self, def test_create_vrrp_group_for_lb(self,
mock_vrrp_group_create, mock_vrrp_group_create,
@ -1968,7 +1991,7 @@ class TestDatabaseTasks(base.TestCase):
mock_get_session.side_effect = ['TEST', mock_get_session.side_effect = ['TEST',
odb_exceptions.DBDuplicateEntry] odb_exceptions.DBDuplicateEntry]
create_vrrp_group = database_tasks.CreateVRRPGroupForLB() create_vrrp_group = database_tasks.CreateVRRPGroupForLB()
create_vrrp_group.execute(self.loadbalancer_mock) create_vrrp_group.execute(LB_ID)
mock_vrrp_group_create.assert_called_once_with( mock_vrrp_group_create.assert_called_once_with(
'TEST', load_balancer_id=LB_ID, 'TEST', load_balancer_id=LB_ID,
vrrp_group_name=LB_ID.replace('-', ''), vrrp_group_name=LB_ID.replace('-', ''),
View File
@ -18,10 +18,12 @@ from oslo_config import cfg
from oslo_config import fixture as oslo_fixture from oslo_config import fixture as oslo_fixture
from oslo_utils import uuidutils from oslo_utils import uuidutils
from taskflow.types import failure from taskflow.types import failure
import tenacity
from octavia.api.drivers import utils as provider_utils from octavia.api.drivers import utils as provider_utils
from octavia.common import constants from octavia.common import constants
from octavia.common import data_models as o_data_models from octavia.common import data_models as o_data_models
from octavia.common import exceptions
from octavia.controller.worker.v2.tasks import network_tasks from octavia.controller.worker.v2.tasks import network_tasks
from octavia.network import base as net_base from octavia.network import base as net_base
from octavia.network import data_models from octavia.network import data_models
@ -81,6 +83,11 @@ class TestNetworkTasks(base.TestCase):
self.db_amphora_mock.id = AMPHORA_ID self.db_amphora_mock.id = AMPHORA_ID
self.db_amphora_mock.compute_id = COMPUTE_ID self.db_amphora_mock.compute_id = COMPUTE_ID
self.db_amphora_mock.status = constants.AMPHORA_ALLOCATED self.db_amphora_mock.status = constants.AMPHORA_ALLOCATED
self.boot_net_id = NETWORK_ID
conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
conf.config(group="controller_worker",
amp_boot_network_list=[self.boot_net_id])
conf.config(group="networking", max_retries=1)
self.amphora_mock = {constants.ID: AMPHORA_ID, self.amphora_mock = {constants.ID: AMPHORA_ID,
constants.COMPUTE_ID: COMPUTE_ID, constants.COMPUTE_ID: COMPUTE_ID,
constants.LB_NETWORK_IP: IP_ADDRESS, constants.LB_NETWORK_IP: IP_ADDRESS,
@ -94,10 +101,85 @@ class TestNetworkTasks(base.TestCase):
} }
conf = oslo_fixture.Config(cfg.CONF) conf = oslo_fixture.Config(cfg.CONF)
conf.config(group="controller_worker", amp_boot_network_list=['netid']) conf.config(group="controller_worker",
amp_boot_network_list=[self.boot_net_id])
super().setUp() super().setUp()
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
@mock.patch('octavia.db.api.get_session', return_value=_session_mock)
def test_calculate_amphora_delta(self, mock_get_session, mock_lb_repo_get,
mock_get_net_driver):
LB_ID = uuidutils.generate_uuid()
DELETE_NETWORK_ID = uuidutils.generate_uuid()
MEMBER_NETWORK_ID = uuidutils.generate_uuid()
MEMBER_SUBNET_ID = uuidutils.generate_uuid()
VRRP_PORT_ID = uuidutils.generate_uuid()
mock_driver = mock.MagicMock()
mock_get_net_driver.return_value = mock_driver
member_mock = mock.MagicMock()
member_mock.subnet_id = MEMBER_SUBNET_ID
pool_mock = mock.MagicMock()
pool_mock.members = [member_mock]
lb_mock = mock.MagicMock()
lb_mock.pools = [pool_mock]
lb_dict = {constants.LOADBALANCER_ID: LB_ID}
amphora_dict = {constants.ID: AMPHORA_ID,
constants.COMPUTE_ID: COMPUTE_ID,
constants.VRRP_PORT_ID: VRRP_PORT_ID}
vrrp_port_mock = mock.MagicMock()
vrrp_port_mock.network_id = self.boot_net_id
vrrp_port_dict = {constants.NETWORK_ID: self.boot_net_id}
mock_subnet = mock.MagicMock()
mock_subnet.network_id = MEMBER_NETWORK_ID
nic1_delete_mock = mock.MagicMock()
nic1_delete_mock.network_id = DELETE_NETWORK_ID
nic2_keep_mock = mock.MagicMock()
nic2_keep_mock.network_id = self.boot_net_id
mock_lb_repo_get.return_value = lb_mock
mock_driver.get_port.return_value = vrrp_port_mock
mock_driver.get_subnet.return_value = mock_subnet
mock_driver.get_plugged_networks.return_value = [nic1_delete_mock,
nic2_keep_mock]
calc_amp_delta = network_tasks.CalculateAmphoraDelta()
# Test vrrp_port_id is None
result = calc_amp_delta.execute(lb_dict, amphora_dict, {})
self.assertEqual(AMPHORA_ID, result[constants.AMPHORA_ID])
self.assertEqual(COMPUTE_ID, result[constants.COMPUTE_ID])
self.assertEqual(1, len(result[constants.ADD_NICS]))
self.assertEqual(MEMBER_NETWORK_ID,
result[constants.ADD_NICS][0][constants.NETWORK_ID])
self.assertEqual(1, len(result[constants.DELETE_NICS]))
self.assertEqual(
DELETE_NETWORK_ID,
result[constants.DELETE_NICS][0][constants.NETWORK_ID])
mock_driver.get_port.assert_called_once_with(VRRP_PORT_ID)
mock_driver.get_subnet.assert_called_once_with(MEMBER_SUBNET_ID)
mock_driver.get_plugged_networks.assert_called_once_with(COMPUTE_ID)
# Test with vrrp_port_id
mock_driver.reset_mock()
result = calc_amp_delta.execute(lb_dict, amphora_dict, {},
vrrp_port=vrrp_port_dict)
self.assertEqual(AMPHORA_ID, result[constants.AMPHORA_ID])
self.assertEqual(COMPUTE_ID, result[constants.COMPUTE_ID])
self.assertEqual(1, len(result[constants.ADD_NICS]))
self.assertEqual(MEMBER_NETWORK_ID,
result[constants.ADD_NICS][0][constants.NETWORK_ID])
self.assertEqual(1, len(result[constants.DELETE_NICS]))
self.assertEqual(
DELETE_NETWORK_ID,
result[constants.DELETE_NICS][0][constants.NETWORK_ID])
mock_driver.get_port.assert_not_called()
mock_driver.get_subnet.assert_called_once_with(MEMBER_SUBNET_ID)
mock_driver.get_plugged_networks.assert_called_once_with(COMPUTE_ID)
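For reference, the delta dictionary these assertions walk has at least the following shape; the keys come from octavia.common.constants and the values here are placeholders:

from octavia.common import constants

example_delta = {
    constants.AMPHORA_ID: 'amphora-uuid',
    constants.COMPUTE_ID: 'compute-uuid',
    # Ports the task decided to plug into the amphora.
    constants.ADD_NICS: [{constants.NETWORK_ID: 'member-network-uuid'}],
    # Stale ports the task decided to unplug.
    constants.DELETE_NICS: [{constants.NETWORK_ID: 'old-network-uuid'}],
}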
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get') @mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
@mock.patch('octavia.db.api.get_session', return_value=_session_mock) @mock.patch('octavia.db.api.get_session', return_value=_session_mock)
def test_calculate_delta(self, mock_get_session, mock_get_lb, def test_calculate_delta(self, mock_get_session, mock_get_lb,
@ -110,9 +192,9 @@ class TestNetworkTasks(base.TestCase):
constants.VRRP_PORT_ID: PORT_ID} constants.VRRP_PORT_ID: PORT_ID}
mock_get_net_driver.return_value = mock_driver mock_get_net_driver.return_value = mock_driver
mock_driver.get_plugged_networks.return_value = [ mock_driver.get_plugged_networks.return_value = [
data_models.Interface(network_id='netid')] data_models.Interface(network_id=self.boot_net_id)]
mock_driver.get_port.return_value = data_models.Port( mock_driver.get_port.return_value = data_models.Port(
network_id='netid') network_id=self.boot_net_id)
EMPTY = {} EMPTY = {}
empty_deltas = {self.db_amphora_mock.id: data_models.Delta( empty_deltas = {self.db_amphora_mock.id: data_models.Delta(
amphora_id=AMPHORA_ID, amphora_id=AMPHORA_ID,
@ -179,7 +261,7 @@ class TestNetworkTasks(base.TestCase):
pool_mock.members = [member_mock] pool_mock.members = [member_mock]
mock_driver.get_plugged_networks.return_value = [ mock_driver.get_plugged_networks.return_value = [
data_models.Interface(network_id=3), data_models.Interface(network_id=3),
data_models.Interface(network_id='netid')] data_models.Interface(network_id=self.boot_net_id)]
self.assertEqual(empty_deltas, self.assertEqual(empty_deltas,
calc_delta.execute(self.load_balancer_mock, {})) calc_delta.execute(self.load_balancer_mock, {}))
@ -192,7 +274,7 @@ class TestNetworkTasks(base.TestCase):
pool_mock.members = [member_mock] pool_mock.members = [member_mock]
mock_driver.get_plugged_networks.return_value = [ mock_driver.get_plugged_networks.return_value = [
data_models.Interface(network_id=2), data_models.Interface(network_id=2),
data_models.Interface(network_id='netid')] data_models.Interface(network_id=self.boot_net_id)]
ndm = data_models.Delta(amphora_id=self.db_amphora_mock.id, ndm = data_models.Delta(amphora_id=self.db_amphora_mock.id,
compute_id=self.db_amphora_mock.compute_id, compute_id=self.db_amphora_mock.compute_id,
@ -210,7 +292,7 @@ class TestNetworkTasks(base.TestCase):
pool_mock.members = [] pool_mock.members = []
mock_driver.get_plugged_networks.return_value = [ mock_driver.get_plugged_networks.return_value = [
data_models.Interface(network_id=2), data_models.Interface(network_id=2),
data_models.Interface(network_id='netid') data_models.Interface(network_id=self.boot_net_id)
] ]
ndm = data_models.Delta(amphora_id=self.db_amphora_mock.id, ndm = data_models.Delta(amphora_id=self.db_amphora_mock.id,
@ -648,6 +730,7 @@ class TestNetworkTasks(base.TestCase):
t_constants.MOCK_QOS_POLICY_ID1, mock.ANY) t_constants.MOCK_QOS_POLICY_ID1, mock.ANY)
self.assertEqual(2, mock_driver.apply_qos_on_port.call_count) self.assertEqual(2, mock_driver.apply_qos_on_port.call_count)
mock_get_lb.return_value = null_qos_lb
mock_driver.reset_mock() mock_driver.reset_mock()
update_dict = {} update_dict = {}
net.execute(null_qos_lb_dict, update_dict=update_dict) net.execute(null_qos_lb_dict, update_dict=update_dict)
@ -685,7 +768,7 @@ class TestNetworkTasks(base.TestCase):
net.revert(None, pr_tm_dict, update_dict=update_dict) net.revert(None, pr_tm_dict, update_dict=update_dict)
mock_driver.apply_qos_on_port.assert_called_with( mock_driver.apply_qos_on_port.assert_called_with(
t_constants.MOCK_QOS_POLICY_ID2, mock.ANY) t_constants.MOCK_QOS_POLICY_ID2, mock.ANY)
self.assertEqual(2, mock_driver.apply_qos_on_port.call_count) self.assertEqual(1, mock_driver.apply_qos_on_port.call_count)
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get') @mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
@mock.patch('octavia.db.api.get_session', return_value=_session_mock) @mock.patch('octavia.db.api.get_session', return_value=_session_mock)
@ -770,6 +853,28 @@ class TestNetworkTasks(base.TestCase):
net_task.execute(listener) net_task.execute(listener)
mock_driver.update_vip.assert_called_once_with(lb, for_delete=True) mock_driver.update_vip.assert_called_once_with(lb, for_delete=True)
@mock.patch('octavia.db.api.get_session', return_value='TEST')
@mock.patch('octavia.db.repositories.AmphoraRepository.get')
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
def test_get_amphora_network_configs_by_id(
self, mock_lb_get, mock_amp_get,
mock_get_session, mock_get_net_driver):
LB_ID = uuidutils.generate_uuid()
AMP_ID = uuidutils.generate_uuid()
mock_driver = mock.MagicMock()
mock_get_net_driver.return_value = mock_driver
mock_amp_get.return_value = 'mock amphora'
mock_lb_get.return_value = 'mock load balancer'
net_task = network_tasks.GetAmphoraNetworkConfigsByID()
net_task.execute(LB_ID, AMP_ID)
mock_driver.get_network_configs.assert_called_once_with(
'mock load balancer', amphora='mock amphora')
mock_amp_get.assert_called_once_with('TEST', id=AMP_ID)
mock_lb_get.assert_called_once_with('TEST', id=LB_ID)
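
The assertions above pin down what the task must do. A minimal sketch inferred only from those assertions, with the session, repositories, and network driver passed in explicitly for illustration (this is not the actual task wiring):

    def get_amphora_network_configs_by_id(session, amphora_repo, lb_repo,
                                          network_driver, loadbalancer_id,
                                          amphora_id):
        # Look up the amphora and load balancer records, then ask the
        # network driver for that single amphora's network configuration.
        amphora = amphora_repo.get(session, id=amphora_id)
        loadbalancer = lb_repo.get(session, id=loadbalancer_id)
        return network_driver.get_network_configs(loadbalancer,
                                                  amphora=amphora)
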
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
@mock.patch('octavia.db.api.get_session', return_value=_session_mock)
def test_get_amphorae_network_configs(self, mock_session, mock_lb_get,
@ -854,49 +959,6 @@ class TestNetworkTasks(base.TestCase):
mock_driver.plug_port.assert_any_call(self.db_amphora_mock, port1)
mock_driver.plug_port.assert_any_call(self.db_amphora_mock, port2)
@mock.patch('octavia.db.repositories.AmphoraRepository.get')
@mock.patch('octavia.db.api.get_session', return_value=mock.MagicMock())
def test_plug_vip_port(self, mock_session, mock_get, mock_get_net_driver):
mock_driver = mock.MagicMock()
mock_get.return_value = self.db_amphora_mock
mock_get_net_driver.return_value = mock_driver
vrrp_port = mock.MagicMock()
amphorae_network_config = mock.MagicMock()
mock_driver.get_port.return_value = vrrp_port
plugvipport = network_tasks.PlugVIPPort()
amp = {constants.ID: AMPHORA_ID,
constants.COMPUTE_ID: '1234'}
plugvipport.execute(amp, amphorae_network_config)
mock_driver.plug_port.assert_called_once_with(self.db_amphora_mock,
vrrp_port)
dict_amp_config = {
AMPHORA_ID: {constants.VRRP_PORT: {constants.ID: 5555}}
}
# test revert
plugvipport.revert(None, amp, dict_amp_config)
mock_driver.unplug_port.assert_called_with(self.db_amphora_mock,
vrrp_port)
@mock.patch('octavia.db.repositories.AmphoraRepository.get')
@mock.patch('octavia.db.api.get_session', return_value=mock.MagicMock())
def test_wait_for_port_detach(self, mock_session, mock_get,
mock_get_net_driver):
mock_driver = mock.MagicMock()
mock_get.return_value = self.db_amphora_mock
mock_get_net_driver.return_value = mock_driver
amphora = {constants.ID: AMPHORA_ID,
constants.LB_NETWORK_IP: IP_ADDRESS}
waitforportdetach = network_tasks.WaitForPortDetach()
waitforportdetach.execute(amphora)
mock_driver.wait_for_port_detach.assert_called_once_with(
self.db_amphora_mock)
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
@mock.patch('octavia.db.api.get_session', return_value=_session_mock)
def test_update_vip_sg(self, mock_session, mock_lb_get,
@ -928,7 +990,7 @@ class TestNetworkTasks(base.TestCase):
mock_lb_get.return_value = LB
mock_get.return_value = self.db_amphora_mock
mock_get_net_driver.return_value = mock_driver
net = network_tasks.PlugVIPAmphora()
mockSubnet = mock_driver.get_subnet()
net.execute(self.load_balancer_mock, amphora, mockSubnet)
mock_driver.plug_aap_port.assert_called_once_with(
@ -943,7 +1005,7 @@ class TestNetworkTasks(base.TestCase):
mock_lb_get.return_value = LB
mock_get.return_value = self.db_amphora_mock
mock_get_net_driver.return_value = mock_driver
net = network_tasks.PlugVIPAmphora()
mockSubnet = mock.MagicMock()
amphora = {constants.ID: AMPHORA_ID,
constants.LB_NETWORK_IP: IP_ADDRESS}
@ -951,3 +1013,273 @@ class TestNetworkTasks(base.TestCase):
amphora, mockSubnet)
mock_driver.unplug_aap_port.assert_called_once_with(
LB.vip, self.db_amphora_mock, mockSubnet)
@mock.patch('octavia.controller.worker.v2.tasks.network_tasks.DeletePort.'
'update_progress')
def test_delete_port(self, mock_update_progress, mock_get_net_driver):
PORT_ID = uuidutils.generate_uuid()
mock_driver = mock.MagicMock()
mock_get_net_driver.return_value = mock_driver
mock_driver.delete_port.side_effect = [
mock.DEFAULT, exceptions.OctaviaException('boom'), mock.DEFAULT,
exceptions.OctaviaException('boom'),
exceptions.OctaviaException('boom'),
exceptions.OctaviaException('boom'),
exceptions.OctaviaException('boom'),
exceptions.OctaviaException('boom'),
exceptions.OctaviaException('boom')]
mock_driver.admin_down_port.side_effect = [
mock.DEFAULT, exceptions.OctaviaException('boom')]
net_task = network_tasks.DeletePort()
# Limit the retry attempts for the test run to save time
net_task.execute.retry.stop = tenacity.stop_after_attempt(2)
# Test port ID is None (no-op)
net_task.execute(None)
mock_update_progress.assert_not_called()
mock_driver.delete_port.assert_not_called()
# Test successful delete
mock_update_progress.reset_mock()
mock_driver.reset_mock()
net_task.execute(PORT_ID)
mock_update_progress.assert_called_once_with(0.5)
mock_driver.delete_port.assert_called_once_with(PORT_ID)
# Test exception and successful retry
mock_update_progress.reset_mock()
mock_driver.reset_mock()
net_task.execute(PORT_ID)
mock_update_progress.assert_has_calls([mock.call(0.5), mock.call(1.0)])
mock_driver.delete_port.assert_has_calls([mock.call(PORT_ID),
mock.call(PORT_ID)])
# Test passive failure
mock_update_progress.reset_mock()
mock_driver.reset_mock()
net_task.execute(PORT_ID, passive_failure=True)
mock_update_progress.assert_has_calls([mock.call(0.5), mock.call(1.0)])
mock_driver.delete_port.assert_has_calls([mock.call(PORT_ID),
mock.call(PORT_ID)])
mock_driver.admin_down_port.assert_called_once_with(PORT_ID)
# Test passive failure admin down failure
mock_update_progress.reset_mock()
mock_driver.reset_mock()
mock_driver.admin_down_port.reset_mock()
net_task.execute(PORT_ID, passive_failure=True)
mock_update_progress.assert_has_calls([mock.call(0.5), mock.call(1.0)])
mock_driver.delete_port.assert_has_calls([mock.call(PORT_ID),
mock.call(PORT_ID)])
mock_driver.admin_down_port.assert_called_once_with(PORT_ID)
# Test non-passive failure
mock_update_progress.reset_mock()
mock_driver.reset_mock()
mock_driver.admin_down_port.reset_mock()
mock_driver.admin_down_port.side_effect = [
exceptions.OctaviaException('boom')]
self.assertRaises(exceptions.OctaviaException, net_task.execute,
PORT_ID)
mock_update_progress.assert_has_calls([mock.call(0.5), mock.call(1.0)])
mock_driver.delete_port.assert_has_calls([mock.call(PORT_ID),
mock.call(PORT_ID)])
mock_driver.admin_down_port.assert_not_called()
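
The `net_task.execute.retry.stop` override above works because the task's execute method is wrapped by tenacity, which exposes its retry controller as an attribute of the wrapped function. A minimal sketch of that pattern, using a hypothetical task-like class rather than the real DeletePort task, whose retry and passive_failure handling is more involved:

    import tenacity

    class DeletePortSketch:
        def __init__(self, network_driver):
            self.network_driver = network_driver

        @tenacity.retry(retry=tenacity.retry_if_exception_type(Exception),
                        stop=tenacity.stop_after_attempt(5),
                        wait=tenacity.wait_exponential(max=10),
                        reraise=True)
        def execute(self, port_id):
            # A None port ID is a no-op so flows can hand in "no port".
            if port_id is None:
                return
            self.network_driver.delete_port(port_id)

    # Tests can shrink the retry budget exactly as done above:
    # task.execute.retry.stop = tenacity.stop_after_attempt(2)
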
def test_create_vip_base_port(self, mock_get_net_driver):
AMP_ID = uuidutils.generate_uuid()
PORT_ID = uuidutils.generate_uuid()
VIP_NETWORK_ID = uuidutils.generate_uuid()
VIP_QOS_ID = uuidutils.generate_uuid()
VIP_SG_ID = uuidutils.generate_uuid()
VIP_SUBNET_ID = uuidutils.generate_uuid()
VIP_IP_ADDRESS = '203.0.113.81'
mock_driver = mock.MagicMock()
mock_get_net_driver.return_value = mock_driver
vip_dict = {constants.IP_ADDRESS: VIP_IP_ADDRESS,
constants.NETWORK_ID: VIP_NETWORK_ID,
constants.QOS_POLICY_ID: VIP_QOS_ID,
constants.SUBNET_ID: VIP_SUBNET_ID}
port_mock = mock.MagicMock()
port_mock.id = PORT_ID
mock_driver.create_port.side_effect = [
port_mock, exceptions.OctaviaException('boom'),
exceptions.OctaviaException('boom'),
exceptions.OctaviaException('boom')]
mock_driver.delete_port.side_effect = [mock.DEFAULT, Exception('boom')]
net_task = network_tasks.CreateVIPBasePort()
# Limit the retry attempts for the test run to save time
net_task.execute.retry.stop = tenacity.stop_after_attempt(2)
# Test execute
result = net_task.execute(vip_dict, VIP_SG_ID, AMP_ID)
self.assertEqual(port_mock.to_dict(), result)
mock_driver.create_port.assert_called_once_with(
VIP_NETWORK_ID, name=constants.AMP_BASE_PORT_PREFIX + AMP_ID,
fixed_ips=[{constants.SUBNET_ID: VIP_SUBNET_ID}],
secondary_ips=[VIP_IP_ADDRESS], security_group_ids=[VIP_SG_ID],
qos_policy_id=VIP_QOS_ID)
# Test execute exception
mock_driver.reset_mock()
self.assertRaises(exceptions.OctaviaException, net_task.execute,
vip_dict, None, AMP_ID)
# Test revert when this task failed
mock_driver.reset_mock()
net_task.revert(failure.Failure.from_exception(Exception('boom')),
vip_dict, VIP_SG_ID, AMP_ID)
mock_driver.delete_port.assert_not_called()
# Test revert
mock_driver.reset_mock()
net_task.revert([port_mock], vip_dict, VIP_SG_ID, AMP_ID)
mock_driver.delete_port.assert_called_once_with(PORT_ID)
# Test revert exception
mock_driver.reset_mock()
net_task.revert([port_mock], vip_dict, VIP_SG_ID, AMP_ID)
mock_driver.delete_port.assert_called_once_with(PORT_ID)
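
Pieced together from the create_port assertion above, a hedged sketch of the call the task is expected to make; the handling of a missing security group ID here is illustrative only:

    def create_vip_base_port(network_driver, vip, vip_sg_id, amphora_id):
        # Build the amphora base (VRRP) port carrying the VIP address as a
        # secondary IP; reuse the VIP security group when one is supplied.
        port = network_driver.create_port(
            vip[constants.NETWORK_ID],
            name=constants.AMP_BASE_PORT_PREFIX + amphora_id,
            fixed_ips=[{constants.SUBNET_ID: vip[constants.SUBNET_ID]}],
            secondary_ips=[vip[constants.IP_ADDRESS]],
            security_group_ids=[vip_sg_id] if vip_sg_id else [],
            qos_policy_id=vip[constants.QOS_POLICY_ID])
        return port.to_dict()
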
@mock.patch('time.sleep')
def test_admin_down_port(self, mock_sleep, mock_get_net_driver):
PORT_ID = uuidutils.generate_uuid()
mock_driver = mock.MagicMock()
mock_get_net_driver.return_value = mock_driver
port_down_mock = mock.MagicMock()
port_down_mock.status = constants.DOWN
port_up_mock = mock.MagicMock()
port_up_mock.status = constants.UP
mock_driver.set_port_admin_state_up.side_effect = [
mock.DEFAULT, net_base.PortNotFound, mock.DEFAULT, mock.DEFAULT,
Exception('boom')]
mock_driver.get_port.side_effect = [port_down_mock, port_up_mock]
net_task = network_tasks.AdminDownPort()
# Test execute
net_task.execute(PORT_ID)
mock_driver.set_port_admin_state_up.assert_called_once_with(PORT_ID,
False)
mock_driver.get_port.assert_called_once_with(PORT_ID)
# Test passive fail on port not found
mock_driver.reset_mock()
net_task.execute(PORT_ID)
mock_driver.set_port_admin_state_up.assert_called_once_with(PORT_ID,
False)
mock_driver.get_port.assert_not_called()
# Test passive fail on port stays up
mock_driver.reset_mock()
net_task.execute(PORT_ID)
mock_driver.set_port_admin_state_up.assert_called_once_with(PORT_ID,
False)
mock_driver.get_port.assert_called_once_with(PORT_ID)
# Test revert when this task failed
mock_driver.reset_mock()
net_task.revert(failure.Failure.from_exception(Exception('boom')),
PORT_ID)
mock_driver.set_port_admin_state_up.assert_not_called()
# Test revert
mock_driver.reset_mock()
net_task.revert(None, PORT_ID)
mock_driver.set_port_admin_state_up.assert_called_once_with(PORT_ID,
True)
# Test revert exception passive failure
mock_driver.reset_mock()
net_task.revert(None, PORT_ID)
mock_driver.set_port_admin_state_up.assert_called_once_with(PORT_ID,
True)
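
A minimal sketch of the behavior these cases exercise, assuming the same net_base and constants modules this test file already uses; the real AdminDownPort task may add logging and timing around the passive-failure path:

    def admin_down_port(network_driver, port_id):
        try:
            network_driver.set_port_admin_state_up(port_id, False)
        except net_base.PortNotFound:
            return  # Port is already gone; nothing left to bring down.
        port = network_driver.get_port(port_id)
        if port.status != constants.DOWN:
            # Passive failure: the port stayed up, but the flow continues.
            pass
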
@mock.patch('octavia.common.utils.get_vip_security_group_name')
def test_get_vip_security_group_id(self, mock_get_sg_name,
mock_get_net_driver):
LB_ID = uuidutils.generate_uuid()
SG_ID = uuidutils.generate_uuid()
SG_NAME = 'fake_SG_name'
mock_driver = mock.MagicMock()
mock_get_net_driver.return_value = mock_driver
mock_get_sg_name.return_value = SG_NAME
sg_mock = mock.MagicMock()
sg_mock.id = SG_ID
mock_driver.get_security_group.side_effect = [
sg_mock, None, net_base.SecurityGroupNotFound,
net_base.SecurityGroupNotFound]
net_task = network_tasks.GetVIPSecurityGroupID()
# Test execute
result = net_task.execute(LB_ID)
mock_driver.get_security_group.assert_called_once_with(SG_NAME)
mock_get_sg_name.assert_called_once_with(LB_ID)
# Test execute with empty get subnet response
mock_driver.reset_mock()
mock_get_sg_name.reset_mock()
result = net_task.execute(LB_ID)
self.assertIsNone(result)
mock_get_sg_name.assert_called_once_with(LB_ID)
# Test execute no security group found, security groups enabled
mock_driver.reset_mock()
mock_get_sg_name.reset_mock()
mock_driver.sec_grp_enabled = True
self.assertRaises(net_base.SecurityGroupNotFound, net_task.execute,
LB_ID)
mock_driver.get_security_group.assert_called_once_with(SG_NAME)
mock_get_sg_name.assert_called_once_with(LB_ID)
# Test execute no security group found, security groups disabled
mock_driver.reset_mock()
mock_get_sg_name.reset_mock()
mock_driver.sec_grp_enabled = False
result = net_task.execute(LB_ID)
self.assertIsNone(result)
mock_driver.get_security_group.assert_called_once_with(SG_NAME)
mock_get_sg_name.assert_called_once_with(LB_ID)
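
The four branches above reduce to one decision: resolve the expected security group name for the load balancer and only treat a missing group as an error when the driver enforces security groups. A hedged sketch under those assumptions, with `utils` standing for octavia.common.utils:

    def get_vip_security_group_id(network_driver, loadbalancer_id):
        sg_name = utils.get_vip_security_group_name(loadbalancer_id)
        try:
            sec_group = network_driver.get_security_group(sg_name)
        except net_base.SecurityGroupNotFound:
            if network_driver.sec_grp_enabled:
                raise
            return None
        return sec_group.id if sec_group else None
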


@ -0,0 +1,47 @@
# Copyright 2020 Red Hat, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from taskflow import retry
from octavia.controller.worker.v2.tasks import retry_tasks
import octavia.tests.unit.base as base
class TestRetryTasks(base.TestCase):
def setUp(self):
super(TestRetryTasks, self).setUp()
@mock.patch('time.sleep')
def test_sleeping_retry_times_controller(self, mock_sleep):
retry_ctrlr = retry_tasks.SleepingRetryTimesController(
attempts=2, name='test_retry')
# Test on_failure that should RETRY
history = ['boom']
result = retry_ctrlr.on_failure(history)
self.assertEqual(retry.RETRY, result)
# Test on_failure retries exhausted, should REVERT
history = ['boom', 'bang', 'pow']
result = retry_ctrlr.on_failure(history)
self.assertEqual(retry.REVERT, result)
# Test revert - should not raise
retry_ctrlr.revert(history)
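
For context, a hedged sketch of a taskflow retry controller with the behavior this test checks: RETRY while attempts remain (sleeping between tries) and REVERT once they are exhausted. It is illustrative only, not a copy of the SleepingRetryTimesController in octavia.controller.worker.v2.tasks.retry_tasks:

    import time

    from taskflow import retry

    class SleepingTimesSketch(retry.Times):
        def __init__(self, attempts=1, interval=1, name=None, **kwargs):
            super().__init__(attempts, name=name, **kwargs)
            self._max_attempts = attempts
            self._interval = interval

        def on_failure(self, history, *args, **kwargs):
            # history holds one entry per failed attempt so far.
            if len(history) < self._max_attempts:
                time.sleep(self._interval)
                return retry.RETRY
            return retry.REVERT

        def revert(self, history, *args, **kwargs):
            # Nothing to undo; this controller only schedules retries.
            pass
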


@ -21,6 +21,7 @@ import os
import graphviz
from taskflow import engines
from octavia.api.drivers import utils
from octavia.common import constants
from octavia.tests.common import data_model_helpers as dmh
@ -56,6 +57,9 @@ def generate(flow_list, output_directory):
amp1 = dmh.generate_amphora()
amp2 = dmh.generate_amphora()
lb = dmh.generate_load_balancer(amphorae=[amp1, amp2])
if 'v2' in current_tuple[0]:
lb = utils.lb_dict_to_provider_dict(lb.to_dict())
amp1 = amp1.to_dict()
current_engine = engines.load(
get_flow_method(amp1, 2))
elif (current_tuple[1] == 'LoadBalancerFlows' and
@ -66,11 +70,27 @@ def generate(flow_list, output_directory):
elif (current_tuple[1] == 'LoadBalancerFlows' and
current_tuple[2] == 'get_delete_load_balancer_flow'):
lb = dmh.generate_load_balancer()
if 'v2' in current_tuple[0]:
lb = utils.lb_dict_to_provider_dict(lb.to_dict())
delete_flow = get_flow_method(lb)
else:
delete_flow, store = get_flow_method(lb)
current_engine = engines.load(delete_flow)
elif (current_tuple[1] == 'LoadBalancerFlows' and
current_tuple[2] == 'get_cascade_delete_load_balancer_flow'):
listeners = [{constants.LISTENER_ID:
'368dffc7-7440-4ee0-aca5-11052d001b05'},
{constants.LISTENER_ID:
'd9c45ec4-9dbe-491b-9f21-6886562348bf'}]
pools = [{constants.POOL_ID:
'6886a40b-1f2a-41a3-9ece-5c51845a7ac4'},
{constants.POOL_ID:
'08ada7a2-3eff-42c6-bdd8-b6f2ecd73358'}]
lb = dmh.generate_load_balancer()
if 'v2' in current_tuple[0]:
lb = utils.lb_dict_to_provider_dict(lb.to_dict())
delete_flow = get_flow_method(lb, listeners, pools)
else:
delete_flow, store = get_flow_method(lb)
current_engine = engines.load(delete_flow)
elif (current_tuple[1] == 'LoadBalancerFlows' and
@ -80,6 +100,13 @@ def generate(flow_list, output_directory):
lb = dmh.generate_load_balancer(
amphorae=[amp1, amp2],
topology=constants.TOPOLOGY_ACTIVE_STANDBY)
if 'v2' in current_tuple[0]:
lb = utils.lb_dict_to_provider_dict(lb.to_dict())
flavor = {constants.LOADBALANCER_TOPOLOGY:
constants.TOPOLOGY_ACTIVE_STANDBY}
lb[constants.FLAVOR] = flavor
amp1 = amp1.to_dict()
amp2 = amp2.to_dict()
current_engine = engines.load(
get_flow_method([amp1, amp2], lb))
elif (current_tuple[1] == 'MemberFlows' and
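
The v2 branches above all apply the same conversion: build the usual data models, then flatten them into provider-format dicts because the v2 flows take dicts rather than objects. A condensed illustration of that pattern using only helpers already imported by this tool:

    amp = dmh.generate_amphora()
    lb = dmh.generate_load_balancer(
        amphorae=[amp], topology=constants.TOPOLOGY_ACTIVE_STANDBY)
    # v2 flows consume provider dicts, not data model objects.
    provider_lb = utils.lb_dict_to_provider_dict(lb.to_dict())
    provider_lb[constants.FLAVOR] = {
        constants.LOADBALANCER_TOPOLOGY: constants.TOPOLOGY_ACTIVE_STANDBY}
    provider_amp = amp.to_dict()
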

tools/flow-list-v2.txt Normal file

@ -0,0 +1,32 @@
# List of TaskFlow flows that should be documented
# Some flows are used by other flows, so just list the primary flows here
# Format:
# module class flow
octavia.controller.worker.v2.flows.amphora_flows AmphoraFlows get_create_amphora_flow
octavia.controller.worker.v2.flows.amphora_flows AmphoraFlows get_failover_amphora_flow
octavia.controller.worker.v2.flows.amphora_flows AmphoraFlows cert_rotate_amphora_flow
octavia.controller.worker.v2.flows.load_balancer_flows LoadBalancerFlows get_create_load_balancer_flow
octavia.controller.worker.v2.flows.load_balancer_flows LoadBalancerFlows get_delete_load_balancer_flow
octavia.controller.worker.v2.flows.load_balancer_flows LoadBalancerFlows get_cascade_delete_load_balancer_flow
octavia.controller.worker.v2.flows.load_balancer_flows LoadBalancerFlows get_update_load_balancer_flow
octavia.controller.worker.v2.flows.load_balancer_flows LoadBalancerFlows get_failover_LB_flow
octavia.controller.worker.v2.flows.listener_flows ListenerFlows get_create_listener_flow
octavia.controller.worker.v2.flows.listener_flows ListenerFlows get_create_all_listeners_flow
octavia.controller.worker.v2.flows.listener_flows ListenerFlows get_delete_listener_flow
octavia.controller.worker.v2.flows.listener_flows ListenerFlows get_update_listener_flow
octavia.controller.worker.v2.flows.pool_flows PoolFlows get_create_pool_flow
octavia.controller.worker.v2.flows.pool_flows PoolFlows get_delete_pool_flow
octavia.controller.worker.v2.flows.pool_flows PoolFlows get_update_pool_flow
octavia.controller.worker.v2.flows.member_flows MemberFlows get_create_member_flow
octavia.controller.worker.v2.flows.member_flows MemberFlows get_delete_member_flow
octavia.controller.worker.v2.flows.member_flows MemberFlows get_update_member_flow
octavia.controller.worker.v2.flows.member_flows MemberFlows get_batch_update_members_flow
octavia.controller.worker.v2.flows.health_monitor_flows HealthMonitorFlows get_create_health_monitor_flow
octavia.controller.worker.v2.flows.health_monitor_flows HealthMonitorFlows get_delete_health_monitor_flow
octavia.controller.worker.v2.flows.health_monitor_flows HealthMonitorFlows get_update_health_monitor_flow
octavia.controller.worker.v2.flows.l7policy_flows L7PolicyFlows get_create_l7policy_flow
octavia.controller.worker.v2.flows.l7policy_flows L7PolicyFlows get_delete_l7policy_flow
octavia.controller.worker.v2.flows.l7policy_flows L7PolicyFlows get_update_l7policy_flow
octavia.controller.worker.v2.flows.l7rule_flows L7RuleFlows get_create_l7rule_flow
octavia.controller.worker.v2.flows.l7rule_flows L7RuleFlows get_delete_l7rule_flow
octavia.controller.worker.v2.flows.l7rule_flows L7RuleFlows get_update_l7rule_flow
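
Each non-comment line resolves to a flow-builder method via its module path, class name, and method name. A minimal parsing sketch assuming this whitespace-separated format (the real handling lives in tools/create_flow_docs.py and may differ in detail):

    import importlib

    def resolve_flow_method(line):
        # "module class method" -> bound flow-builder method
        module_name, class_name, method_name = line.split()
        flow_class = getattr(importlib.import_module(module_name), class_name)
        return getattr(flow_class(), method_name)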