Support creating an LB in a specified AZ

Co-Authored-By: Adam Harwell <flux.adam@gmail.com>
Change-Id: I55d6c1a0b3e6060d6dacc13ee67d87f0219ef7de
changes/62/693762/22
Sam Morrison 3 years ago committed by Adam Harwell
parent 45cb1e4e16
commit 3cce347129

@ -130,35 +130,37 @@ and validated with the following exceptions:
As of the writing of this specification the create load balancer object may
contain the following:
+-----------------+--------+-----------------------------------------------+
| Name | Type | Description |
+=================+========+===============================================+
| admin_state_up | bool | Admin state: True if up, False if down. |
+-----------------+--------+-----------------------------------------------+
| description | string | A human-readable description for the resource.|
+-----------------+--------+-----------------------------------------------+
| flavor | dict | The flavor keys and values. |
+-----------------+--------+-----------------------------------------------+
| listeners | list | A list of `Listener objects`_. |
+-----------------+--------+-----------------------------------------------+
| loadbalancer_id | string | ID of load balancer to create. |
+-----------------+--------+-----------------------------------------------+
| name | string | Human-readable name of the resource. |
+-----------------+--------+-----------------------------------------------+
| pools | list | A list of `Pool object`_. |
+-----------------+--------+-----------------------------------------------+
| project_id | string | ID of the project owning this resource. |
+-----------------+--------+-----------------------------------------------+
| vip_address | string | The IP address of the Virtual IP (VIP). |
+-----------------+--------+-----------------------------------------------+
| vip_network_id | string | The ID of the network for the VIP. |
+-----------------+--------+-----------------------------------------------+
| vip_port_id | string | The ID of the VIP port. |
+-----------------+--------+-----------------------------------------------+
|vip_qos_policy_id| string | The ID of the qos policy for the VIP. |
+-----------------+--------+-----------------------------------------------+
| vip_subnet_id | string | The ID of the subnet for the VIP. |
+-----------------+--------+-----------------------------------------------+
+-------------------+--------+-----------------------------------------------+
| Name | Type | Description |
+===================+========+===============================================+
| admin_state_up | bool | Admin state: True if up, False if down. |
+-------------------+--------+-----------------------------------------------+
| description | string | A human-readable description for the resource.|
+-------------------+--------+-----------------------------------------------+
| flavor | dict | The flavor keys and values. |
+-------------------+--------+-----------------------------------------------+
| availability_zone | dict | The availability zone keys and values. |
+-------------------+--------+-----------------------------------------------+
| listeners | list | A list of `Listener objects`_. |
+-------------------+--------+-----------------------------------------------+
| loadbalancer_id | string | ID of load balancer to create. |
+-------------------+--------+-----------------------------------------------+
| name | string | Human-readable name of the resource. |
+-------------------+--------+-----------------------------------------------+
| pools | list | A list of `Pool object`_. |
+-------------------+--------+-----------------------------------------------+
| project_id | string | ID of the project owning this resource. |
+-------------------+--------+-----------------------------------------------+
| vip_address | string | The IP address of the Virtual IP (VIP). |
+-------------------+--------+-----------------------------------------------+
| vip_network_id | string | The ID of the network for the VIP. |
+-------------------+--------+-----------------------------------------------+
| vip_port_id | string | The ID of the VIP port. |
+-------------------+--------+-----------------------------------------------+
| vip_qos_policy_id | string | The ID of the qos policy for the VIP. |
+-------------------+--------+-----------------------------------------------+
| vip_subnet_id | string | The ID of the subnet for the VIP. |
+-------------------+--------+-----------------------------------------------+
The driver is expected to validate that the driver supports the request
and raise an exception if the request cannot be accepted.
@ -1696,10 +1698,11 @@ flavor is supported. Both functions are synchronous.
.. _flavor specification: ../specs/version1.0/flavors.html
get_supported_flavor_keys
^^^^^^^^^^^^^^^^^^^^^^^^^
get_supported_flavor_metadata
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Retrieves a dictionary of supported flavor keys and their description.
For example:
.. code-block:: python
@ -1748,6 +1751,71 @@ Following are interface definitions for flavor support:
"""
raise NotImplementedError()
Availability Zone
-----------------
Octavia availability zones have no explicit spec, but are modeled closely
after the existing `flavor specification`_.
Support for availability_zones will be provided through two provider driver
interfaces, one to query supported availability zone metadata keys and another
to validate that an availability zone is supported. Both functions are
synchronous.
get_supported_availability_zone_metadata
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Retrieves a dictionary of supported availability zone keys and their
description. For example:
.. code-block:: python
{"compute_zone": "The compute availability zone to use for this loadbalancer.",
"management_network": "The management network ID for the loadbalancer."}
validate_availability_zone
^^^^^^^^^^^^^^^^^^^^^^^^^^
Validates that the driver supports the availability zone metadata dictionary.
The validate_availability_zone method will be passed an availability zone
metadata dictionary that the driver will validate. This is used when an
operator uploads a new availability zone that applies to the driver.
The validate_availability_zone method will either return or raise a
``UnsupportedOptionError`` exception.
Following are interface definitions for availability zone support:
.. code-block:: python
def get_supported_availability_zone_metadata():
"""Returns a dict of supported availability zone metadata keys.
The returned dictionary will include key/value pairs, 'name' and
'description.'
:returns: The availability zone metadata dictionary
:raises DriverError: An unexpected error occurred in the driver.
:raises NotImplementedError: The driver does not support availability zones.
"""
raise NotImplementedError()
.. code-block:: python
def validate_availability_zone(availability_zone_metadata):
"""Validates if driver can support the availability zone.
:param availability_zone_metadata: Dictionary with az metadata.
:type availability_zone_metadata: dict
:return: Nothing if the availability zone is valid and supported.
:raises DriverError: An unexpected error occurred in the driver.
:raises NotImplementedError: The driver does not support availability
zones.
:raises UnsupportedOptionError: The driver does not support one of
the configuration options.
"""
raise NotImplementedError()
Exception Model
---------------

@ -84,8 +84,11 @@ class AmphoraProviderDriver(driver_base.ProviderDriver):
def loadbalancer_create(self, loadbalancer):
if loadbalancer.flavor == driver_dm.Unset:
loadbalancer.flavor = None
if loadbalancer.availability_zone == driver_dm.Unset:
loadbalancer.availability_zone = None
payload = {consts.LOAD_BALANCER_ID: loadbalancer.loadbalancer_id,
consts.FLAVOR: loadbalancer.flavor}
consts.FLAVOR: loadbalancer.flavor,
consts.AVAILABILITY_ZONE: loadbalancer.availability_zone}
self.client.cast({}, 'create_load_balancer', **payload)
def loadbalancer_delete(self, loadbalancer, cascade=False):

@ -86,8 +86,11 @@ class AmphoraProviderDriver(driver_base.ProviderDriver):
def loadbalancer_create(self, loadbalancer):
if loadbalancer.flavor == driver_dm.Unset:
loadbalancer.flavor = None
if loadbalancer.availability_zone == driver_dm.Unset:
loadbalancer.availability_zone = None
payload = {consts.LOAD_BALANCER_ID: loadbalancer.loadbalancer_id,
consts.FLAVOR: loadbalancer.flavor}
consts.FLAVOR: loadbalancer.flavor,
consts.AVAILABILITY_ZONE: loadbalancer.availability_zone}
self.client.cast({}, 'create_load_balancer', **payload)
def loadbalancer_delete(self, loadbalancer, cascade=False):

@ -328,6 +328,46 @@ class LoadBalancersController(base.BaseController):
raise exceptions.DisabledOption(option='flavor',
value=load_balancer.flavor_id)
def _validate_and_return_az_dict(self, lock_session, driver, lb_dict):
az_dict = {}
if 'availability_zone' in lb_dict:
try:
az = self.repositories.availability_zone.get(
lock_session, name=lb_dict['availability_zone'])
az_dict = (
self.repositories.availability_zone
.get_availability_zone_metadata_dict(lock_session, az.name)
)
except sa_exception.NoResultFound:
raise exceptions.ValidationException(
detail=_("Invalid availability_zone."))
# Make sure the driver will still accept the availability zone metadata
if az_dict:
try:
driver_utils.call_provider(driver.name,
driver.validate_availability_zone,
az_dict)
except NotImplementedError:
raise exceptions.ProviderNotImplementedError(
prov=driver.name, user_msg="This provider does not support"
" availability zones.")
return az_dict
def _validate_availability_zone(self, session, load_balancer):
if not isinstance(load_balancer.availability_zone, wtypes.UnsetType):
az = self.repositories.availability_zone.get(
session, name=load_balancer.availability_zone)
if not az:
raise exceptions.ValidationException(
detail=_("Invalid availability zone."))
if not az.enabled:
raise exceptions.DisabledOption(
option='availability_zone',
value=load_balancer.availability_zone)
@wsme_pecan.wsexpose(lb_types.LoadBalancerFullRootResponse,
body=lb_types.LoadBalancerRootPOST, status_code=201)
def post(self, load_balancer):
@ -351,6 +391,8 @@ class LoadBalancersController(base.BaseController):
self._validate_flavor(context.session, load_balancer)
self._validate_availability_zone(context.session, load_balancer)
provider = self._get_provider(context.session, load_balancer)
# Load the driver early as it also provides validation
@ -383,6 +425,9 @@ class LoadBalancersController(base.BaseController):
flavor_dict = self._apply_flavor_to_lb_dict(lock_session, driver,
lb_dict)
az_dict = self._validate_and_return_az_dict(lock_session, driver,
lb_dict)
db_lb = self.repositories.create_load_balancer_and_vip(
lock_session, lb_dict, vip_dict)
@ -391,6 +436,9 @@ class LoadBalancersController(base.BaseController):
# flavor dict instead of just the flavor_id we store in the DB.
lb_dict['flavor'] = flavor_dict
# Do the same with the availability_zone dict
lb_dict['availability_zone'] = az_dict
# See if the provider driver wants to create the VIP port
octavia_owned = False
try:

@ -55,6 +55,7 @@ class LoadBalancerResponse(BaseLoadBalancerType):
flavor_id = wtypes.wsattr(wtypes.UuidType())
vip_qos_policy_id = wtypes.wsattr(wtypes.UuidType())
tags = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType()))
availability_zone = wtypes.wsattr(wtypes.StringType())
@classmethod
def from_data_model(cls, data_model, children=False):
@ -122,6 +123,7 @@ class LoadBalancerPOST(BaseLoadBalancerType):
provider = wtypes.wsattr(wtypes.StringType(max_length=64))
tags = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType(max_length=255)))
flavor_id = wtypes.wsattr(wtypes.UuidType())
availability_zone = wtypes.wsattr(wtypes.StringType(max_length=255))
class LoadBalancerRootPOST(types.BaseType):

@ -461,7 +461,7 @@ class LoadBalancer(BaseDataModel):
topology=None, vip=None, listeners=None, amphorae=None,
pools=None, vrrp_group=None, server_group_id=None,
created_at=None, updated_at=None, provider=None, tags=None,
flavor_id=None):
flavor_id=None, availability_zone=None):
self.id = id
self.project_id = project_id
@ -482,6 +482,7 @@ class LoadBalancer(BaseDataModel):
self.provider = provider
self.tags = tags or []
self.flavor_id = flavor_id
self.availability_zone = availability_zone
def update(self, update_dict):
for key, value in update_dict.items():

@ -24,7 +24,8 @@ class ComputeBase(object):
def build(self, name="amphora_name", amphora_flavor=None,
image_id=None, image_tag=None, image_owner=None,
key_name=None, sec_groups=None, network_ids=None,
config_drive_files=None, user_data=None, server_group_id=None):
config_drive_files=None, user_data=None, server_group_id=None,
availability_zone=None):
"""Build a new amphora.
:param name: Optional name for Amphora
@ -46,6 +47,7 @@ class ComputeBase(object):
well or a string
:param server_group_id: Optional server group id(uuid) which is used
for anti_affinity feature
:param availability_zone: Name of the compute availability zone.
:raises ComputeBuildException: if compute failed to build amphora
:returns: UUID of amphora

@ -32,15 +32,16 @@ class NoopManager(object):
image_id=None, image_tag=None, image_owner=None,
key_name=None, sec_groups=None, network_ids=None,
config_drive_files=None, user_data=None, port_ids=None,
server_group_id=None):
server_group_id=None, availability_zone=None):
LOG.debug("Compute %s no-op, build name %s, amphora_flavor %s, "
"image_id %s, image_tag %s, image_owner %s, key_name %s, "
"sec_groups %s, network_ids %s, config_drive_files %s, "
"user_data %s, port_ids %s, server_group_id %s",
"user_data %s, port_ids %s, server_group_id %s, "
"availability_zone %s",
self.__class__.__name__,
name, amphora_flavor, image_id, image_tag, image_owner,
key_name, sec_groups, network_ids, config_drive_files,
user_data, port_ids, server_group_id)
user_data, port_ids, server_group_id, availability_zone)
self.computeconfig[(name, amphora_flavor, image_id, image_tag,
image_owner, key_name, user_data,
server_group_id)] = (
@ -127,13 +128,13 @@ class NoopComputeDriver(driver_base.ComputeBase):
image_id=None, image_tag=None, image_owner=None,
key_name=None, sec_groups=None, network_ids=None,
config_drive_files=None, user_data=None, port_ids=None,
server_group_id=None):
server_group_id=None, availability_zone=None):
compute_id = self.driver.build(name, amphora_flavor,
image_id, image_tag, image_owner,
key_name, sec_groups, network_ids,
config_drive_files, user_data, port_ids,
server_group_id)
server_group_id, availability_zone)
return compute_id
def delete(self, compute_id):

@ -100,7 +100,7 @@ class VirtualMachineManager(compute_base.ComputeBase):
image_id=None, image_tag=None, image_owner=None,
key_name=None, sec_groups=None, network_ids=None,
port_ids=None, config_drive_files=None, user_data=None,
server_group_id=None):
server_group_id=None, availability_zone=None):
'''Create a new virtual machine.
:param name: optional name for amphora
@ -123,6 +123,7 @@ class VirtualMachineManager(compute_base.ComputeBase):
well or a string
:param server_group_id: Optional server group id(uuid) which is used
for anti_affinity feature
:param availability_zone: Name of the compute availability zone.
:raises ComputeBuildException: if nova failed to build virtual machine
:returns: UUID of amphora
@ -141,6 +142,7 @@ class VirtualMachineManager(compute_base.ComputeBase):
server_group = None if server_group_id is None else {
"group": server_group_id}
az_name = availability_zone or CONF.nova.availability_zone
image_id = _get_image_uuid(
self._glance_client, image_id, image_tag, image_owner)
@ -176,7 +178,7 @@ class VirtualMachineManager(compute_base.ComputeBase):
userdata=user_data,
config_drive=True,
scheduler_hints=server_group,
availability_zone=CONF.nova.availability_zone
availability_zone=az_name
)
return amphora.id

@ -20,6 +20,7 @@ from oslo_log import log as logging
from oslo_utils import timeutils
from sqlalchemy.orm import exc as sqlalchemy_exceptions
from octavia.common import constants
from octavia.controller.worker.v1 import controller_worker as cw
from octavia.db import api as db_api
from octavia.db import repositories as repo
@ -32,6 +33,7 @@ class SpareAmphora(object):
def __init__(self):
self.amp_repo = repo.AmphoraRepository()
self.spares_repo = repo.SparesPoolRepository()
self.az_repo = repo.AvailabilityZoneRepository()
self.cw = cw.ControllerWorker()
def spare_check(self):
@ -46,28 +48,51 @@ class SpareAmphora(object):
spare_amp_row = self.spares_repo.get_for_update(lock_session)
conf_spare_cnt = CONF.house_keeping.spare_amphora_pool_size
curr_spare_cnt = self.amp_repo.get_spare_amphora_count(session)
LOG.debug("Required Spare Amphora count : %d", conf_spare_cnt)
LOG.debug("Current Spare Amphora count : %d", curr_spare_cnt)
diff_count = conf_spare_cnt - curr_spare_cnt
availability_zones, links = self.az_repo.get_all(session,
enabled=True)
compute_zones = set()
for az in availability_zones:
az_meta = self.az_repo.get_availability_zone_metadata_dict(
session, az.name)
compute_zones.add(az_meta.get(constants.COMPUTE_ZONE))
# If no AZs objects then build in the configured AZ (even if None)
# Also if configured AZ is not None then also build in there
# as could be different to the current AZs objects.
if CONF.nova.availability_zone or not compute_zones:
compute_zones.add(CONF.nova.availability_zone)
# When the current spare amphora is less than required
amp_booting = []
if diff_count > 0:
LOG.info("Initiating creation of %d spare amphora.",
diff_count)
# Call Amphora Create Flow diff_count times
with futures.ThreadPoolExecutor(
max_workers=CONF.house_keeping.spare_amphora_pool_size
) as executor:
for i in range(1, diff_count + 1):
LOG.debug("Starting amphorae number %d ...", i)
amp_booting.append(
executor.submit(self.cw.create_amphora))
else:
LOG.debug("Current spare amphora count satisfies the "
"requirement")
for az_name in compute_zones:
# TODO(rm_work): If az_name is None, this will get ALL spares
# across all AZs. This is the safest/most backwards compatible
# way I can think of, as cached_zone on the amphora records
# won't ever match. This should not impact any existing deploys
# with no AZ configured, as the behavior should be identical
# in that case. In the case of multiple AZs configured, it will
# simply ensure there are at least <N> spares *somewhere*, but
# will function more accurately if the operator actually
# configures the AZ setting properly.
curr_spare_cnt = self.amp_repo.get_spare_amphora_count(
session, availability_zone=az_name)
LOG.debug("Current Spare Amphora count for AZ %s: %d",
az_name, curr_spare_cnt)
diff_count = conf_spare_cnt - curr_spare_cnt
# When the current spare amphora is less than required
if diff_count > 0:
LOG.info("Initiating creation of %d spare amphora "
"for az %s.", diff_count, az_name)
# Call Amphora Create Flow diff_count times
with futures.ThreadPoolExecutor(
max_workers=conf_spare_cnt) as executor:
for i in range(1, diff_count + 1):
LOG.debug("Starting amphorae number %d ...", i)
amp_booting.append(executor.submit(
self.cw.create_amphora, az_name))
else:
LOG.debug("Current spare amphora count for AZ %s "
"satisfies the requirement", az_name)
# Wait for the amphora boot threads to finish
futures.wait(amp_booting)

@ -40,9 +40,10 @@ class Endpoints(object):
).driver
def create_load_balancer(self, context, load_balancer_id,
flavor=None):
flavor=None, availability_zone=None):
LOG.info('Creating load balancer \'%s\'...', load_balancer_id)
self.worker.create_load_balancer(load_balancer_id, flavor)
self.worker.create_load_balancer(load_balancer_id, flavor,
availability_zone)
def update_load_balancer(self, context, load_balancer_id,
load_balancer_updates):

@ -37,9 +37,10 @@ class Endpoints(object):
self.worker = controller_worker.ControllerWorker()
def create_load_balancer(self, context, load_balancer_id,
flavor=None):
flavor=None, availability_zone=None):
LOG.info('Creating load balancer \'%s\'...', load_balancer_id)
self.worker.create_load_balancer(load_balancer_id, flavor)
self.worker.create_load_balancer(load_balancer_id, flavor,
availability_zone)
def update_load_balancer(self, context, load_balancer_id,
load_balancer_updates):

@ -70,6 +70,7 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine):
self._l7policy_repo = repo.L7PolicyRepository()
self._l7rule_repo = repo.L7RuleRepository()
self._flavor_repo = repo.FlavorRepository()
self._az_repo = repo.AvailabilityZoneRepository()
super(ControllerWorker, self).__init__()
@ -84,7 +85,7 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine):
return repo.get(db_apis.get_session(), id=id)
def create_amphora(self):
def create_amphora(self, availability_zone=None):
"""Creates an Amphora.
This is used to create spare amphora.
@ -92,12 +93,17 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine):
:returns: amphora_id
"""
try:
store = {constants.BUILD_TYPE_PRIORITY:
constants.LB_CREATE_SPARES_POOL_PRIORITY,
constants.FLAVOR: None,
constants.AVAILABILITY_ZONE: None}
if availability_zone:
store[constants.AVAILABILITY_ZONE] = (
self._az_repo.get_availability_zone_metadata_dict(
db_apis.get_session(), availability_zone))
create_amp_tf = self._taskflow_load(
self._amphora_flows.get_create_amphora_flow(),
store={constants.BUILD_TYPE_PRIORITY:
constants.LB_CREATE_SPARES_POOL_PRIORITY,
constants.FLAVOR: None}
)
store=store)
with tf_logging.DynamicLoggingListener(create_amp_tf, log=LOG):
create_amp_tf.run()
@ -306,7 +312,8 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine):
wait=tenacity.wait_incrementing(
RETRY_INITIAL_DELAY, RETRY_BACKOFF, RETRY_MAX),
stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS))
def create_load_balancer(self, load_balancer_id, flavor=None):
def create_load_balancer(self, load_balancer_id, flavor=None,
availability_zone=None):
"""Creates a load balancer by allocating Amphorae.
First tries to allocate an existing Amphora in READY state.
@ -328,7 +335,8 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine):
store = {constants.LOADBALANCER_ID: load_balancer_id,
constants.BUILD_TYPE_PRIORITY:
constants.LB_CREATE_NORMAL_PRIORITY,
constants.FLAVOR: flavor}
constants.FLAVOR: flavor,
constants.AVAILABILITY_ZONE: availability_zone}
topology = lb.topology
@ -846,6 +854,12 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine):
db_apis.get_session(), lb.flavor_id))
else:
stored_params[constants.FLAVOR] = {}
if lb and lb.availability_zone:
stored_params[constants.AVAILABILITY_ZONE] = (
self._az_repo.get_availability_zone_metadata_dict(
db_apis.get_session(), lb.availability_zone))
else:
stored_params[constants.AVAILABILITY_ZONE] = {}
failover_amphora_tf = self._taskflow_load(
self._amphora_flows.get_failover_flow(

@ -55,12 +55,13 @@ class AmphoraFlows(object):
create_amphora_flow.add(compute_tasks.CertComputeCreate(
requires=(constants.AMPHORA_ID, constants.SERVER_PEM,
constants.BUILD_TYPE_PRIORITY, constants.FLAVOR),
constants.BUILD_TYPE_PRIORITY, constants.FLAVOR,
constants.AVAILABILITY_ZONE),
provides=constants.COMPUTE_ID))
else:
create_amphora_flow.add(compute_tasks.ComputeCreate(
requires=(constants.AMPHORA_ID, constants.BUILD_TYPE_PRIORITY,
constants.FLAVOR),
constants.FLAVOR, constants.AVAILABILITY_ZONE),
provides=constants.COMPUTE_ID))
create_amphora_flow.add(database_tasks.MarkAmphoraBootingInDB(
requires=(constants.AMPHORA_ID, constants.COMPUTE_ID)))
@ -145,7 +146,8 @@ class AmphoraFlows(object):
constants.SERVER_PEM,
constants.BUILD_TYPE_PRIORITY,
constants.SERVER_GROUP_ID,
constants.FLAVOR
constants.FLAVOR,
constants.AVAILABILITY_ZONE,
),
provides=constants.COMPUTE_ID))
else:
@ -155,7 +157,8 @@ class AmphoraFlows(object):
constants.AMPHORA_ID,
constants.SERVER_PEM,
constants.BUILD_TYPE_PRIORITY,
constants.FLAVOR
constants.FLAVOR,
constants.AVAILABILITY_ZONE,
),
provides=constants.COMPUTE_ID))
else:
@ -166,7 +169,8 @@ class AmphoraFlows(object):
constants.AMPHORA_ID,
constants.BUILD_TYPE_PRIORITY,
constants.SERVER_GROUP_ID,
constants.FLAVOR
constants.FLAVOR,
constants.AVAILABILITY_ZONE,
),
provides=constants.COMPUTE_ID))
else:
@ -175,7 +179,8 @@ class AmphoraFlows(object):
requires=(
constants.AMPHORA_ID,
constants.BUILD_TYPE_PRIORITY,
constants.FLAVOR
constants.FLAVOR,
constants.AVAILABILITY_ZONE,
),
provides=constants.COMPUTE_ID))
@ -256,7 +261,8 @@ class AmphoraFlows(object):
# Setup the task that maps an amphora to a load balancer
allocate_and_associate_amp = database_tasks.MapLoadbalancerToAmphora(
name=sf_name + '-' + constants.MAP_LOADBALANCER_TO_AMPHORA,
requires=(constants.LOADBALANCER_ID, constants.FLAVOR),
requires=(constants.LOADBALANCER_ID, constants.FLAVOR,
constants.AVAILABILITY_ZONE),
provides=constants.AMPHORA_ID)
# Define a subflow for if we successfully map an amphora

@ -52,7 +52,8 @@ class ComputeCreate(BaseComputeTask):
def execute(self, amphora_id, config_drive_files=None,
build_type_priority=constants.LB_CREATE_NORMAL_PRIORITY,
server_group_id=None, ports=None, flavor=None):
server_group_id=None, ports=None, flavor=None,
availability_zone=None):
"""Create an amphora
:returns: an amphora
@ -64,12 +65,7 @@ class ComputeCreate(BaseComputeTask):
LOG.debug("Compute create execute for amphora with id %s", amphora_id)
user_data_config_drive = CONF.controller_worker.user_data_config_drive
key_name = CONF.controller_worker.amp_ssh_key_name
# TODO(rm_work): amp_ssh_access_allowed is deprecated in Pike.
# Remove the following two lines in the S release.
ssh_access = CONF.controller_worker.amp_ssh_access_allowed
key_name = None if not ssh_access else key_name
# Apply an Octavia flavor customizations
if flavor:
@ -81,6 +77,14 @@ class ComputeCreate(BaseComputeTask):
topology = CONF.controller_worker.loadbalancer_topology
amp_compute_flavor = CONF.controller_worker.amp_flavor_id
if availability_zone:
amp_availability_zone = availability_zone.get(
constants.COMPUTE_ZONE)
amp_network = availability_zone.get(constants.MANAGEMENT_NETWORK)
if amp_network:
network_ids = [amp_network]
else:
amp_availability_zone = None
try:
if CONF.haproxy_amphora.build_rate_limit != -1:
self.rate_limit.add_to_build_request_queue(
@ -113,7 +117,8 @@ class ComputeCreate(BaseComputeTask):
port_ids=[port.id for port in ports],
config_drive_files=config_drive_files,
user_data=user_data,
server_group_id=server_group_id)
server_group_id=server_group_id,
availability_zone=amp_availability_zone)
LOG.debug("Server created with id: %s for amphora id: %s",
compute_id, amphora_id)
@ -144,7 +149,8 @@ class ComputeCreate(BaseComputeTask):
class CertComputeCreate(ComputeCreate):
def execute(self, amphora_id, server_pem,
build_type_priority=constants.LB_CREATE_NORMAL_PRIORITY,
server_group_id=None, ports=None, flavor=None):
server_group_id=None, ports=None, flavor=None,
availability_zone=None):
"""Create an amphora
:returns: an amphora
@ -162,7 +168,8 @@ class CertComputeCreate(ComputeCreate):
return super(CertComputeCreate, self).execute(
amphora_id, config_drive_files=config_drive_files,
build_type_priority=build_type_priority,
server_group_id=server_group_id, ports=ports, flavor=flavor)
server_group_id=server_group_id, ports=ports, flavor=flavor,
availability_zone=availability_zone)
class DeleteAmphoraeOnLoadBalancer(BaseComputeTask):

@ -499,7 +499,8 @@ class AssociateFailoverAmphoraWithLBID(BaseDatabaseTask):
class MapLoadbalancerToAmphora(BaseDatabaseTask):
"""Maps and assigns a load balancer to an amphora in the database."""
def execute(self, loadbalancer_id, server_group_id=None, flavor=None):
def execute(self, loadbalancer_id, server_group_id=None, flavor=None,
availability_zone=None):
"""Allocates an Amphora for the load balancer in the database.
:param loadbalancer_id: The load balancer id to map to an amphora
@ -522,9 +523,15 @@ class MapLoadbalancerToAmphora(BaseDatabaseTask):
"allocation.")
return None
if availability_zone:
amp_az = availability_zone.get(constants.COMPUTE_ZONE)
else:
amp_az = CONF.nova.availability_zone
amp = self.amphora_repo.allocate_and_associate(
db_apis.get_session(),
loadbalancer_id)
loadbalancer_id,
amp_az)
if amp is None:
LOG.debug("No Amphora available for load balancer with id %s",
loadbalancer_id)

@ -71,6 +71,7 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine):
self._l7policy_repo = repo.L7PolicyRepository()
self._l7rule_repo = repo.L7RuleRepository()
self._flavor_repo = repo.FlavorRepository()
self._az_repo = repo.AvailabilityZoneRepository()
super(ControllerWorker, self).__init__()
@ -85,7 +86,7 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine):
return repo.get(db_apis.get_session(), id=id)
def create_amphora(self):
def create_amphora(self, availability_zone=None):
"""Creates an Amphora.
This is used to create spare amphora.
@ -93,12 +94,17 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine):
:returns: amphora_id
"""
try:
store = {constants.BUILD_TYPE_PRIORITY:
constants.LB_CREATE_SPARES_POOL_PRIORITY,
constants.FLAVOR: None,
constants.AVAILABILITY_ZONE: None}
if availability_zone:
store[constants.AVAILABILITY_ZONE] = (
self._az_repo.get_availability_zone_metadata_dict(
db_apis.get_session(), availability_zone))
create_amp_tf = self._taskflow_load(
self._amphora_flows.get_create_amphora_flow(),
store={constants.BUILD_TYPE_PRIORITY:
constants.LB_CREATE_SPARES_POOL_PRIORITY,
constants.FLAVOR: None}
)
store=store)
with tf_logging.DynamicLoggingListener(create_amp_tf, log=LOG):
create_amp_tf.run()
@ -305,7 +311,8 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine):
wait=tenacity.wait_incrementing(
RETRY_INITIAL_DELAY, RETRY_BACKOFF, RETRY_MAX),
stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS))
def create_load_balancer(self, load_balancer_id, flavor=None):
def create_load_balancer(self, load_balancer_id, flavor=None,
availability_zone=None):
"""Creates a load balancer by allocating Amphorae.
First tries to allocate an existing Amphora in READY state.
@ -327,7 +334,8 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine):
store = {constants.LOADBALANCER_ID: load_balancer_id,
constants.BUILD_TYPE_PRIORITY:
constants.LB_CREATE_NORMAL_PRIORITY,
constants.FLAVOR: flavor}
constants.FLAVOR: flavor,
constants.AVAILABILITY_ZONE: availability_zone}
topology = lb.topology
@ -873,6 +881,12 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine):
db_apis.get_session(), lb.flavor_id))
else:
stored_params[constants.FLAVOR] = {}
if lb and lb.availability_zone:
stored_params[constants.AVAILABILITY_ZONE] = (
self._az_repo.get_availability_zone_metadata_dict(
db_apis.get_session(), lb.availability_zone))
else:
stored_params[constants.AVAILABILITY_ZONE] = {}
failover_amphora_tf = self._taskflow_load(
self._amphora_flows.get_failover_flow(

@ -150,7 +150,8 @@ class AmphoraFlows(object):
constants.SERVER_PEM,
constants.BUILD_TYPE_PRIORITY,
constants.SERVER_GROUP_ID,
constants.FLAVOR
constants.FLAVOR,
constants.AVAILABILITY_ZONE,
),
provides=constants.COMPUTE_ID))
else:
@ -160,7 +161,8 @@ class AmphoraFlows(object):
constants.AMPHORA_ID,
constants.SERVER_PEM,
constants.BUILD_TYPE_PRIORITY,
constants.FLAVOR
constants.FLAVOR,
constants.AVAILABILITY_ZONE,
),
provides=constants.COMPUTE_ID))
else:
@ -171,7 +173,8 @@ class AmphoraFlows(object):
constants.AMPHORA_ID,
constants.BUILD_TYPE_PRIORITY,
constants.SERVER_GROUP_ID,
constants.FLAVOR
constants.FLAVOR,
constants.AVAILABILITY_ZONE,
),
provides=constants.COMPUTE_ID))
else:
@ -180,7 +183,8 @@ class AmphoraFlows(object):
requires=(
constants.AMPHORA_ID,
constants.BUILD_TYPE_PRIORITY,
constants.FLAVOR
constants.FLAVOR,
constants.AVAILABILITY_ZONE,
),
provides=constants.COMPUTE_ID))
@ -266,7 +270,8 @@ class AmphoraFlows(object):
# Setup the task that maps an amphora to a load balancer
allocate_and_associate_amp = database_tasks.MapLoadbalancerToAmphora(
name=sf_name + '-' + constants.MAP_LOADBALANCER_TO_AMPHORA,
requires=(constants.LOADBALANCER_ID, constants.FLAVOR),
requires=(constants.LOADBALANCER_ID, constants.FLAVOR,
constants.AVAILABILITY_ZONE),
provides=constants.AMPHORA_ID)
# Define a subflow for if we successfully map an amphora

@ -52,7 +52,8 @@ class ComputeCreate(BaseComputeTask):
def execute(self, amphora_id, config_drive_files=None,
build_type_priority=constants.LB_CREATE_NORMAL_PRIORITY,
server_group_id=None, ports=None, flavor=None):
server_group_id=None, ports=None, flavor=None,
availability_zone=None):
"""Create an amphora
:returns: an amphora
@ -64,12 +65,7 @@ class ComputeCreate(BaseComputeTask):
LOG.debug("Compute create execute for amphora with id %s", amphora_id)
user_data_config_drive = CONF.controller_worker.user_data_config_drive
key_name = CONF.controller_worker.amp_ssh_key_name
# TODO(rm_work): amp_ssh_access_allowed is deprecated in Pike.
# Remove the following two lines in the S release.
ssh_access = CONF.controller_worker.amp_ssh_access_allowed
key_name = None if not ssh_access else key_name
# Apply an Octavia flavor customizations
if flavor:
@ -81,6 +77,14 @@ class ComputeCreate(BaseComputeTask):
topology = CONF.controller_worker.loadbalancer_topology
amp_compute_flavor = CONF.controller_worker.amp_flavor_id
if availability_zone:
amp_availability_zone = availability_zone.get(
constants.COMPUTE_ZONE)
amp_network = availability_zone.get(constants.MANAGEMENT_NETWORK)
if amp_network:
network_ids = [amp_network]
else:
amp_availability_zone = None
try:
if CONF.haproxy_amphora.build_rate_limit != -1:
self.rate_limit.add_to_build_request_queue(
@ -113,7 +117,8 @@ class ComputeCreate(BaseComputeTask):
port_ids=[port.id for port in ports],
config_drive_files=config_drive_files,
user_data=user_data,
server_group_id=server_group_id)
server_group_id=server_group_id,
availability_zone=amp_availability_zone)
LOG.debug("Server created with id: %s for amphora id: %s",
compute_id, amphora_id)
@ -144,7 +149,8 @@ class ComputeCreate(BaseComputeTask):
class CertComputeCreate(ComputeCreate):
def execute(self, amphora_id, server_pem,
build_type_priority=constants.LB_CREATE_NORMAL_PRIORITY,
server_group_id=None, ports=None, flavor=None):
server_group_id=None, ports=None, flavor=None,
availability_zone=None):
"""Create an amphora
:returns: an amphora
@ -162,7 +168,8 @@ class CertComputeCreate(ComputeCreate):
return super(CertComputeCreate, self).execute(
amphora_id, config_drive_files=config_drive_files,
build_type_priority=build_type_priority,
server_group_id=server_group_id, ports=ports, flavor=flavor)
server_group_id=server_group_id, ports=ports, flavor=flavor,
availability_zone=availability_zone)
class DeleteAmphoraeOnLoadBalancer(BaseComputeTask):

@ -505,7 +505,8 @@ class AssociateFailoverAmphoraWithLBID(BaseDatabaseTask):
class MapLoadbalancerToAmphora(BaseDatabaseTask):
"""Maps and assigns a load balancer to an amphora in the database."""
def execute(self, loadbalancer_id, server_group_id=None, flavor=None):
def execute(self, loadbalancer_id, server_group_id=None, flavor=None,
availability_zone=None):
"""Allocates an Amphora for the load balancer in the database.
:param loadbalancer_id: The load balancer id to map to an amphora
@ -528,9 +529,15 @@ class MapLoadbalancerToAmphora(BaseDatabaseTask):
"allocation.")
return None
if availability_zone:
amp_az = availability_zone.get(constants.COMPUTE_ZONE)
else:
amp_az = CONF.nova.availability_zone
amp = self.amphora_repo.allocate_and_associate(
db_apis.get_session(),
loadbalancer_id)
loadbalancer_id,
amp_az)
if amp is None:
LOG.debug("No Amphora available for load balancer with id %s",
loadbalancer_id)

@ -0,0 +1,41 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""add availability_zone to lb

Revision ID: 8ac4ed24df3a
Revises: c761c8a71579
Create Date: 2019-11-13 08:37:39.392163

"""

from alembic import op
import sqlalchemy as sa

# revision identifiers, used by Alembic.
revision = '8ac4ed24df3a'
down_revision = 'c761c8a71579'


def upgrade():
    """Add a nullable availability_zone column to the load_balancer table.

    The column holds the AZ *name* and carries a foreign key to
    availability_zone.name, so a zone row cannot be removed while a
    load balancer record still references it.
    """
    # NOTE: redundant Python-2 u'' literal prefixes were dropped; string
    # literals are unicode by default on Python 3.
    op.add_column('load_balancer',
                  sa.Column('availability_zone',
                            sa.String(255),
                            nullable=True)
                  )
    op.create_foreign_key(
        'fk_load_balancer_availability_zone_name', 'load_balancer',
        'availability_zone', ['availability_zone'], ['name']
    )

@ -401,6 +401,11 @@ class LoadBalancer(base_models.BASE, base_models.IdMixin,
flavor_id = sa.Column(
sa.String(36),
sa.ForeignKey("flavor.id", name="fk_lb_flavor_id"), nullable=True)
availability_zone = sa.Column(
sa.String(255),
sa.ForeignKey("availability_zone.name",
name="fk_load_balancer_availability_zone_name"),
nullable=True)
class VRRPGroup(base_models.BASE):

@ -1185,7 +1185,8 @@ class AmphoraRepository(BaseRepository):
load_balancer.amphorae.append(amphora)
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def allocate_and_associate(self, session, load_balancer_id):
def allocate_and_associate(self, session, load_balancer_id,
availability_zone=None):
"""Allocate an amphora for a load balancer.
For v0.5 this is simple, find a free amp and
@ -1196,13 +1197,23 @@ class AmphoraRepository(BaseRepository):
:param load_balancer_id: The load balancer id to associate
:returns: The amphora ID for the load balancer or None
"""
filters = {
'status': 'READY',
'load_balancer_id': None
}
if availability_zone:
LOG.debug("Filtering amps by zone: %s", availability_zone)
filters['cached_zone'] = availability_zone
with session.begin(subtransactions=True):
amp = session.query(self.model_class).with_for_update().filter_by(
status='READY', load_balancer_id=None).first()
**filters).first()
if amp is None:
return None
if availability_zone:
LOG.debug("Found amp: %s in %s", amp.id, amp.cached_zone)
amp.status = 'ALLOCATED'
amp.load_balancer_id = load_balancer_id
@ -1237,14 +1248,21 @@ class AmphoraRepository(BaseRepository):
return db_lb.to_data_model()
return None
def get_spare_amphora_count(self, session, availability_zone=None):
    """Get the count of the spare amphora.

    :param session: A Sql Alchemy database session.
    :param availability_zone: Optional compute zone name; when provided,
                              only spare amphorae whose cached_zone matches
                              are counted.
    :returns: Number of current spare amphora.
    """
    # Spares are READY amphorae not yet mapped to any load balancer.
    filters = {
        'status': consts.AMPHORA_READY,
        'load_balancer_id': None
    }
    # An AZ-aware spare pool only cares about amphorae booted in the
    # requested zone.
    if availability_zone is not None:
        filters['cached_zone'] = availability_zone

    with session.begin(subtransactions=True):
        count = session.query(self.model_class).filter_by(
            **filters).count()

    return count
@ -1956,7 +1974,7 @@ class AvailabilityZoneRepository(_GetALLExceptDELETEDIdMixin, BaseRepository):
def delete(self, serial_session, **filters):
"""Special delete method for availability_zone.
Sets DELETED LBs availability_zone_id to NIL_UUID, then removes the
Sets DELETED LBs availability_zone to NIL_UUID, then removes the
availability_zone.
:param serial_session: A Sql Alchemy database transaction session.
@ -1965,12 +1983,11 @@ class AvailabilityZoneRepository(_GetALLExceptDELETEDIdMixin, BaseRepository):
:raises: odb_exceptions.DBReferenceError
:raises: sqlalchemy.orm.exc.NoResultFound
"""
# TODO(sorrison): Uncomment this
# (serial_session.query(models.LoadBalancer).
# filter(models.LoadBalancer.availability_zone_id == filters['id']).
# filter(models.LoadBalancer.provisioning_status == consts.DELETED).
# update({models.LoadBalancer.availability_zone_id: consts.NIL_UUID},
# synchronize_session=False))
(serial_session.query(models.LoadBalancer).
filter(models.LoadBalancer.availability_zone == filters[consts.NAME]).
filter(models.LoadBalancer.provisioning_status == consts.DELETED).
update({models.LoadBalancer.availability_zone: consts.NIL_UUID},
synchronize_session=False))
availability_zone = (
serial_session.query(self.model_class).filter_by(**filters).one())
serial_session.delete(availability_zone)

@ -555,8 +555,6 @@ class TestAvailabilityZones(base.BaseAPITest):
self.assertEqual('name1', response.get('name'))
def test_delete_in_use(self):
# TODO(sorrison): Enable this test
self.skipTest("Enable in next patch when LB can use AZ")
az = self.create_availability_zone(
'name1', 'description', self.azp.get('id'), True)
project_id = uuidutils.generate_uuid()
@ -564,7 +562,7 @@ class TestAvailabilityZones(base.BaseAPITest):
self.create_load_balancer(lb_id, name='lb1',
project_id=project_id,
description='desc1',
availability_zone_name=az.get('name'),
availability_zone=az.get('name'),
admin_state_up=False)
self.delete(self.AZ_PATH.format(az_name=az.get('name')),
status=409)

@ -1031,6 +1031,68 @@ class TestLoadBalancer(base.BaseAPITest):
self.assertEqual('noop_driver', api_lb.get('provider'))
self.assertEqual(test_flavor_id, api_lb.get('flavor_id'))
def test_create_with_availability_zone(self, **optionals):
    """An LB created with a valid, enabled AZ reports that AZ back."""
    compute_zone = 'nova'
    profile = self.create_availability_zone_profile(
        'test1', 'noop_driver', '{"compute_zone": "%s"}' % compute_zone)
    zone = self.create_availability_zone(
        compute_zone, 'description', profile.get('id'), True)
    response_lb = self.test_create(availability_zone=zone.get('name'))
    self.assertEqual(compute_zone, response_lb.get('availability_zone'))
def test_create_az_disabled(self, **optionals):
    """Requesting a disabled AZ must be rejected with HTTP 400."""
    compute_zone = 'nova'
    profile = self.create_availability_zone_profile(
        'test1', 'noop_driver', '{"compute_zone": "%s"}' % compute_zone)
    # enabled=False: the zone exists but operators have disabled it.
    zone = self.create_availability_zone(
        compute_zone, 'description', profile.get('id'), False)
    lb_json = {'name': 'test1',
               'vip_subnet_id': uuidutils.generate_uuid(),
               'project_id': self.project_id,
               'availability_zone': zone.get('name'),
               }
    lb_json.update(optionals)
    response = self.post(
        self.LBS_PATH, self._build_body(lb_json), status=400)
    expected_fault = ('The selected availability_zone is not allowed in '
                      'this deployment: {}'.format(compute_zone))
    self.assertEqual(expected_fault, response.json.get('faultstring'))
def test_create_az_missing(self, **optionals):
    """Referencing a nonexistent AZ must fail validation (HTTP 400)."""
    lb_json = {'name': 'test1',
               'vip_subnet_id': uuidutils.generate_uuid(),
               'project_id': self.project_id,
               'availability_zone': 'bogus-az',
               }
    lb_json.update(optionals)
    response = self.post(
        self.LBS_PATH, self._build_body(lb_json), status=400)
    expected_fault = 'Validation failure: Invalid availability zone.'
    self.assertEqual(expected_fault, response.json.get('faultstring'))
@mock.patch('octavia.api.drivers.utils.call_provider')
def test_create_az_unsupported(self, mock_provider):
    """A provider driver without AZ support must yield HTTP 501."""
    compute_zone = 'nova'
    profile = self.create_availability_zone_profile(
        'test1', 'noop_driver', '{"compute_zone": "%s"}' % compute_zone)
    zone = self.create_availability_zone(
        compute_zone, 'description', profile.get('id'), True)
    # Simulate a provider driver that rejects the AZ feature entirely.
    mock_provider.side_effect = NotImplementedError
    lb_json = {'name': 'test1',
               'vip_subnet_id': uuidutils.generate_uuid(),
               'project_id': self.project_id,
               'availability_zone': zone.get('name'),
               }
    response = self.post(
        self.LBS_PATH, self._build_body(lb_json), status=501)
    expected_fault = ("Provider 'noop_driver' does not support a "
                      "requested action: This provider does not support "
                      "availability zones.")
    self.assertEqual(expected_fault, response.json.get('faultstring'))
def test_matching_providers(self, **optionals):
fp = self.create_flavor_profile('test1', 'noop_driver',
'{"image": "ubuntu"}')
@ -2504,6 +2566,7 @@ class TestLoadBalancerGraph(base.BaseAPITest):
expected_lb = {
'description': '',
'admin_state_up': True,
'availability_zone': None,
'provisioning_status': constants.PENDING_CREATE,
'operating_status': constants.OFFLINE,
# TODO(rm_work): vip_network_id is a weird case, as it will be
@ -2515,7 +2578,7 @@ class TestLoadBalancerGraph(base.BaseAPITest):
'vip_qos_policy_id': None,
'flavor_id': None,
'provider': 'noop_driver',
'tags': []
'tags': [],
}
expected_lb.update(create_lb)
expected_lb['listeners'] = expected_listeners

@ -168,6 +168,7 @@ class AllRepositoriesTest(base.OctaviaDBTestBase):
del lb_dm_dict['pools']
del lb_dm_dict['created_at']
del lb_dm_dict['updated_at']
self.assertIsNone(lb_dm_dict.pop('availability_zone'))
self.assertEqual(lb, lb_dm_dict)
vip_dm_dict = lb_dm.vip.to_dict()
vip_dm_dict['load_balancer_id'] = lb_dm.id