diff --git a/doc/source/contributor/guides/providers.rst b/doc/source/contributor/guides/providers.rst index 74ed171112..3592a44e0d 100644 --- a/doc/source/contributor/guides/providers.rst +++ b/doc/source/contributor/guides/providers.rst @@ -130,35 +130,37 @@ and validated with the following exceptions: As of the writing of this specification the create load balancer object may contain the following: -+-----------------+--------+-----------------------------------------------+ -| Name | Type | Description | -+=================+========+===============================================+ -| admin_state_up | bool | Admin state: True if up, False if down. | -+-----------------+--------+-----------------------------------------------+ -| description | string | A human-readable description for the resource.| -+-----------------+--------+-----------------------------------------------+ -| flavor | dict | The flavor keys and values. | -+-----------------+--------+-----------------------------------------------+ -| listeners | list | A list of `Listener objects`_. | -+-----------------+--------+-----------------------------------------------+ -| loadbalancer_id | string | ID of load balancer to create. | -+-----------------+--------+-----------------------------------------------+ -| name | string | Human-readable name of the resource. | -+-----------------+--------+-----------------------------------------------+ -| pools | list | A list of `Pool object`_. | -+-----------------+--------+-----------------------------------------------+ -| project_id | string | ID of the project owning this resource. | -+-----------------+--------+-----------------------------------------------+ -| vip_address | string | The IP address of the Virtual IP (VIP). | -+-----------------+--------+-----------------------------------------------+ -| vip_network_id | string | The ID of the network for the VIP. | -+-----------------+--------+-----------------------------------------------+ -| vip_port_id | string | The ID of the VIP port. | -+-----------------+--------+-----------------------------------------------+ -|vip_qos_policy_id| string | The ID of the qos policy for the VIP. | -+-----------------+--------+-----------------------------------------------+ -| vip_subnet_id | string | The ID of the subnet for the VIP. | -+-----------------+--------+-----------------------------------------------+ ++-------------------+--------+-----------------------------------------------+ +| Name | Type | Description | ++===================+========+===============================================+ +| admin_state_up | bool | Admin state: True if up, False if down. | ++-------------------+--------+-----------------------------------------------+ +| description | string | A human-readable description for the resource.| ++-------------------+--------+-----------------------------------------------+ +| flavor | dict | The flavor keys and values. | ++-------------------+--------+-----------------------------------------------+ +| availability_zone | dict | The availability zone keys and values. | ++-------------------+--------+-----------------------------------------------+ +| listeners | list | A list of `Listener objects`_. | ++-------------------+--------+-----------------------------------------------+ +| loadbalancer_id | string | ID of load balancer to create. | ++-------------------+--------+-----------------------------------------------+ +| name | string | Human-readable name of the resource. 
| ++-------------------+--------+-----------------------------------------------+ +| pools | list | A list of `Pool object`_. | ++-------------------+--------+-----------------------------------------------+ +| project_id | string | ID of the project owning this resource. | ++-------------------+--------+-----------------------------------------------+ +| vip_address | string | The IP address of the Virtual IP (VIP). | ++-------------------+--------+-----------------------------------------------+ +| vip_network_id | string | The ID of the network for the VIP. | ++-------------------+--------+-----------------------------------------------+ +| vip_port_id | string | The ID of the VIP port. | ++-------------------+--------+-----------------------------------------------+ +| vip_qos_policy_id | string | The ID of the qos policy for the VIP. | ++-------------------+--------+-----------------------------------------------+ +| vip_subnet_id | string | The ID of the subnet for the VIP. | ++-------------------+--------+-----------------------------------------------+ The driver is expected to validate that the driver supports the request and raise an exception if the request cannot be accepted. @@ -1696,10 +1698,11 @@ flavor is supported. Both functions are synchronous. .. _flavor specification: ../specs/version1.0/flavors.html -get_supported_flavor_keys -^^^^^^^^^^^^^^^^^^^^^^^^^ +get_supported_flavor_metadata +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Retrieves a dictionary of supported flavor keys and their description. +For example: .. code-block:: python @@ -1748,6 +1751,71 @@ Following are interface definitions for flavor support: """ raise NotImplementedError() +Availability Zone +----------------- + +Octavia availability zones have no explicit spec, but are modeled closely +after the existing `flavor specification`_. +Support for availability_zones will be provided through two provider driver +interfaces, one to query supported availability zone metadata keys and another +to validate that an availability zone is supported. Both functions are +synchronous. + +get_supported_availability_zone_metadata +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Retrieves a dictionary of supported availability zone keys and their +description. For example: + +.. code-block:: python + + {"compute_zone": "The compute availability zone to use for this loadbalancer.", + "management_network": "The management network ID for the loadbalancer."} + +validate_availability_zone +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Validates that the driver supports the availability zone metadata dictionary. + +The validate_availability_zone method will be passed an availability zone +metadata dictionary that the driver will validate. This is used when an +operator uploads a new availability zone that applies to the driver. + +The validate_availability_zone method will either return or raise a +``UnsupportedOptionError`` exception. + +Following are interface definitions for availability zone support: + +.. code-block:: python + + def get_supported_availability_zone_metadata(): + """Returns a dict of supported availability zone metadata keys. + + The returned dictionary will include key/value pairs, 'name' and + 'description.' + + :returns: The availability zone metadata dictionary + :raises DriverError: An unexpected error occurred in the driver. + :raises NotImplementedError: The driver does not support AZs. + """ + raise NotImplementedError() + +.. 
code-block:: python + + def validate_availability_zone(availability_zone_metadata): + """Validates if driver can support the availability zone. + + :param availability_zone_metadata: Dictionary with az metadata. + :type availability_zone_metadata: dict + :return: Nothing if the availability zone is valid and supported. + :raises DriverError: An unexpected error occurred in the driver. + :raises NotImplementedError: The driver does not support availability + zones. + :raises UnsupportedOptionError: if driver does not + support one of the configuration options. + """ + raise NotImplementedError() + Exception Model --------------- diff --git a/octavia/api/drivers/amphora_driver/v1/driver.py b/octavia/api/drivers/amphora_driver/v1/driver.py index 830927eb7d..55aa5b1ffe 100644 --- a/octavia/api/drivers/amphora_driver/v1/driver.py +++ b/octavia/api/drivers/amphora_driver/v1/driver.py @@ -84,8 +84,11 @@ class AmphoraProviderDriver(driver_base.ProviderDriver): def loadbalancer_create(self, loadbalancer): if loadbalancer.flavor == driver_dm.Unset: loadbalancer.flavor = None + if loadbalancer.availability_zone == driver_dm.Unset: + loadbalancer.availability_zone = None payload = {consts.LOAD_BALANCER_ID: loadbalancer.loadbalancer_id, - consts.FLAVOR: loadbalancer.flavor} + consts.FLAVOR: loadbalancer.flavor, + consts.AVAILABILITY_ZONE: loadbalancer.availability_zone} self.client.cast({}, 'create_load_balancer', **payload) def loadbalancer_delete(self, loadbalancer, cascade=False): diff --git a/octavia/api/drivers/amphora_driver/v2/driver.py b/octavia/api/drivers/amphora_driver/v2/driver.py index 9b63322268..b3576b617b 100644 --- a/octavia/api/drivers/amphora_driver/v2/driver.py +++ b/octavia/api/drivers/amphora_driver/v2/driver.py @@ -86,8 +86,11 @@ class AmphoraProviderDriver(driver_base.ProviderDriver): def loadbalancer_create(self, loadbalancer): if loadbalancer.flavor == driver_dm.Unset: loadbalancer.flavor = None + if loadbalancer.availability_zone == driver_dm.Unset: + loadbalancer.availability_zone = None payload = {consts.LOAD_BALANCER_ID: loadbalancer.loadbalancer_id, - consts.FLAVOR: loadbalancer.flavor} + consts.FLAVOR: loadbalancer.flavor, + consts.AVAILABILITY_ZONE: loadbalancer.availability_zone} self.client.cast({}, 'create_load_balancer', **payload) def loadbalancer_delete(self, loadbalancer, cascade=False): diff --git a/octavia/api/v2/controllers/load_balancer.py b/octavia/api/v2/controllers/load_balancer.py index 7520e8bfa0..72f6b6b42d 100644 --- a/octavia/api/v2/controllers/load_balancer.py +++ b/octavia/api/v2/controllers/load_balancer.py @@ -328,6 +328,46 @@ class LoadBalancersController(base.BaseController): raise exceptions.DisabledOption(option='flavor', value=load_balancer.flavor_id) + def _validate_and_return_az_dict(self, lock_session, driver, lb_dict): + + az_dict = {} + if 'availability_zone' in lb_dict: + try: + az = self.repositories.availability_zone.get( + lock_session, name=lb_dict['availability_zone']) + az_dict = ( + self.repositories.availability_zone + .get_availability_zone_metadata_dict(lock_session, az.name) + ) + except sa_exception.NoResultFound: + raise exceptions.ValidationException( + detail=_("Invalid availability_zone.")) + + # Make sure the driver will still accept the availability zone metadata + if az_dict: + try: + driver_utils.call_provider(driver.name, + driver.validate_availability_zone, + az_dict) + except NotImplementedError: + raise exceptions.ProviderNotImplementedError( + prov=driver.name, user_msg="This provider does not support" + " 
availability zones.") + + return az_dict + + def _validate_availability_zone(self, session, load_balancer): + if not isinstance(load_balancer.availability_zone, wtypes.UnsetType): + az = self.repositories.availability_zone.get( + session, name=load_balancer.availability_zone) + if not az: + raise exceptions.ValidationException( + detail=_("Invalid availability zone.")) + if not az.enabled: + raise exceptions.DisabledOption( + option='availability_zone', + value=load_balancer.availability_zone) + @wsme_pecan.wsexpose(lb_types.LoadBalancerFullRootResponse, body=lb_types.LoadBalancerRootPOST, status_code=201) def post(self, load_balancer): @@ -351,6 +391,8 @@ class LoadBalancersController(base.BaseController): self._validate_flavor(context.session, load_balancer) + self._validate_availability_zone(context.session, load_balancer) + provider = self._get_provider(context.session, load_balancer) # Load the driver early as it also provides validation @@ -383,6 +425,9 @@ class LoadBalancersController(base.BaseController): flavor_dict = self._apply_flavor_to_lb_dict(lock_session, driver, lb_dict) + az_dict = self._validate_and_return_az_dict(lock_session, driver, + lb_dict) + db_lb = self.repositories.create_load_balancer_and_vip( lock_session, lb_dict, vip_dict) @@ -391,6 +436,9 @@ class LoadBalancersController(base.BaseController): # flavor dict instead of just the flavor_id we store in the DB. lb_dict['flavor'] = flavor_dict + # Do the same with the availability_zone dict + lb_dict['availability_zone'] = az_dict + # See if the provider driver wants to create the VIP port octavia_owned = False try: diff --git a/octavia/api/v2/types/load_balancer.py b/octavia/api/v2/types/load_balancer.py index 64c59f53ec..733b043a27 100644 --- a/octavia/api/v2/types/load_balancer.py +++ b/octavia/api/v2/types/load_balancer.py @@ -55,6 +55,7 @@ class LoadBalancerResponse(BaseLoadBalancerType): flavor_id = wtypes.wsattr(wtypes.UuidType()) vip_qos_policy_id = wtypes.wsattr(wtypes.UuidType()) tags = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType())) + availability_zone = wtypes.wsattr(wtypes.StringType()) @classmethod def from_data_model(cls, data_model, children=False): @@ -122,6 +123,7 @@ class LoadBalancerPOST(BaseLoadBalancerType): provider = wtypes.wsattr(wtypes.StringType(max_length=64)) tags = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType(max_length=255))) flavor_id = wtypes.wsattr(wtypes.UuidType()) + availability_zone = wtypes.wsattr(wtypes.StringType(max_length=255)) class LoadBalancerRootPOST(types.BaseType): diff --git a/octavia/common/data_models.py b/octavia/common/data_models.py index 22cebc9399..cf2519a50a 100644 --- a/octavia/common/data_models.py +++ b/octavia/common/data_models.py @@ -461,7 +461,7 @@ class LoadBalancer(BaseDataModel): topology=None, vip=None, listeners=None, amphorae=None, pools=None, vrrp_group=None, server_group_id=None, created_at=None, updated_at=None, provider=None, tags=None, - flavor_id=None): + flavor_id=None, availability_zone=None): self.id = id self.project_id = project_id @@ -482,6 +482,7 @@ class LoadBalancer(BaseDataModel): self.provider = provider self.tags = tags or [] self.flavor_id = flavor_id + self.availability_zone = availability_zone def update(self, update_dict): for key, value in update_dict.items(): diff --git a/octavia/compute/compute_base.py b/octavia/compute/compute_base.py index 2b2db1bedd..a27a8f4455 100644 --- a/octavia/compute/compute_base.py +++ b/octavia/compute/compute_base.py @@ -24,7 +24,8 @@ class ComputeBase(object): def build(self, 
name="amphora_name", amphora_flavor=None, image_id=None, image_tag=None, image_owner=None, key_name=None, sec_groups=None, network_ids=None, - config_drive_files=None, user_data=None, server_group_id=None): + config_drive_files=None, user_data=None, server_group_id=None, + availability_zone=None): """Build a new amphora. :param name: Optional name for Amphora @@ -46,6 +47,7 @@ class ComputeBase(object): well or a string :param server_group_id: Optional server group id(uuid) which is used for anti_affinity feature + :param availability_zone: Name of the compute availability zone. :raises ComputeBuildException: if compute failed to build amphora :returns: UUID of amphora diff --git a/octavia/compute/drivers/noop_driver/driver.py b/octavia/compute/drivers/noop_driver/driver.py index 23d2e0f274..6eadf86779 100644 --- a/octavia/compute/drivers/noop_driver/driver.py +++ b/octavia/compute/drivers/noop_driver/driver.py @@ -32,15 +32,16 @@ class NoopManager(object): image_id=None, image_tag=None, image_owner=None, key_name=None, sec_groups=None, network_ids=None, config_drive_files=None, user_data=None, port_ids=None, - server_group_id=None): + server_group_id=None, availability_zone=None): LOG.debug("Compute %s no-op, build name %s, amphora_flavor %s, " "image_id %s, image_tag %s, image_owner %s, key_name %s, " "sec_groups %s, network_ids %s, config_drive_files %s, " - "user_data %s, port_ids %s, server_group_id %s", + "user_data %s, port_ids %s, server_group_id %s, " + "availability_zone %s", self.__class__.__name__, name, amphora_flavor, image_id, image_tag, image_owner, key_name, sec_groups, network_ids, config_drive_files, - user_data, port_ids, server_group_id) + user_data, port_ids, server_group_id, availability_zone) self.computeconfig[(name, amphora_flavor, image_id, image_tag, image_owner, key_name, user_data, server_group_id)] = ( @@ -127,13 +128,13 @@ class NoopComputeDriver(driver_base.ComputeBase): image_id=None, image_tag=None, image_owner=None, key_name=None, sec_groups=None, network_ids=None, config_drive_files=None, user_data=None, port_ids=None, - server_group_id=None): + server_group_id=None, availability_zone=None): compute_id = self.driver.build(name, amphora_flavor, image_id, image_tag, image_owner, key_name, sec_groups, network_ids, config_drive_files, user_data, port_ids, - server_group_id) + server_group_id, availability_zone) return compute_id def delete(self, compute_id): diff --git a/octavia/compute/drivers/nova_driver.py b/octavia/compute/drivers/nova_driver.py index 8e7bfd0866..5f9a44269c 100644 --- a/octavia/compute/drivers/nova_driver.py +++ b/octavia/compute/drivers/nova_driver.py @@ -100,7 +100,7 @@ class VirtualMachineManager(compute_base.ComputeBase): image_id=None, image_tag=None, image_owner=None, key_name=None, sec_groups=None, network_ids=None, port_ids=None, config_drive_files=None, user_data=None, - server_group_id=None): + server_group_id=None, availability_zone=None): '''Create a new virtual machine. :param name: optional name for amphora @@ -123,6 +123,7 @@ class VirtualMachineManager(compute_base.ComputeBase): well or a string :param server_group_id: Optional server group id(uuid) which is used for anti_affinity feature + :param availability_zone: Name of the compute availability zone. 
:raises ComputeBuildException: if nova failed to build virtual machine :returns: UUID of amphora @@ -141,6 +142,7 @@ class VirtualMachineManager(compute_base.ComputeBase): server_group = None if server_group_id is None else { "group": server_group_id} + az_name = availability_zone or CONF.nova.availability_zone image_id = _get_image_uuid( self._glance_client, image_id, image_tag, image_owner) @@ -176,7 +178,7 @@ class VirtualMachineManager(compute_base.ComputeBase): userdata=user_data, config_drive=True, scheduler_hints=server_group, - availability_zone=CONF.nova.availability_zone + availability_zone=az_name ) return amphora.id diff --git a/octavia/controller/housekeeping/house_keeping.py b/octavia/controller/housekeeping/house_keeping.py index 4536027afa..8910bf3495 100644 --- a/octavia/controller/housekeeping/house_keeping.py +++ b/octavia/controller/housekeeping/house_keeping.py @@ -20,6 +20,7 @@ from oslo_log import log as logging from oslo_utils import timeutils from sqlalchemy.orm import exc as sqlalchemy_exceptions +from octavia.common import constants from octavia.controller.worker.v1 import controller_worker as cw from octavia.db import api as db_api from octavia.db import repositories as repo @@ -32,6 +33,7 @@ class SpareAmphora(object): def __init__(self): self.amp_repo = repo.AmphoraRepository() self.spares_repo = repo.SparesPoolRepository() + self.az_repo = repo.AvailabilityZoneRepository() self.cw = cw.ControllerWorker() def spare_check(self): @@ -46,28 +48,51 @@ class SpareAmphora(object): spare_amp_row = self.spares_repo.get_for_update(lock_session) conf_spare_cnt = CONF.house_keeping.spare_amphora_pool_size - curr_spare_cnt = self.amp_repo.get_spare_amphora_count(session) LOG.debug("Required Spare Amphora count : %d", conf_spare_cnt) - LOG.debug("Current Spare Amphora count : %d", curr_spare_cnt) - diff_count = conf_spare_cnt - curr_spare_cnt + availability_zones, links = self.az_repo.get_all(session, + enabled=True) + compute_zones = set() + for az in availability_zones: + az_meta = self.az_repo.get_availability_zone_metadata_dict( + session, az.name) + compute_zones.add(az_meta.get(constants.COMPUTE_ZONE)) + # If no AZs objects then build in the configured AZ (even if None) + # Also if configured AZ is not None then also build in there + # as could be different to the current AZs objects. + if CONF.nova.availability_zone or not compute_zones: + compute_zones.add(CONF.nova.availability_zone) - # When the current spare amphora is less than required amp_booting = [] - if diff_count > 0: - LOG.info("Initiating creation of %d spare amphora.", - diff_count) + for az_name in compute_zones: + # TODO(rm_work): If az_name is None, this will get ALL spares + # across all AZs. This is the safest/most backwards compatible + # way I can think of, as cached_zone on the amphora records + # won't ever match. This should not impact any existing deploys + # with no AZ configured, as the behavior should be identical + # in that case. In the case of multiple AZs configured, it will + # simply ensure there are at least spares *somewhere*, but + # will function more accurately if the operator actually + # configures the AZ setting properly. 
+ curr_spare_cnt = self.amp_repo.get_spare_amphora_count( + session, availability_zone=az_name) + LOG.debug("Current Spare Amphora count for AZ %s: %d", + az_name, curr_spare_cnt) + diff_count = conf_spare_cnt - curr_spare_cnt + # When the current spare amphora is less than required + if diff_count > 0: + LOG.info("Initiating creation of %d spare amphora " + "for az %s.", diff_count, az_name) - # Call Amphora Create Flow diff_count times - with futures.ThreadPoolExecutor( - max_workers=CONF.house_keeping.spare_amphora_pool_size - ) as executor: - for i in range(1, diff_count + 1): - LOG.debug("Starting amphorae number %d ...", i) - amp_booting.append( - executor.submit(self.cw.create_amphora)) - else: - LOG.debug("Current spare amphora count satisfies the " - "requirement") + # Call Amphora Create Flow diff_count times + with futures.ThreadPoolExecutor( + max_workers=conf_spare_cnt) as executor: + for i in range(1, diff_count + 1): + LOG.debug("Starting amphorae number %d ...", i) + amp_booting.append(executor.submit( + self.cw.create_amphora, az_name)) + else: + LOG.debug("Current spare amphora count for AZ %s " + "satisfies the requirement", az_name) # Wait for the amphora boot threads to finish futures.wait(amp_booting) diff --git a/octavia/controller/queue/v1/endpoints.py b/octavia/controller/queue/v1/endpoints.py index 3355da7d90..780497c533 100644 --- a/octavia/controller/queue/v1/endpoints.py +++ b/octavia/controller/queue/v1/endpoints.py @@ -40,9 +40,10 @@ class Endpoints(object): ).driver def create_load_balancer(self, context, load_balancer_id, - flavor=None): + flavor=None, availability_zone=None): LOG.info('Creating load balancer \'%s\'...', load_balancer_id) - self.worker.create_load_balancer(load_balancer_id, flavor) + self.worker.create_load_balancer(load_balancer_id, flavor, + availability_zone) def update_load_balancer(self, context, load_balancer_id, load_balancer_updates): diff --git a/octavia/controller/queue/v2/endpoints.py b/octavia/controller/queue/v2/endpoints.py index 775e3d213a..af84947471 100644 --- a/octavia/controller/queue/v2/endpoints.py +++ b/octavia/controller/queue/v2/endpoints.py @@ -37,9 +37,10 @@ class Endpoints(object): self.worker = controller_worker.ControllerWorker() def create_load_balancer(self, context, load_balancer_id, - flavor=None): + flavor=None, availability_zone=None): LOG.info('Creating load balancer \'%s\'...', load_balancer_id) - self.worker.create_load_balancer(load_balancer_id, flavor) + self.worker.create_load_balancer(load_balancer_id, flavor, + availability_zone) def update_load_balancer(self, context, load_balancer_id, load_balancer_updates): diff --git a/octavia/controller/worker/v1/controller_worker.py b/octavia/controller/worker/v1/controller_worker.py index 7f4b23107e..08ab2714a9 100644 --- a/octavia/controller/worker/v1/controller_worker.py +++ b/octavia/controller/worker/v1/controller_worker.py @@ -70,6 +70,7 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine): self._l7policy_repo = repo.L7PolicyRepository() self._l7rule_repo = repo.L7RuleRepository() self._flavor_repo = repo.FlavorRepository() + self._az_repo = repo.AvailabilityZoneRepository() super(ControllerWorker, self).__init__() @@ -84,7 +85,7 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine): return repo.get(db_apis.get_session(), id=id) - def create_amphora(self): + def create_amphora(self, availability_zone=None): """Creates an Amphora. This is used to create spare amphora. 
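The changes above thread a new optional ``availability_zone`` argument through the RPC layer: the API-side amphora driver normalizes an unset value to ``None`` and adds it to the cast payload, and the queue endpoints forward it to ``ControllerWorker.create_load_balancer``. A minimal sketch of that hand-off, using hypothetical stand-in names rather than the real oslo.messaging plumbing:

.. code-block:: python

    # Sketch only: the dict keys mirror consts.LOAD_BALANCER_ID, consts.FLAVOR
    # and consts.AVAILABILITY_ZONE used in the payload above; FakeWorker is a
    # hypothetical stand-in for ControllerWorker.

    def build_create_lb_payload(loadbalancer_id, flavor=None,
                                availability_zone=None):
        # Unset flavor/availability_zone values are normalized to None
        # before the driver casts 'create_load_balancer'.
        return {'load_balancer_id': loadbalancer_id,
                'flavor': flavor,
                'availability_zone': availability_zone}

    class FakeWorker(object):
        def create_load_balancer(self, load_balancer_id, flavor=None,
                                 availability_zone=None):
            return (load_balancer_id, flavor, availability_zone)

    def create_load_balancer_endpoint(worker, payload):
        # Mirrors Endpoints.create_load_balancer: both optional values
        # default to None when the caller omits them.
        return worker.create_load_balancer(payload['load_balancer_id'],
                                           payload.get('flavor'),
                                           payload.get('availability_zone'))

    print(create_load_balancer_endpoint(
        FakeWorker(),
        build_create_lb_payload('lb-id', availability_zone='az1')))
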
@@ -92,12 +93,17 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine): :returns: amphora_id """ try: + store = {constants.BUILD_TYPE_PRIORITY: + constants.LB_CREATE_SPARES_POOL_PRIORITY, + constants.FLAVOR: None, + constants.AVAILABILITY_ZONE: None} + if availability_zone: + store[constants.AVAILABILITY_ZONE] = ( + self._az_repo.get_availability_zone_metadata_dict( + db_apis.get_session(), availability_zone)) create_amp_tf = self._taskflow_load( self._amphora_flows.get_create_amphora_flow(), - store={constants.BUILD_TYPE_PRIORITY: - constants.LB_CREATE_SPARES_POOL_PRIORITY, - constants.FLAVOR: None} - ) + store=store) with tf_logging.DynamicLoggingListener(create_amp_tf, log=LOG): create_amp_tf.run() @@ -306,7 +312,8 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine): wait=tenacity.wait_incrementing( RETRY_INITIAL_DELAY, RETRY_BACKOFF, RETRY_MAX), stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS)) - def create_load_balancer(self, load_balancer_id, flavor=None): + def create_load_balancer(self, load_balancer_id, flavor=None, + availability_zone=None): """Creates a load balancer by allocating Amphorae. First tries to allocate an existing Amphora in READY state. @@ -328,7 +335,8 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine): store = {constants.LOADBALANCER_ID: load_balancer_id, constants.BUILD_TYPE_PRIORITY: constants.LB_CREATE_NORMAL_PRIORITY, - constants.FLAVOR: flavor} + constants.FLAVOR: flavor, + constants.AVAILABILITY_ZONE: availability_zone} topology = lb.topology @@ -846,6 +854,12 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine): db_apis.get_session(), lb.flavor_id)) else: stored_params[constants.FLAVOR] = {} + if lb and lb.availability_zone: + stored_params[constants.AVAILABILITY_ZONE] = ( + self._az_repo.get_availability_zone_metadata_dict( + db_apis.get_session(), lb.availability_zone)) + else: + stored_params[constants.AVAILABILITY_ZONE] = {} failover_amphora_tf = self._taskflow_load( self._amphora_flows.get_failover_flow( diff --git a/octavia/controller/worker/v1/flows/amphora_flows.py b/octavia/controller/worker/v1/flows/amphora_flows.py index be041a8071..550e456b8b 100644 --- a/octavia/controller/worker/v1/flows/amphora_flows.py +++ b/octavia/controller/worker/v1/flows/amphora_flows.py @@ -55,12 +55,13 @@ class AmphoraFlows(object): create_amphora_flow.add(compute_tasks.CertComputeCreate( requires=(constants.AMPHORA_ID, constants.SERVER_PEM, - constants.BUILD_TYPE_PRIORITY, constants.FLAVOR), + constants.BUILD_TYPE_PRIORITY, constants.FLAVOR, + constants.AVAILABILITY_ZONE), provides=constants.COMPUTE_ID)) else: create_amphora_flow.add(compute_tasks.ComputeCreate( requires=(constants.AMPHORA_ID, constants.BUILD_TYPE_PRIORITY, - constants.FLAVOR), + constants.FLAVOR, constants.AVAILABILITY_ZONE), provides=constants.COMPUTE_ID)) create_amphora_flow.add(database_tasks.MarkAmphoraBootingInDB( requires=(constants.AMPHORA_ID, constants.COMPUTE_ID))) @@ -145,7 +146,8 @@ class AmphoraFlows(object): constants.SERVER_PEM, constants.BUILD_TYPE_PRIORITY, constants.SERVER_GROUP_ID, - constants.FLAVOR + constants.FLAVOR, + constants.AVAILABILITY_ZONE, ), provides=constants.COMPUTE_ID)) else: @@ -155,7 +157,8 @@ class AmphoraFlows(object): constants.AMPHORA_ID, constants.SERVER_PEM, constants.BUILD_TYPE_PRIORITY, - constants.FLAVOR + constants.FLAVOR, + constants.AVAILABILITY_ZONE, ), provides=constants.COMPUTE_ID)) else: @@ -166,7 +169,8 @@ class AmphoraFlows(object): constants.AMPHORA_ID, constants.BUILD_TYPE_PRIORITY, constants.SERVER_GROUP_ID, - 
constants.FLAVOR + constants.FLAVOR, + constants.AVAILABILITY_ZONE, ), provides=constants.COMPUTE_ID)) else: @@ -175,7 +179,8 @@ class AmphoraFlows(object): requires=( constants.AMPHORA_ID, constants.BUILD_TYPE_PRIORITY, - constants.FLAVOR + constants.FLAVOR, + constants.AVAILABILITY_ZONE, ), provides=constants.COMPUTE_ID)) @@ -256,7 +261,8 @@ class AmphoraFlows(object): # Setup the task that maps an amphora to a load balancer allocate_and_associate_amp = database_tasks.MapLoadbalancerToAmphora( name=sf_name + '-' + constants.MAP_LOADBALANCER_TO_AMPHORA, - requires=(constants.LOADBALANCER_ID, constants.FLAVOR), + requires=(constants.LOADBALANCER_ID, constants.FLAVOR, + constants.AVAILABILITY_ZONE), provides=constants.AMPHORA_ID) # Define a subflow for if we successfully map an amphora diff --git a/octavia/controller/worker/v1/tasks/compute_tasks.py b/octavia/controller/worker/v1/tasks/compute_tasks.py index b5a5749ed9..cf8291accf 100644 --- a/octavia/controller/worker/v1/tasks/compute_tasks.py +++ b/octavia/controller/worker/v1/tasks/compute_tasks.py @@ -52,7 +52,8 @@ class ComputeCreate(BaseComputeTask): def execute(self, amphora_id, config_drive_files=None, build_type_priority=constants.LB_CREATE_NORMAL_PRIORITY, - server_group_id=None, ports=None, flavor=None): + server_group_id=None, ports=None, flavor=None, + availability_zone=None): """Create an amphora :returns: an amphora @@ -64,12 +65,7 @@ class ComputeCreate(BaseComputeTask): LOG.debug("Compute create execute for amphora with id %s", amphora_id) user_data_config_drive = CONF.controller_worker.user_data_config_drive - key_name = CONF.controller_worker.amp_ssh_key_name - # TODO(rm_work): amp_ssh_access_allowed is deprecated in Pike. - # Remove the following two lines in the S release. - ssh_access = CONF.controller_worker.amp_ssh_access_allowed - key_name = None if not ssh_access else key_name # Apply an Octavia flavor customizations if flavor: @@ -81,6 +77,14 @@ class ComputeCreate(BaseComputeTask): topology = CONF.controller_worker.loadbalancer_topology amp_compute_flavor = CONF.controller_worker.amp_flavor_id + if availability_zone: + amp_availability_zone = availability_zone.get( + constants.COMPUTE_ZONE) + amp_network = availability_zone.get(constants.MANAGEMENT_NETWORK) + if amp_network: + network_ids = [amp_network] + else: + amp_availability_zone = None try: if CONF.haproxy_amphora.build_rate_limit != -1: self.rate_limit.add_to_build_request_queue( @@ -113,7 +117,8 @@ class ComputeCreate(BaseComputeTask): port_ids=[port.id for port in ports], config_drive_files=config_drive_files, user_data=user_data, - server_group_id=server_group_id) + server_group_id=server_group_id, + availability_zone=amp_availability_zone) LOG.debug("Server created with id: %s for amphora id: %s", compute_id, amphora_id) @@ -144,7 +149,8 @@ class ComputeCreate(BaseComputeTask): class CertComputeCreate(ComputeCreate): def execute(self, amphora_id, server_pem, build_type_priority=constants.LB_CREATE_NORMAL_PRIORITY, - server_group_id=None, ports=None, flavor=None): + server_group_id=None, ports=None, flavor=None, + availability_zone=None): """Create an amphora :returns: an amphora @@ -162,7 +168,8 @@ class CertComputeCreate(ComputeCreate): return super(CertComputeCreate, self).execute( amphora_id, config_drive_files=config_drive_files, build_type_priority=build_type_priority, - server_group_id=server_group_id, ports=ports, flavor=flavor) + server_group_id=server_group_id, ports=ports, flavor=flavor, + availability_zone=availability_zone) class 
DeleteAmphoraeOnLoadBalancer(BaseComputeTask): diff --git a/octavia/controller/worker/v1/tasks/database_tasks.py b/octavia/controller/worker/v1/tasks/database_tasks.py index 8c645d9cba..0eff3b0f4d 100644 --- a/octavia/controller/worker/v1/tasks/database_tasks.py +++ b/octavia/controller/worker/v1/tasks/database_tasks.py @@ -499,7 +499,8 @@ class AssociateFailoverAmphoraWithLBID(BaseDatabaseTask): class MapLoadbalancerToAmphora(BaseDatabaseTask): """Maps and assigns a load balancer to an amphora in the database.""" - def execute(self, loadbalancer_id, server_group_id=None, flavor=None): + def execute(self, loadbalancer_id, server_group_id=None, flavor=None, + availability_zone=None): """Allocates an Amphora for the load balancer in the database. :param loadbalancer_id: The load balancer id to map to an amphora @@ -522,9 +523,15 @@ class MapLoadbalancerToAmphora(BaseDatabaseTask): "allocation.") return None + if availability_zone: + amp_az = availability_zone.get(constants.COMPUTE_ZONE) + else: + amp_az = CONF.nova.availability_zone + amp = self.amphora_repo.allocate_and_associate( db_apis.get_session(), - loadbalancer_id) + loadbalancer_id, + amp_az) if amp is None: LOG.debug("No Amphora available for load balancer with id %s", loadbalancer_id) diff --git a/octavia/controller/worker/v2/controller_worker.py b/octavia/controller/worker/v2/controller_worker.py index 42f5c7d31b..567896928d 100644 --- a/octavia/controller/worker/v2/controller_worker.py +++ b/octavia/controller/worker/v2/controller_worker.py @@ -71,6 +71,7 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine): self._l7policy_repo = repo.L7PolicyRepository() self._l7rule_repo = repo.L7RuleRepository() self._flavor_repo = repo.FlavorRepository() + self._az_repo = repo.AvailabilityZoneRepository() super(ControllerWorker, self).__init__() @@ -85,7 +86,7 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine): return repo.get(db_apis.get_session(), id=id) - def create_amphora(self): + def create_amphora(self, availability_zone=None): """Creates an Amphora. This is used to create spare amphora. @@ -93,12 +94,17 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine): :returns: amphora_id """ try: + store = {constants.BUILD_TYPE_PRIORITY: + constants.LB_CREATE_SPARES_POOL_PRIORITY, + constants.FLAVOR: None, + constants.AVAILABILITY_ZONE: None} + if availability_zone: + store[constants.AVAILABILITY_ZONE] = ( + self._az_repo.get_availability_zone_metadata_dict( + db_apis.get_session(), availability_zone)) create_amp_tf = self._taskflow_load( self._amphora_flows.get_create_amphora_flow(), - store={constants.BUILD_TYPE_PRIORITY: - constants.LB_CREATE_SPARES_POOL_PRIORITY, - constants.FLAVOR: None} - ) + store=store) with tf_logging.DynamicLoggingListener(create_amp_tf, log=LOG): create_amp_tf.run() @@ -305,7 +311,8 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine): wait=tenacity.wait_incrementing( RETRY_INITIAL_DELAY, RETRY_BACKOFF, RETRY_MAX), stop=tenacity.stop_after_attempt(RETRY_ATTEMPTS)) - def create_load_balancer(self, load_balancer_id, flavor=None): + def create_load_balancer(self, load_balancer_id, flavor=None, + availability_zone=None): """Creates a load balancer by allocating Amphorae. First tries to allocate an existing Amphora in READY state. 
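As in ``MapLoadbalancerToAmphora`` above, an availability zone is carried around as a metadata dictionary (for example ``{"compute_zone": "az1", "management_network": "<net-id>"}``) and the compute zone is picked out of it, falling back to the configured Nova default when no zone metadata is present. A small illustrative helper showing that fallback pattern (the helper name and default constant are assumptions, not Octavia code):

.. code-block:: python

    # Hypothetical helper mirroring the fallback used in
    # MapLoadbalancerToAmphora: the metadata dict wins, otherwise the
    # configured default zone (CONF.nova.availability_zone) is used.
    DEFAULT_COMPUTE_ZONE = None  # stand-in for CONF.nova.availability_zone

    def resolve_compute_zone(az_metadata, default=DEFAULT_COMPUTE_ZONE):
        if az_metadata:
            return az_metadata.get('compute_zone')
        return default

    assert resolve_compute_zone({'compute_zone': 'az1'}) == 'az1'
    assert resolve_compute_zone({}) is DEFAULT_COMPUTE_ZONE
    assert resolve_compute_zone(None) is DEFAULT_COMPUTE_ZONE
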
@@ -327,7 +334,8 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine): store = {constants.LOADBALANCER_ID: load_balancer_id, constants.BUILD_TYPE_PRIORITY: constants.LB_CREATE_NORMAL_PRIORITY, - constants.FLAVOR: flavor} + constants.FLAVOR: flavor, + constants.AVAILABILITY_ZONE: availability_zone} topology = lb.topology @@ -873,6 +881,12 @@ class ControllerWorker(base_taskflow.BaseTaskFlowEngine): db_apis.get_session(), lb.flavor_id)) else: stored_params[constants.FLAVOR] = {} + if lb and lb.availability_zone: + stored_params[constants.AVAILABILITY_ZONE] = ( + self._az_repo.get_availability_zone_metadata_dict( + db_apis.get_session(), lb.availability_zone)) + else: + stored_params[constants.AVAILABILITY_ZONE] = {} failover_amphora_tf = self._taskflow_load( self._amphora_flows.get_failover_flow( diff --git a/octavia/controller/worker/v2/flows/amphora_flows.py b/octavia/controller/worker/v2/flows/amphora_flows.py index 8c81544589..e0bbb09e9e 100644 --- a/octavia/controller/worker/v2/flows/amphora_flows.py +++ b/octavia/controller/worker/v2/flows/amphora_flows.py @@ -150,7 +150,8 @@ class AmphoraFlows(object): constants.SERVER_PEM, constants.BUILD_TYPE_PRIORITY, constants.SERVER_GROUP_ID, - constants.FLAVOR + constants.FLAVOR, + constants.AVAILABILITY_ZONE, ), provides=constants.COMPUTE_ID)) else: @@ -160,7 +161,8 @@ class AmphoraFlows(object): constants.AMPHORA_ID, constants.SERVER_PEM, constants.BUILD_TYPE_PRIORITY, - constants.FLAVOR + constants.FLAVOR, + constants.AVAILABILITY_ZONE, ), provides=constants.COMPUTE_ID)) else: @@ -171,7 +173,8 @@ class AmphoraFlows(object): constants.AMPHORA_ID, constants.BUILD_TYPE_PRIORITY, constants.SERVER_GROUP_ID, - constants.FLAVOR + constants.FLAVOR, + constants.AVAILABILITY_ZONE, ), provides=constants.COMPUTE_ID)) else: @@ -180,7 +183,8 @@ class AmphoraFlows(object): requires=( constants.AMPHORA_ID, constants.BUILD_TYPE_PRIORITY, - constants.FLAVOR + constants.FLAVOR, + constants.AVAILABILITY_ZONE, ), provides=constants.COMPUTE_ID)) @@ -266,7 +270,8 @@ class AmphoraFlows(object): # Setup the task that maps an amphora to a load balancer allocate_and_associate_amp = database_tasks.MapLoadbalancerToAmphora( name=sf_name + '-' + constants.MAP_LOADBALANCER_TO_AMPHORA, - requires=(constants.LOADBALANCER_ID, constants.FLAVOR), + requires=(constants.LOADBALANCER_ID, constants.FLAVOR, + constants.AVAILABILITY_ZONE), provides=constants.AMPHORA_ID) # Define a subflow for if we successfully map an amphora diff --git a/octavia/controller/worker/v2/tasks/compute_tasks.py b/octavia/controller/worker/v2/tasks/compute_tasks.py index b5a5749ed9..cf8291accf 100644 --- a/octavia/controller/worker/v2/tasks/compute_tasks.py +++ b/octavia/controller/worker/v2/tasks/compute_tasks.py @@ -52,7 +52,8 @@ class ComputeCreate(BaseComputeTask): def execute(self, amphora_id, config_drive_files=None, build_type_priority=constants.LB_CREATE_NORMAL_PRIORITY, - server_group_id=None, ports=None, flavor=None): + server_group_id=None, ports=None, flavor=None, + availability_zone=None): """Create an amphora :returns: an amphora @@ -64,12 +65,7 @@ class ComputeCreate(BaseComputeTask): LOG.debug("Compute create execute for amphora with id %s", amphora_id) user_data_config_drive = CONF.controller_worker.user_data_config_drive - key_name = CONF.controller_worker.amp_ssh_key_name - # TODO(rm_work): amp_ssh_access_allowed is deprecated in Pike. - # Remove the following two lines in the S release. 
- ssh_access = CONF.controller_worker.amp_ssh_access_allowed - key_name = None if not ssh_access else key_name # Apply an Octavia flavor customizations if flavor: @@ -81,6 +77,14 @@ class ComputeCreate(BaseComputeTask): topology = CONF.controller_worker.loadbalancer_topology amp_compute_flavor = CONF.controller_worker.amp_flavor_id + if availability_zone: + amp_availability_zone = availability_zone.get( + constants.COMPUTE_ZONE) + amp_network = availability_zone.get(constants.MANAGEMENT_NETWORK) + if amp_network: + network_ids = [amp_network] + else: + amp_availability_zone = None try: if CONF.haproxy_amphora.build_rate_limit != -1: self.rate_limit.add_to_build_request_queue( @@ -113,7 +117,8 @@ class ComputeCreate(BaseComputeTask): port_ids=[port.id for port in ports], config_drive_files=config_drive_files, user_data=user_data, - server_group_id=server_group_id) + server_group_id=server_group_id, + availability_zone=amp_availability_zone) LOG.debug("Server created with id: %s for amphora id: %s", compute_id, amphora_id) @@ -144,7 +149,8 @@ class ComputeCreate(BaseComputeTask): class CertComputeCreate(ComputeCreate): def execute(self, amphora_id, server_pem, build_type_priority=constants.LB_CREATE_NORMAL_PRIORITY, - server_group_id=None, ports=None, flavor=None): + server_group_id=None, ports=None, flavor=None, + availability_zone=None): """Create an amphora :returns: an amphora @@ -162,7 +168,8 @@ class CertComputeCreate(ComputeCreate): return super(CertComputeCreate, self).execute( amphora_id, config_drive_files=config_drive_files, build_type_priority=build_type_priority, - server_group_id=server_group_id, ports=ports, flavor=flavor) + server_group_id=server_group_id, ports=ports, flavor=flavor, + availability_zone=availability_zone) class DeleteAmphoraeOnLoadBalancer(BaseComputeTask): diff --git a/octavia/controller/worker/v2/tasks/database_tasks.py b/octavia/controller/worker/v2/tasks/database_tasks.py index c259f0c4dd..893f3c89b6 100644 --- a/octavia/controller/worker/v2/tasks/database_tasks.py +++ b/octavia/controller/worker/v2/tasks/database_tasks.py @@ -505,7 +505,8 @@ class AssociateFailoverAmphoraWithLBID(BaseDatabaseTask): class MapLoadbalancerToAmphora(BaseDatabaseTask): """Maps and assigns a load balancer to an amphora in the database.""" - def execute(self, loadbalancer_id, server_group_id=None, flavor=None): + def execute(self, loadbalancer_id, server_group_id=None, flavor=None, + availability_zone=None): """Allocates an Amphora for the load balancer in the database. :param loadbalancer_id: The load balancer id to map to an amphora @@ -528,9 +529,15 @@ class MapLoadbalancerToAmphora(BaseDatabaseTask): "allocation.") return None + if availability_zone: + amp_az = availability_zone.get(constants.COMPUTE_ZONE) + else: + amp_az = CONF.nova.availability_zone + amp = self.amphora_repo.allocate_and_associate( db_apis.get_session(), - loadbalancer_id) + loadbalancer_id, + amp_az) if amp is None: LOG.debug("No Amphora available for load balancer with id %s", loadbalancer_id) diff --git a/octavia/db/migration/alembic_migrations/versions/8ac4ed24df3a_add_availability_zone_to_lb.py b/octavia/db/migration/alembic_migrations/versions/8ac4ed24df3a_add_availability_zone_to_lb.py new file mode 100644 index 0000000000..d3dcdc14b5 --- /dev/null +++ b/octavia/db/migration/alembic_migrations/versions/8ac4ed24df3a_add_availability_zone_to_lb.py @@ -0,0 +1,41 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""add availability_zone to lb + +Revision ID: 8ac4ed24df3a +Revises: c761c8a71579 +Create Date: 2019-11-13 08:37:39.392163 + +""" + +from alembic import op +import sqlalchemy as sa + +# revision identifiers, used by Alembic. +revision = '8ac4ed24df3a' +down_revision = 'c761c8a71579' + + +def upgrade(): + op.add_column(u'load_balancer', + sa.Column(u'availability_zone', + sa.String(255), + nullable=True) + ) + + op.create_foreign_key( + u'fk_load_balancer_availability_zone_name', u'load_balancer', + u'availability_zone', [u'availability_zone'], [u'name'] + ) diff --git a/octavia/db/models.py b/octavia/db/models.py index 4cd3b39dca..f750bef87f 100644 --- a/octavia/db/models.py +++ b/octavia/db/models.py @@ -401,6 +401,11 @@ class LoadBalancer(base_models.BASE, base_models.IdMixin, flavor_id = sa.Column( sa.String(36), sa.ForeignKey("flavor.id", name="fk_lb_flavor_id"), nullable=True) + availability_zone = sa.Column( + sa.String(255), + sa.ForeignKey("availability_zone.name", + name="fk_load_balancer_availability_zone_name"), + nullable=True) class VRRPGroup(base_models.BASE): diff --git a/octavia/db/repositories.py b/octavia/db/repositories.py index 617984c5b9..56f3475432 100644 --- a/octavia/db/repositories.py +++ b/octavia/db/repositories.py @@ -1185,7 +1185,8 @@ class AmphoraRepository(BaseRepository): load_balancer.amphorae.append(amphora) @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) - def allocate_and_associate(self, session, load_balancer_id): + def allocate_and_associate(self, session, load_balancer_id, + availability_zone=None): """Allocate an amphora for a load balancer. For v0.5 this is simple, find a free amp and @@ -1196,13 +1197,23 @@ class AmphoraRepository(BaseRepository): :param load_balancer_id: The load balancer id to associate :returns: The amphora ID for the load balancer or None """ + filters = { + 'status': 'READY', + 'load_balancer_id': None + } + if availability_zone: + LOG.debug("Filtering amps by zone: %s", availability_zone) + filters['cached_zone'] = availability_zone + with session.begin(subtransactions=True): amp = session.query(self.model_class).with_for_update().filter_by( - status='READY', load_balancer_id=None).first() + **filters).first() if amp is None: return None + if availability_zone: + LOG.debug("Found amp: %s in %s", amp.id, amp.cached_zone) amp.status = 'ALLOCATED' amp.load_balancer_id = load_balancer_id @@ -1237,14 +1248,21 @@ class AmphoraRepository(BaseRepository): return db_lb.to_data_model() return None - def get_spare_amphora_count(self, session): + def get_spare_amphora_count(self, session, availability_zone=None): """Get the count of the spare amphora. :returns: Number of current spare amphora. 
""" + filters = { + 'status': consts.AMPHORA_READY, + 'load_balancer_id': None + } + if availability_zone is not None: + filters['cached_zone'] = availability_zone + with session.begin(subtransactions=True): count = session.query(self.model_class).filter_by( - status=consts.AMPHORA_READY, load_balancer_id=None).count() + **filters).count() return count @@ -1956,7 +1974,7 @@ class AvailabilityZoneRepository(_GetALLExceptDELETEDIdMixin, BaseRepository): def delete(self, serial_session, **filters): """Special delete method for availability_zone. - Sets DELETED LBs availability_zone_id to NIL_UUID, then removes the + Sets DELETED LBs availability_zone to NIL_UUID, then removes the availability_zone. :param serial_session: A Sql Alchemy database transaction session. @@ -1965,12 +1983,11 @@ class AvailabilityZoneRepository(_GetALLExceptDELETEDIdMixin, BaseRepository): :raises: odb_exceptions.DBReferenceError :raises: sqlalchemy.orm.exc.NoResultFound """ - # TODO(sorrison): Uncomment this - # (serial_session.query(models.LoadBalancer). - # filter(models.LoadBalancer.availability_zone_id == filters['id']). - # filter(models.LoadBalancer.provisioning_status == consts.DELETED). - # update({models.LoadBalancer.availability_zone_id: consts.NIL_UUID}, - # synchronize_session=False)) + (serial_session.query(models.LoadBalancer). + filter(models.LoadBalancer.availability_zone == filters[consts.NAME]). + filter(models.LoadBalancer.provisioning_status == consts.DELETED). + update({models.LoadBalancer.availability_zone: consts.NIL_UUID}, + synchronize_session=False)) availability_zone = ( serial_session.query(self.model_class).filter_by(**filters).one()) serial_session.delete(availability_zone) diff --git a/octavia/tests/functional/api/v2/test_availability_zones.py b/octavia/tests/functional/api/v2/test_availability_zones.py index 5172369291..e085ba04c1 100644 --- a/octavia/tests/functional/api/v2/test_availability_zones.py +++ b/octavia/tests/functional/api/v2/test_availability_zones.py @@ -555,8 +555,6 @@ class TestAvailabilityZones(base.BaseAPITest): self.assertEqual('name1', response.get('name')) def test_delete_in_use(self): - # TODO(sorrison): Enable this test - self.skipTest("Enable in next patch when LB can use AZ") az = self.create_availability_zone( 'name1', 'description', self.azp.get('id'), True) project_id = uuidutils.generate_uuid() @@ -564,7 +562,7 @@ class TestAvailabilityZones(base.BaseAPITest): self.create_load_balancer(lb_id, name='lb1', project_id=project_id, description='desc1', - availability_zone_name=az.get('name'), + availability_zone=az.get('name'), admin_state_up=False) self.delete(self.AZ_PATH.format(az_name=az.get('name')), status=409) diff --git a/octavia/tests/functional/api/v2/test_load_balancer.py b/octavia/tests/functional/api/v2/test_load_balancer.py index 8aa14e1e7e..8fd49f7911 100644 --- a/octavia/tests/functional/api/v2/test_load_balancer.py +++ b/octavia/tests/functional/api/v2/test_load_balancer.py @@ -1031,6 +1031,68 @@ class TestLoadBalancer(base.BaseAPITest): self.assertEqual('noop_driver', api_lb.get('provider')) self.assertEqual(test_flavor_id, api_lb.get('flavor_id')) + def test_create_with_availability_zone(self, **optionals): + zone_name = 'nova' + azp = self.create_availability_zone_profile( + 'test1', 'noop_driver', '{"compute_zone": "%s"}' % zone_name) + az = self.create_availability_zone(zone_name, 'description', + azp.get('id'), True) + + api_lb = self.test_create(availability_zone=az.get('name')) + self.assertEqual(zone_name, 
api_lb.get('availability_zone')) + + def test_create_az_disabled(self, **optionals): + zone_name = 'nova' + azp = self.create_availability_zone_profile( + 'test1', 'noop_driver', '{"compute_zone": "%s"}' % zone_name) + az = self.create_availability_zone(zone_name, 'description', + azp.get('id'), False) + + lb_json = {'name': 'test1', + 'vip_subnet_id': uuidutils.generate_uuid(), + 'project_id': self.project_id, + 'availability_zone': az.get('name'), + } + lb_json.update(optionals) + body = self._build_body(lb_json) + response = self.post(self.LBS_PATH, body, status=400) + ref_faultstring = ('The selected availability_zone is not allowed in ' + 'this deployment: {}'.format(zone_name)) + self.assertEqual(ref_faultstring, response.json.get('faultstring')) + + def test_create_az_missing(self, **optionals): + lb_json = {'name': 'test1', + 'vip_subnet_id': uuidutils.generate_uuid(), + 'project_id': self.project_id, + 'availability_zone': 'bogus-az', + } + lb_json.update(optionals) + body = self._build_body(lb_json) + response = self.post(self.LBS_PATH, body, status=400) + ref_faultstring = 'Validation failure: Invalid availability zone.' + self.assertEqual(ref_faultstring, response.json.get('faultstring')) + + @mock.patch('octavia.api.drivers.utils.call_provider') + def test_create_az_unsupported(self, mock_provider): + zone_name = 'nova' + azp = self.create_availability_zone_profile( + 'test1', 'noop_driver', '{"compute_zone": "%s"}' % zone_name) + az = self.create_availability_zone(zone_name, 'description', + azp.get('id'), True) + mock_provider.side_effect = NotImplementedError + + lb_json = {'name': 'test1', + 'vip_subnet_id': uuidutils.generate_uuid(), + 'project_id': self.project_id, + 'availability_zone': az.get('name'), + } + body = self._build_body(lb_json) + response = self.post(self.LBS_PATH, body, status=501) + ref_faultstring = ("Provider \'noop_driver\' does not support a " + "requested action: This provider does not support " + "availability zones.") + self.assertEqual(ref_faultstring, response.json.get('faultstring')) + def test_matching_providers(self, **optionals): fp = self.create_flavor_profile('test1', 'noop_driver', '{"image": "ubuntu"}') @@ -2504,6 +2566,7 @@ class TestLoadBalancerGraph(base.BaseAPITest): expected_lb = { 'description': '', 'admin_state_up': True, + 'availability_zone': None, 'provisioning_status': constants.PENDING_CREATE, 'operating_status': constants.OFFLINE, # TODO(rm_work): vip_network_id is a weird case, as it will be @@ -2515,7 +2578,7 @@ class TestLoadBalancerGraph(base.BaseAPITest): 'vip_qos_policy_id': None, 'flavor_id': None, 'provider': 'noop_driver', - 'tags': [] + 'tags': [], } expected_lb.update(create_lb) expected_lb['listeners'] = expected_listeners diff --git a/octavia/tests/functional/db/test_repositories.py b/octavia/tests/functional/db/test_repositories.py index 436c3ba2d9..98ae1ad091 100644 --- a/octavia/tests/functional/db/test_repositories.py +++ b/octavia/tests/functional/db/test_repositories.py @@ -168,6 +168,7 @@ class AllRepositoriesTest(base.OctaviaDBTestBase): del lb_dm_dict['pools'] del lb_dm_dict['created_at'] del lb_dm_dict['updated_at'] + self.assertIsNone(lb_dm_dict.pop('availability_zone')) self.assertEqual(lb, lb_dm_dict) vip_dm_dict = lb_dm.vip.to_dict() vip_dm_dict['load_balancer_id'] = lb_dm.id diff --git a/octavia/tests/unit/api/drivers/amphora_driver/v1/test_amphora_driver.py b/octavia/tests/unit/api/drivers/amphora_driver/v1/test_amphora_driver.py index 93a42a1347..b7bdbd55e5 100644 --- 
a/octavia/tests/unit/api/drivers/amphora_driver/v1/test_amphora_driver.py +++ b/octavia/tests/unit/api/drivers/amphora_driver/v1/test_amphora_driver.py @@ -13,10 +13,9 @@ # under the License. import mock -from oslo_utils import uuidutils - from octavia_lib.api.drivers import data_models as driver_dm from octavia_lib.api.drivers import exceptions +from oslo_utils import uuidutils from octavia.api.drivers.amphora_driver.v1 import driver from octavia.common import constants as consts @@ -62,7 +61,8 @@ class TestAmphoraDriver(base.TestRpc): loadbalancer_id=self.sample_data.lb_id) self.amp_driver.loadbalancer_create(provider_lb) payload = {consts.LOAD_BALANCER_ID: self.sample_data.lb_id, - consts.FLAVOR: None} + consts.FLAVOR: None, + consts.AVAILABILITY_ZONE: None} mock_cast.assert_called_with({}, 'create_load_balancer', **payload) @mock.patch('oslo_messaging.RPCClient.cast') diff --git a/octavia/tests/unit/api/drivers/amphora_driver/v2/test_amphora_driver.py b/octavia/tests/unit/api/drivers/amphora_driver/v2/test_amphora_driver.py index 26d24ebd88..dad650d257 100644 --- a/octavia/tests/unit/api/drivers/amphora_driver/v2/test_amphora_driver.py +++ b/octavia/tests/unit/api/drivers/amphora_driver/v2/test_amphora_driver.py @@ -13,10 +13,9 @@ # under the License. import mock -from oslo_utils import uuidutils - from octavia_lib.api.drivers import data_models as driver_dm from octavia_lib.api.drivers import exceptions +from oslo_utils import uuidutils from octavia.api.drivers.amphora_driver.v2 import driver from octavia.common import constants as consts @@ -62,7 +61,8 @@ class TestAmphoraDriver(base.TestRpc): loadbalancer_id=self.sample_data.lb_id) self.amp_driver.loadbalancer_create(provider_lb) payload = {consts.LOAD_BALANCER_ID: self.sample_data.lb_id, - consts.FLAVOR: None} + consts.FLAVOR: None, + consts.AVAILABILITY_ZONE: None} mock_cast.assert_called_with({}, 'create_load_balancer', **payload) @mock.patch('oslo_messaging.RPCClient.cast') diff --git a/octavia/tests/unit/compute/drivers/test_nova_driver.py b/octavia/tests/unit/compute/drivers/test_nova_driver.py index 5e5ca7e8d8..e4718afc31 100644 --- a/octavia/tests/unit/compute/drivers/test_nova_driver.py +++ b/octavia/tests/unit/compute/drivers/test_nova_driver.py @@ -219,6 +219,35 @@ class TestNovaClient(base.TestCase): def test_build_with_availability_zone(self): FAKE_AZ = "my_availability_zone" + + amphora_id = self.manager.build(amphora_flavor=1, image_id=1, + key_name=1, + sec_groups=1, + network_ids=[1], + port_ids=[2], + user_data='Blah', + config_drive_files='Files Blah', + availability_zone=FAKE_AZ) + + self.assertEqual(self.amphora.compute_id, amphora_id) + + self.manager.manager.create.assert_called_with( + name="amphora_name", + nics=[{'net-id': 1}, {'port-id': 2}], + image=1, + flavor=1, + key_name=1, + security_groups=1, + files='Files Blah', + userdata='Blah', + config_drive=True, + scheduler_hints=None, + availability_zone=FAKE_AZ, + block_device_mapping={} + ) + + def test_build_with_availability_zone_config(self): + FAKE_AZ = "my_availability_zone" self.conf.config(group="nova", availability_zone=FAKE_AZ) amphora_id = self.manager.build(amphora_flavor=1, image_id=1, diff --git a/octavia/tests/unit/controller/housekeeping/test_house_keeping.py b/octavia/tests/unit/controller/housekeeping/test_house_keeping.py index 16926dff0e..fd5d3e17a4 100644 --- a/octavia/tests/unit/controller/housekeeping/test_house_keeping.py +++ b/octavia/tests/unit/controller/housekeeping/test_house_keeping.py @@ -48,9 +48,11 @@ class 
TestSpareCheck(base.TestCase): super(TestSpareCheck, self).setUp() self.spare_amp = house_keeping.SpareAmphora() self.amp_repo = mock.MagicMock() + self.az_repo = mock.MagicMock() self.cw = mock.MagicMock() self.spare_amp.amp_repo = self.amp_repo + self.spare_amp.az_repo = self.az_repo self.spare_amp.cw = self.cw self.CONF = self.useFixture(oslo_fixture.Config(cfg.CONF)) @@ -62,12 +64,43 @@ class TestSpareCheck(base.TestCase): spare_amphora_pool_size=self.FAKE_CNF_SPAR1) self.amp_repo.get_spare_amphora_count.return_value = ( self.FAKE_CUR_SPAR1) + self.az_repo.get_all.return_value = [], None self.spare_amp.spare_check() self.assertTrue(self.amp_repo.get_spare_amphora_count.called) DIFF_CNT = self.FAKE_CNF_SPAR1 - self.FAKE_CUR_SPAR1 self.assertEqual(DIFF_CNT, self.cw.create_amphora.call_count) + @mock.patch('octavia.db.api.get_session') + def test_spare_check_diff_count_multi_az(self, session): + """When spare amphora count does not meet the requirement. + + Tests when multiple availabilty zones active + """ + session.return_value = session + self.CONF.config(group="house_keeping", + spare_amphora_pool_size=self.FAKE_CNF_SPAR1) + az1 = mock.Mock() + az1.name = 'az1' + az2 = mock.Mock() + az2.name = 'az2' + self.az_repo.get_all.return_value = [az1, az2], None + self.amp_repo.get_spare_amphora_count.return_value = ( + self.FAKE_CUR_SPAR1) + self.az_repo.get_availability_zone_metadata_dict().get.side_effect = ( + az1.name, az2.name) + self.spare_amp.spare_check() + + calls = [mock.call(session, availability_zone=az1.name), + mock.call(session, availability_zone=az2.name)] + self.amp_repo.get_spare_amphora_count.assert_has_calls(calls, + any_order=True) + + # 2 AZs so twice as many calls + DIFF_CNT = (self.FAKE_CNF_SPAR1 - self.FAKE_CUR_SPAR1) * 2 + + self.assertEqual(DIFF_CNT, self.cw.create_amphora.call_count) + @mock.patch('octavia.db.api.get_session') def test_spare_check_no_diff_count(self, session): """When spare amphora count meets the requirement.""" @@ -76,6 +109,7 @@ class TestSpareCheck(base.TestCase): spare_amphora_pool_size=self.FAKE_CNF_SPAR2) self.amp_repo.get_spare_amphora_count.return_value = ( self.FAKE_CUR_SPAR2) + self.az_repo.get_all.return_value = [], None self.spare_amp.spare_check() self.assertTrue(self.amp_repo.get_spare_amphora_count.called) DIFF_CNT = self.FAKE_CNF_SPAR2 - self.FAKE_CUR_SPAR2 diff --git a/octavia/tests/unit/controller/queue/v1/test_endpoints.py b/octavia/tests/unit/controller/queue/v1/test_endpoints.py index cc7c15f16a..16d3e585d0 100644 --- a/octavia/tests/unit/controller/queue/v1/test_endpoints.py +++ b/octavia/tests/unit/controller/queue/v1/test_endpoints.py @@ -41,17 +41,19 @@ class TestEndpoints(base.TestCase): self.resource_id = 1234 self.server_group_id = 3456 self.flavor_id = uuidutils.generate_uuid() + self.availability_zone = uuidutils.generate_uuid() def test_create_load_balancer(self): self.ep.create_load_balancer(self.context, self.resource_id, - flavor=self.flavor_id) + flavor=self.flavor_id, + availability_zone=self.availability_zone) self.ep.worker.create_load_balancer.assert_called_once_with( - self.resource_id, self.flavor_id) + self.resource_id, self.flavor_id, self.availability_zone) - def test_create_load_balancer_no_flavor(self): + def test_create_load_balancer_no_flavor_or_az(self): self.ep.create_load_balancer(self.context, self.resource_id) self.ep.worker.create_load_balancer.assert_called_once_with( - self.resource_id, None) + self.resource_id, None, None) def test_update_load_balancer(self): 
self.ep.update_load_balancer(self.context, self.resource_id, diff --git a/octavia/tests/unit/controller/queue/v2/test_endpoints.py b/octavia/tests/unit/controller/queue/v2/test_endpoints.py index 3c06def931..820e6baedb 100644 --- a/octavia/tests/unit/controller/queue/v2/test_endpoints.py +++ b/octavia/tests/unit/controller/queue/v2/test_endpoints.py @@ -42,17 +42,19 @@ class TestEndpoints(base.TestCase): self.server_group_id = 3456 self.listener_dict = {constants.ID: uuidutils.generate_uuid()} self.flavor_id = uuidutils.generate_uuid() + self.availability_zone = uuidutils.generate_uuid() def test_create_load_balancer(self): self.ep.create_load_balancer(self.context, self.resource_id, - flavor=self.flavor_id) + flavor=self.flavor_id, + availability_zone=self.availability_zone) self.ep.worker.create_load_balancer.assert_called_once_with( - self.resource_id, self.flavor_id) + self.resource_id, self.flavor_id, self.availability_zone) - def test_create_load_balancer_no_flavor(self): + def test_create_load_balancer_no_flavor_or_az(self): self.ep.create_load_balancer(self.context, self.resource_id) self.ep.worker.create_load_balancer.assert_called_once_with( - self.resource_id, None) + self.resource_id, None, None) def test_update_load_balancer(self): self.ep.update_load_balancer(self.context, self.resource_id, diff --git a/octavia/tests/unit/controller/worker/v1/flows/test_amphora_flows.py b/octavia/tests/unit/controller/worker/v1/flows/test_amphora_flows.py index 30b6828c38..e2bd5b5a26 100644 --- a/octavia/tests/unit/controller/worker/v1/flows/test_amphora_flows.py +++ b/octavia/tests/unit/controller/worker/v1/flows/test_amphora_flows.py @@ -57,7 +57,7 @@ class TestAmphoraFlows(base.TestCase): self.assertIn(constants.SERVER_PEM, amp_flow.provides) self.assertEqual(5, len(amp_flow.provides)) - self.assertEqual(2, len(amp_flow.requires)) + self.assertEqual(3, len(amp_flow.requires)) def test_get_create_amphora_flow_cert(self, mock_get_net_driver): self.AmpFlow = amphora_flows.AmphoraFlows() @@ -71,7 +71,7 @@ class TestAmphoraFlows(base.TestCase): self.assertIn(constants.COMPUTE_ID, amp_flow.provides) self.assertEqual(5, len(amp_flow.provides)) - self.assertEqual(2, len(amp_flow.requires)) + self.assertEqual(3, len(amp_flow.requires)) def test_get_create_amphora_for_lb_flow(self, mock_get_net_driver): @@ -89,7 +89,7 @@ class TestAmphoraFlows(base.TestCase): self.assertIn(constants.SERVER_PEM, amp_flow.provides) self.assertEqual(5, len(amp_flow.provides)) - self.assertEqual(3, len(amp_flow.requires)) + self.assertEqual(4, len(amp_flow.requires)) def test_get_cert_create_amphora_for_lb_flow(self, mock_get_net_driver): @@ -109,7 +109,7 @@ class TestAmphoraFlows(base.TestCase): self.assertIn(constants.SERVER_PEM, amp_flow.provides) self.assertEqual(5, len(amp_flow.provides)) - self.assertEqual(3, len(amp_flow.requires)) + self.assertEqual(4, len(amp_flow.requires)) def test_get_cert_master_create_amphora_for_lb_flow( self, mock_get_net_driver): @@ -130,7 +130,7 @@ class TestAmphoraFlows(base.TestCase): self.assertIn(constants.SERVER_PEM, amp_flow.provides) self.assertEqual(5, len(amp_flow.provides)) - self.assertEqual(3, len(amp_flow.requires)) + self.assertEqual(4, len(amp_flow.requires)) def test_get_cert_master_rest_anti_affinity_create_amphora_for_lb_flow( self, mock_get_net_driver): @@ -149,7 +149,7 @@ class TestAmphoraFlows(base.TestCase): self.assertIn(constants.SERVER_PEM, amp_flow.provides) self.assertEqual(5, len(amp_flow.provides)) - self.assertEqual(4, len(amp_flow.requires)) + 
self.assertEqual(5, len(amp_flow.requires)) self.conf.config(group="nova", enable_anti_affinity=False) def test_get_cert_backup_create_amphora_for_lb_flow( @@ -170,7 +170,7 @@ class TestAmphoraFlows(base.TestCase): self.assertIn(constants.SERVER_PEM, amp_flow.provides) self.assertEqual(5, len(amp_flow.provides)) - self.assertEqual(3, len(amp_flow.requires)) + self.assertEqual(4, len(amp_flow.requires)) def test_get_cert_bogus_create_amphora_for_lb_flow( self, mock_get_net_driver): @@ -190,7 +190,7 @@ class TestAmphoraFlows(base.TestCase): self.assertIn(constants.SERVER_PEM, amp_flow.provides) self.assertEqual(5, len(amp_flow.provides)) - self.assertEqual(3, len(amp_flow.requires)) + self.assertEqual(4, len(amp_flow.requires)) def test_get_cert_backup_rest_anti_affinity_create_amphora_for_lb_flow( self, mock_get_net_driver): @@ -208,7 +208,7 @@ class TestAmphoraFlows(base.TestCase): self.assertIn(constants.SERVER_PEM, amp_flow.provides) self.assertEqual(5, len(amp_flow.provides)) - self.assertEqual(4, len(amp_flow.requires)) + self.assertEqual(5, len(amp_flow.requires)) self.conf.config(group="nova", enable_anti_affinity=False) def test_get_delete_amphora_flow(self, mock_get_net_driver): @@ -259,7 +259,7 @@ class TestAmphoraFlows(base.TestCase): self.assertIn(constants.LISTENERS, amp_flow.provides) self.assertIn(constants.LOADBALANCER, amp_flow.provides) - self.assertEqual(4, len(amp_flow.requires)) + self.assertEqual(5, len(amp_flow.requires)) self.assertEqual(12, len(amp_flow.provides)) amp_flow = self.AmpFlow.get_failover_flow( @@ -279,7 +279,7 @@ class TestAmphoraFlows(base.TestCase): self.assertIn(constants.LISTENERS, amp_flow.provides) self.assertIn(constants.LOADBALANCER, amp_flow.provides) - self.assertEqual(4, len(amp_flow.requires)) + self.assertEqual(5, len(amp_flow.requires)) self.assertEqual(12, len(amp_flow.provides)) amp_flow = self.AmpFlow.get_failover_flow( @@ -299,7 +299,7 @@ class TestAmphoraFlows(base.TestCase): self.assertIn(constants.LISTENERS, amp_flow.provides) self.assertIn(constants.LOADBALANCER, amp_flow.provides) - self.assertEqual(4, len(amp_flow.requires)) + self.assertEqual(5, len(amp_flow.requires)) self.assertEqual(12, len(amp_flow.provides)) amp_flow = self.AmpFlow.get_failover_flow( @@ -319,7 +319,7 @@ class TestAmphoraFlows(base.TestCase): self.assertIn(constants.LISTENERS, amp_flow.provides) self.assertIn(constants.LOADBALANCER, amp_flow.provides) - self.assertEqual(4, len(amp_flow.requires)) + self.assertEqual(5, len(amp_flow.requires)) self.assertEqual(12, len(amp_flow.provides)) def test_get_failover_flow_spare(self, mock_get_net_driver): diff --git a/octavia/tests/unit/controller/worker/v1/flows/test_load_balancer_flows.py b/octavia/tests/unit/controller/worker/v1/flows/test_load_balancer_flows.py index 19fea84da1..3419644c74 100644 --- a/octavia/tests/unit/controller/worker/v1/flows/test_load_balancer_flows.py +++ b/octavia/tests/unit/controller/worker/v1/flows/test_load_balancer_flows.py @@ -195,7 +195,7 @@ class TestLoadBalancerFlows(base.TestCase): self.assertIn(constants.AMP_DATA, create_flow.provides) self.assertIn(constants.AMPHORA_NETWORK_CONFIG, create_flow.provides) - self.assertEqual(4, len(create_flow.requires)) + self.assertEqual(5, len(create_flow.requires)) self.assertEqual(13, len(create_flow.provides), create_flow.provides) @@ -223,6 +223,6 @@ class TestLoadBalancerFlows(base.TestCase): self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, create_flow.provides) - self.assertEqual(4, len(create_flow.requires)) + self.assertEqual(5, 
len(create_flow.requires)) self.assertEqual(14, len(create_flow.provides), create_flow.provides) diff --git a/octavia/tests/unit/controller/worker/v1/tasks/test_compute_tasks.py b/octavia/tests/unit/controller/worker/v1/tasks/test_compute_tasks.py index 1518b274bf..50fe7afe64 100644 --- a/octavia/tests/unit/controller/worker/v1/tasks/test_compute_tasks.py +++ b/octavia/tests/unit/controller/worker/v1/tasks/test_compute_tasks.py @@ -124,7 +124,8 @@ class TestComputeTasks(base.TestCase): 'amphora-agent.conf': 'test_conf', '/etc/rsyslog.d/10-rsyslog.conf': 'FAKE CFG'}, user_data=None, - server_group_id=SERVER_GRPOUP_ID) + server_group_id=SERVER_GRPOUP_ID, + availability_zone=None) # Make sure it returns the expected compute_id self.assertEqual(COMPUTE_ID, compute_id) @@ -184,7 +185,77 @@ class TestComputeTasks(base.TestCase): port_ids=[PORT_ID], config_drive_files=None, user_data='test_ud_conf', - server_group_id=None) + server_group_id=None, + availability_zone=None) + + # Make sure it returns the expected compute_id + self.assertEqual(COMPUTE_ID, compute_id) + + # Test that a build exception is raised + createcompute = compute_tasks.ComputeCreate() + + self.assertRaises(TypeError, + createcompute.execute, + _amphora_mock, config_drive_files='test_cert') + + # Test revert() + + _amphora_mock.compute_id = COMPUTE_ID + + createcompute = compute_tasks.ComputeCreate() + createcompute.revert(compute_id, _amphora_mock.id) + + # Validate that the delete method was called properly + mock_driver.delete.assert_called_once_with( + COMPUTE_ID) + + # Test that a delete exception is not raised + + createcompute.revert(COMPUTE_ID, _amphora_mock.id) + + @mock.patch('octavia.common.jinja.logging.logging_jinja_cfg.' + 'LoggingJinjaTemplater.build_logging_config') + @mock.patch('jinja2.Environment.get_template') + @mock.patch('octavia.amphorae.backends.agent.' + 'agent_jinja_cfg.AgentJinjaTemplater.' 
+ 'build_agent_config', return_value='test_conf') + @mock.patch('stevedore.driver.DriverManager.driver') + def test_compute_create_availability_zone(self, mock_driver, mock_conf, + mock_jinja, mock_log_cfg): + + image_owner_id = uuidutils.generate_uuid() + compute_zone = uuidutils.generate_uuid() + az_dict = {constants.COMPUTE_ZONE: compute_zone} + + self.conf.config( + group="controller_worker", amp_image_owner_id=image_owner_id) + mock_log_cfg.return_value = 'FAKE CFG' + + createcompute = compute_tasks.ComputeCreate() + + mock_driver.build.return_value = COMPUTE_ID + # Test execute() + compute_id = createcompute.execute(_amphora_mock.id, ports=[_port], + server_group_id=SERVER_GRPOUP_ID, + availability_zone=az_dict) + + # Validate that the build method was called properly + mock_driver.build.assert_called_once_with( + name="amphora-" + _amphora_mock.id, + amphora_flavor=AMP_FLAVOR_ID, + image_id=AMP_IMAGE_ID, + image_tag=AMP_IMAGE_TAG, + image_owner=image_owner_id, + key_name=AMP_SSH_KEY_NAME, + sec_groups=AMP_SEC_GROUPS, + network_ids=AMP_NET, + port_ids=[PORT_ID], + config_drive_files={'/etc/octavia/' + 'amphora-agent.conf': 'test_conf', + '/etc/rsyslog.d/10-rsyslog.conf': 'FAKE CFG'}, + user_data=None, + server_group_id=SERVER_GRPOUP_ID, + availability_zone=compute_zone) # Make sure it returns the expected compute_id self.assertEqual(COMPUTE_ID, compute_id) @@ -249,7 +320,8 @@ class TestComputeTasks(base.TestCase): 'amphora-agent.conf': 'test_conf', '/etc/rsyslog.d/10-rsyslog.conf': 'FAKE CFG'}, user_data=None, - server_group_id=SERVER_GRPOUP_ID) + server_group_id=SERVER_GRPOUP_ID, + availability_zone=None) self.assertEqual(COMPUTE_ID, compute_id) @@ -317,7 +389,8 @@ class TestComputeTasks(base.TestCase): '/etc/octavia/certs/server.pem': fer.decrypt(test_cert), '/etc/octavia/certs/client_ca.pem': 'test', '/etc/octavia/amphora-agent.conf': 'test_conf'}, - server_group_id=SERVER_GRPOUP_ID) + server_group_id=SERVER_GRPOUP_ID, + availability_zone=None) self.assertEqual(COMPUTE_ID, compute_id) diff --git a/octavia/tests/unit/controller/worker/v1/tasks/test_database_tasks.py b/octavia/tests/unit/controller/worker/v1/tasks/test_database_tasks.py index c2ca7301eb..1bac5190bb 100644 --- a/octavia/tests/unit/controller/worker/v1/tasks/test_database_tasks.py +++ b/octavia/tests/unit/controller/worker/v1/tasks/test_database_tasks.py @@ -556,7 +556,53 @@ class TestDatabaseTasks(base.TestCase): repo.AmphoraRepository.allocate_and_associate.assert_called_once_with( 'TEST', - LB_ID) + LB_ID, + None) + + self.assertEqual(_amphora_mock.id, amp_id) + + amp_id = map_lb_to_amp.execute(self.loadbalancer_mock.id) + + self.assertIsNone(amp_id) + + # Test revert + map_lb_to_amp.revert(None, self.loadbalancer_mock.id) + repo.LoadBalancerRepository.update.assert_called_once_with( + 'TEST', + id=LB_ID, + provisioning_status=constants.ERROR) + + # Test revert with exception + repo.LoadBalancerRepository.update.reset_mock() + mock_loadbalancer_repo_update.side_effect = Exception('fail') + map_lb_to_amp.revert(None, self.loadbalancer_mock.id) + repo.LoadBalancerRepository.update.assert_called_once_with( + 'TEST', + id=LB_ID, + provisioning_status=constants.ERROR) + + @mock.patch('octavia.db.repositories.AmphoraRepository.' 
+ 'allocate_and_associate', + side_effect=[_amphora_mock, None]) + def test_map_loadbalancer_to_amphora_with_az(self, + mock_allocate_and_associate, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + map_lb_to_amp = database_tasks.MapLoadbalancerToAmphora() + amp_id = map_lb_to_amp.execute( + self.loadbalancer_mock.id, availability_zone={ + constants.COMPUTE_ZONE: 'fakeaz'}) + + repo.AmphoraRepository.allocate_and_associate.assert_called_once_with( + 'TEST', + LB_ID, + 'fakeaz') self.assertEqual(_amphora_mock.id, amp_id) diff --git a/octavia/tests/unit/controller/worker/v1/test_controller_worker.py b/octavia/tests/unit/controller/worker/v1/test_controller_worker.py index 7092fd55fc..db516b0d9b 100644 --- a/octavia/tests/unit/controller/worker/v1/test_controller_worker.py +++ b/octavia/tests/unit/controller/worker/v1/test_controller_worker.py @@ -120,8 +120,8 @@ class TestControllerWorker(base.TestCase): 'amphora_flows.AmphoraFlows.get_create_amphora_flow', return_value='TEST') def test_create_amphora(self, - mock_api_get_session, mock_get_create_amp_flow, + mock_api_get_session, mock_dyn_log_listener, mock_taskflow_load, mock_pool_repo_get, @@ -143,7 +143,49 @@ class TestControllerWorker(base.TestCase): 'TEST', store={constants.BUILD_TYPE_PRIORITY: constants.LB_CREATE_SPARES_POOL_PRIORITY, - constants.FLAVOR: None})) + constants.FLAVOR: None, + constants.AVAILABILITY_ZONE: None})) + + _flow_mock.run.assert_called_once_with() + + _flow_mock.storage.fetch.assert_called_once_with('amphora') + + self.assertEqual(AMP_ID, amp) + + @mock.patch('octavia.db.repositories.AvailabilityZoneRepository.' + 'get_availability_zone_metadata_dict') + @mock.patch('octavia.controller.worker.v1.flows.' + 'amphora_flows.AmphoraFlows.get_create_amphora_flow', + return_value='TEST') + def test_create_amphora_with_az(self, + mock_get_create_amp_flow, + mock_get_az_metadata, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + + _flow_mock.reset_mock() + az = 'fake_az' + az_data = {constants.COMPUTE_ZONE: az} + mock_get_az_metadata.return_value = az_data + cw = controller_worker.ControllerWorker() + amp = cw.create_amphora(availability_zone=az) + mock_get_az_metadata.assert_called_once_with(_db_session, az) + (base_taskflow.BaseTaskFlowEngine._taskflow_load. 
+ assert_called_once_with( + 'TEST', + store={constants.BUILD_TYPE_PRIORITY: + constants.LB_CREATE_SPARES_POOL_PRIORITY, + constants.FLAVOR: None, + constants.AVAILABILITY_ZONE: az_data})) _flow_mock.run.assert_called_once_with() @@ -423,7 +465,8 @@ class TestControllerWorker(base.TestCase): constants.LOADBALANCER_ID: LB_ID, 'update_dict': {'topology': constants.TOPOLOGY_SINGLE}, constants.BUILD_TYPE_PRIORITY: constants.LB_CREATE_NORMAL_PRIORITY, - constants.FLAVOR: None + constants.FLAVOR: None, + constants.AVAILABILITY_ZONE: None, } lb_mock = mock.MagicMock() lb_mock.listeners = [] @@ -470,7 +513,8 @@ class TestControllerWorker(base.TestCase): constants.LOADBALANCER_ID: LB_ID, 'update_dict': {'topology': constants.TOPOLOGY_ACTIVE_STANDBY}, constants.BUILD_TYPE_PRIORITY: constants.LB_CREATE_NORMAL_PRIORITY, - constants.FLAVOR: None + constants.FLAVOR: None, + constants.AVAILABILITY_ZONE: None, } setattr(mock_lb_repo_get.return_value, 'topology', constants.TOPOLOGY_ACTIVE_STANDBY) @@ -517,7 +561,8 @@ class TestControllerWorker(base.TestCase): constants.LOADBALANCER_ID: LB_ID, 'update_dict': {'topology': constants.TOPOLOGY_SINGLE}, constants.BUILD_TYPE_PRIORITY: constants.LB_CREATE_NORMAL_PRIORITY, - constants.FLAVOR: None + constants.FLAVOR: None, + constants.AVAILABILITY_ZONE: None, } cw = controller_worker.ControllerWorker() @@ -570,7 +615,8 @@ class TestControllerWorker(base.TestCase): constants.LOADBALANCER_ID: LB_ID, 'update_dict': {'topology': constants.TOPOLOGY_ACTIVE_STANDBY}, constants.BUILD_TYPE_PRIORITY: constants.LB_CREATE_NORMAL_PRIORITY, - constants.FLAVOR: None + constants.FLAVOR: None, + constants.AVAILABILITY_ZONE: None, } cw = controller_worker.ControllerWorker() @@ -1144,6 +1190,8 @@ class TestControllerWorker(base.TestCase): _flow_mock.run.assert_called_once_with() + @mock.patch('octavia.db.repositories.AvailabilityZoneRepository.' + 'get_availability_zone_metadata_dict', return_value={}) @mock.patch('octavia.db.repositories.FlavorRepository.' 'get_flavor_metadata_dict', return_value={}) @mock.patch('octavia.controller.worker.v1.flows.' @@ -1154,6 +1202,7 @@ class TestControllerWorker(base.TestCase): mock_update, mock_get_failover_flow, mock_get_flavor_meta, + mock_get_az_meta, mock_api_get_session, mock_dyn_log_listener, mock_taskflow_load, @@ -1179,7 +1228,8 @@ class TestControllerWorker(base.TestCase): _amphora_mock.load_balancer_id, constants.BUILD_TYPE_PRIORITY: constants.LB_CREATE_FAILOVER_PRIORITY, - constants.FLAVOR: {} + constants.FLAVOR: {}, + constants.AVAILABILITY_ZONE: {} })) _flow_mock.run.assert_called_once_with() @@ -1302,7 +1352,8 @@ class TestControllerWorker(base.TestCase): constants.LOADBALANCER_ID: None, constants.BUILD_TYPE_PRIORITY: constants.LB_CREATE_FAILOVER_PRIORITY, - constants.FLAVOR: {} + constants.FLAVOR: {}, + constants.AVAILABILITY_ZONE: {} })) _flow_mock.run.assert_called_once_with() @@ -1380,6 +1431,8 @@ class TestControllerWorker(base.TestCase): mock_update.assert_called_with(_db_session, 123, provisioning_status=constants.ERROR) + @mock.patch('octavia.db.repositories.AvailabilityZoneRepository.' + 'get_availability_zone_metadata_dict', return_value={}) @mock.patch('octavia.db.repositories.FlavorRepository.' 'get_flavor_metadata_dict', return_value={}) @mock.patch('octavia.controller.worker.v1.flows.' 
@@ -1394,6 +1447,7 @@ class TestControllerWorker(base.TestCase): mock_get_lb_for_amphora, mock_get_update_listener_flow, mock_get_flavor_meta, + mock_get_az_meta, mock_api_get_session, mock_dyn_log_listener, mock_taskflow_load, @@ -1422,7 +1476,8 @@ class TestControllerWorker(base.TestCase): constants.BUILD_TYPE_PRIORITY: constants.LB_CREATE_FAILOVER_PRIORITY, constants.SERVER_GROUP_ID: "123", - constants.FLAVOR: {} + constants.FLAVOR: {}, + constants.AVAILABILITY_ZONE: {} })) _flow_mock.run.assert_called_once_with() diff --git a/octavia/tests/unit/controller/worker/v2/flows/test_amphora_flows.py b/octavia/tests/unit/controller/worker/v2/flows/test_amphora_flows.py index 0996afc9f4..d8578546aa 100644 --- a/octavia/tests/unit/controller/worker/v2/flows/test_amphora_flows.py +++ b/octavia/tests/unit/controller/worker/v2/flows/test_amphora_flows.py @@ -89,7 +89,7 @@ class TestAmphoraFlows(base.TestCase): self.assertIn(constants.SERVER_PEM, amp_flow.provides) self.assertEqual(5, len(amp_flow.provides)) - self.assertEqual(3, len(amp_flow.requires)) + self.assertEqual(4, len(amp_flow.requires)) def test_get_cert_create_amphora_for_lb_flow(self, mock_get_net_driver): @@ -109,7 +109,7 @@ class TestAmphoraFlows(base.TestCase): self.assertIn(constants.SERVER_PEM, amp_flow.provides) self.assertEqual(5, len(amp_flow.provides)) - self.assertEqual(3, len(amp_flow.requires)) + self.assertEqual(4, len(amp_flow.requires)) def test_get_cert_master_create_amphora_for_lb_flow( self, mock_get_net_driver): @@ -130,7 +130,7 @@ class TestAmphoraFlows(base.TestCase): self.assertIn(constants.SERVER_PEM, amp_flow.provides) self.assertEqual(5, len(amp_flow.provides)) - self.assertEqual(3, len(amp_flow.requires)) + self.assertEqual(4, len(amp_flow.requires)) def test_get_cert_master_rest_anti_affinity_create_amphora_for_lb_flow( self, mock_get_net_driver): @@ -149,7 +149,7 @@ class TestAmphoraFlows(base.TestCase): self.assertIn(constants.SERVER_PEM, amp_flow.provides) self.assertEqual(5, len(amp_flow.provides)) - self.assertEqual(4, len(amp_flow.requires)) + self.assertEqual(5, len(amp_flow.requires)) self.conf.config(group="nova", enable_anti_affinity=False) def test_get_cert_backup_create_amphora_for_lb_flow( @@ -170,7 +170,7 @@ class TestAmphoraFlows(base.TestCase): self.assertIn(constants.SERVER_PEM, amp_flow.provides) self.assertEqual(5, len(amp_flow.provides)) - self.assertEqual(3, len(amp_flow.requires)) + self.assertEqual(4, len(amp_flow.requires)) def test_get_cert_bogus_create_amphora_for_lb_flow( self, mock_get_net_driver): @@ -190,7 +190,7 @@ class TestAmphoraFlows(base.TestCase): self.assertIn(constants.SERVER_PEM, amp_flow.provides) self.assertEqual(5, len(amp_flow.provides)) - self.assertEqual(3, len(amp_flow.requires)) + self.assertEqual(4, len(amp_flow.requires)) def test_get_cert_backup_rest_anti_affinity_create_amphora_for_lb_flow( self, mock_get_net_driver): @@ -208,7 +208,7 @@ class TestAmphoraFlows(base.TestCase): self.assertIn(constants.SERVER_PEM, amp_flow.provides) self.assertEqual(5, len(amp_flow.provides)) - self.assertEqual(4, len(amp_flow.requires)) + self.assertEqual(5, len(amp_flow.requires)) self.conf.config(group="nova", enable_anti_affinity=False) def test_get_delete_amphora_flow(self, mock_get_net_driver): @@ -259,7 +259,7 @@ class TestAmphoraFlows(base.TestCase): self.assertIn(constants.LISTENERS, amp_flow.provides) self.assertIn(constants.LOADBALANCER, amp_flow.provides) - self.assertEqual(4, len(amp_flow.requires)) + self.assertEqual(5, len(amp_flow.requires)) self.assertEqual(12, 
len(amp_flow.provides)) amp_flow = self.AmpFlow.get_failover_flow( @@ -279,7 +279,7 @@ class TestAmphoraFlows(base.TestCase): self.assertIn(constants.LISTENERS, amp_flow.provides) self.assertIn(constants.LOADBALANCER, amp_flow.provides) - self.assertEqual(4, len(amp_flow.requires)) + self.assertEqual(5, len(amp_flow.requires)) self.assertEqual(12, len(amp_flow.provides)) amp_flow = self.AmpFlow.get_failover_flow( @@ -299,7 +299,7 @@ class TestAmphoraFlows(base.TestCase): self.assertIn(constants.LISTENERS, amp_flow.provides) self.assertIn(constants.LOADBALANCER, amp_flow.provides) - self.assertEqual(4, len(amp_flow.requires)) + self.assertEqual(5, len(amp_flow.requires)) self.assertEqual(12, len(amp_flow.provides)) amp_flow = self.AmpFlow.get_failover_flow( @@ -319,7 +319,7 @@ class TestAmphoraFlows(base.TestCase): self.assertIn(constants.LISTENERS, amp_flow.provides) self.assertIn(constants.LOADBALANCER, amp_flow.provides) - self.assertEqual(4, len(amp_flow.requires)) + self.assertEqual(5, len(amp_flow.requires)) self.assertEqual(12, len(amp_flow.provides)) def test_get_failover_flow_spare(self, mock_get_net_driver): diff --git a/octavia/tests/unit/controller/worker/v2/flows/test_load_balancer_flows.py b/octavia/tests/unit/controller/worker/v2/flows/test_load_balancer_flows.py index ed5f4f6e2b..897f0d86e0 100644 --- a/octavia/tests/unit/controller/worker/v2/flows/test_load_balancer_flows.py +++ b/octavia/tests/unit/controller/worker/v2/flows/test_load_balancer_flows.py @@ -201,7 +201,7 @@ class TestLoadBalancerFlows(base.TestCase): self.assertIn(constants.AMP_DATA, create_flow.provides) self.assertIn(constants.AMPHORA_NETWORK_CONFIG, create_flow.provides) - self.assertEqual(4, len(create_flow.requires)) + self.assertEqual(5, len(create_flow.requires)) self.assertEqual(13, len(create_flow.provides), create_flow.provides) @@ -229,6 +229,6 @@ class TestLoadBalancerFlows(base.TestCase): self.assertIn(constants.AMPHORAE_NETWORK_CONFIG, create_flow.provides) - self.assertEqual(4, len(create_flow.requires)) + self.assertEqual(5, len(create_flow.requires)) self.assertEqual(14, len(create_flow.provides), create_flow.provides) diff --git a/octavia/tests/unit/controller/worker/v2/tasks/test_compute_tasks.py b/octavia/tests/unit/controller/worker/v2/tasks/test_compute_tasks.py index fbedc9d3fc..ebd4b73015 100644 --- a/octavia/tests/unit/controller/worker/v2/tasks/test_compute_tasks.py +++ b/octavia/tests/unit/controller/worker/v2/tasks/test_compute_tasks.py @@ -124,7 +124,9 @@ class TestComputeTasks(base.TestCase): 'amphora-agent.conf': 'test_conf', '/etc/rsyslog.d/10-rsyslog.conf': 'FAKE CFG'}, user_data=None, - server_group_id=SERVER_GRPOUP_ID) + server_group_id=SERVER_GRPOUP_ID, + availability_zone=None + ) # Make sure it returns the expected compute_id self.assertEqual(COMPUTE_ID, compute_id) @@ -184,7 +186,77 @@ class TestComputeTasks(base.TestCase): port_ids=[PORT_ID], config_drive_files=None, user_data='test_ud_conf', - server_group_id=None) + server_group_id=None, + availability_zone=None) + + # Make sure it returns the expected compute_id + self.assertEqual(COMPUTE_ID, compute_id) + + # Test that a build exception is raised + createcompute = compute_tasks.ComputeCreate() + + self.assertRaises(TypeError, + createcompute.execute, + _amphora_mock, config_drive_files='test_cert') + + # Test revert() + + _amphora_mock.compute_id = COMPUTE_ID + + createcompute = compute_tasks.ComputeCreate() + createcompute.revert(compute_id, _amphora_mock.id) + + # Validate that the delete method was called 
properly + mock_driver.delete.assert_called_once_with( + COMPUTE_ID) + + # Test that a delete exception is not raised + + createcompute.revert(COMPUTE_ID, _amphora_mock.id) + + @mock.patch('octavia.common.jinja.logging.logging_jinja_cfg.' + 'LoggingJinjaTemplater.build_logging_config') + @mock.patch('jinja2.Environment.get_template') + @mock.patch('octavia.amphorae.backends.agent.' + 'agent_jinja_cfg.AgentJinjaTemplater.' + 'build_agent_config', return_value='test_conf') + @mock.patch('stevedore.driver.DriverManager.driver') + def test_compute_create_availability_zone(self, mock_driver, mock_conf, + mock_jinja, mock_log_cfg): + + image_owner_id = uuidutils.generate_uuid() + compute_zone = uuidutils.generate_uuid() + az_dict = {constants.COMPUTE_ZONE: compute_zone} + + self.conf.config( + group="controller_worker", amp_image_owner_id=image_owner_id) + mock_log_cfg.return_value = 'FAKE CFG' + + createcompute = compute_tasks.ComputeCreate() + + mock_driver.build.return_value = COMPUTE_ID + # Test execute() + compute_id = createcompute.execute(_amphora_mock.id, ports=[_port], + server_group_id=SERVER_GRPOUP_ID, + availability_zone=az_dict) + + # Validate that the build method was called properly + mock_driver.build.assert_called_once_with( + name="amphora-" + _amphora_mock.id, + amphora_flavor=AMP_FLAVOR_ID, + image_id=AMP_IMAGE_ID, + image_tag=AMP_IMAGE_TAG, + image_owner=image_owner_id, + key_name=AMP_SSH_KEY_NAME, + sec_groups=AMP_SEC_GROUPS, + network_ids=AMP_NET, + port_ids=[PORT_ID], + config_drive_files={'/etc/octavia/' + 'amphora-agent.conf': 'test_conf', + '/etc/rsyslog.d/10-rsyslog.conf': 'FAKE CFG'}, + user_data=None, + server_group_id=SERVER_GRPOUP_ID, + availability_zone=compute_zone) # Make sure it returns the expected compute_id self.assertEqual(COMPUTE_ID, compute_id) @@ -249,7 +321,8 @@ class TestComputeTasks(base.TestCase): 'amphora-agent.conf': 'test_conf', '/etc/rsyslog.d/10-rsyslog.conf': 'FAKE CFG'}, user_data=None, - server_group_id=SERVER_GRPOUP_ID) + server_group_id=SERVER_GRPOUP_ID, + availability_zone=None) self.assertEqual(COMPUTE_ID, compute_id) @@ -317,7 +390,8 @@ class TestComputeTasks(base.TestCase): '/etc/octavia/certs/server.pem': fer.decrypt(test_cert), '/etc/octavia/certs/client_ca.pem': 'test', '/etc/octavia/amphora-agent.conf': 'test_conf'}, - server_group_id=SERVER_GRPOUP_ID) + server_group_id=SERVER_GRPOUP_ID, + availability_zone=None) self.assertEqual(COMPUTE_ID, compute_id) diff --git a/octavia/tests/unit/controller/worker/v2/tasks/test_database_tasks.py b/octavia/tests/unit/controller/worker/v2/tasks/test_database_tasks.py index 32ef0d6480..d9ca842cbd 100644 --- a/octavia/tests/unit/controller/worker/v2/tasks/test_database_tasks.py +++ b/octavia/tests/unit/controller/worker/v2/tasks/test_database_tasks.py @@ -567,7 +567,53 @@ class TestDatabaseTasks(base.TestCase): repo.AmphoraRepository.allocate_and_associate.assert_called_once_with( 'TEST', - LB_ID) + LB_ID, + None) + + self.assertEqual(_amphora_mock.id, amp_id) + + amp_id = map_lb_to_amp.execute(self.loadbalancer_mock.id) + + self.assertIsNone(amp_id) + + # Test revert + map_lb_to_amp.revert(None, self.loadbalancer_mock.id) + repo.LoadBalancerRepository.update.assert_called_once_with( + 'TEST', + id=LB_ID, + provisioning_status=constants.ERROR) + + # Test revert with exception + repo.LoadBalancerRepository.update.reset_mock() + mock_loadbalancer_repo_update.side_effect = Exception('fail') + map_lb_to_amp.revert(None, self.loadbalancer_mock.id) + 
repo.LoadBalancerRepository.update.assert_called_once_with( + 'TEST', + id=LB_ID, + provisioning_status=constants.ERROR) + + @mock.patch('octavia.db.repositories.AmphoraRepository.' + 'allocate_and_associate', + side_effect=[_amphora_mock, None]) + def test_map_loadbalancer_to_amphora_with_az(self, + mock_allocate_and_associate, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + + map_lb_to_amp = database_tasks.MapLoadbalancerToAmphora() + amp_id = map_lb_to_amp.execute( + self.loadbalancer_mock.id, availability_zone={ + constants.COMPUTE_ZONE: 'fakeaz'}) + + repo.AmphoraRepository.allocate_and_associate.assert_called_once_with( + 'TEST', + LB_ID, + 'fakeaz') self.assertEqual(_amphora_mock.id, amp_id) diff --git a/octavia/tests/unit/controller/worker/v2/test_controller_worker.py b/octavia/tests/unit/controller/worker/v2/test_controller_worker.py index 07779840d8..b7a2c961c5 100644 --- a/octavia/tests/unit/controller/worker/v2/test_controller_worker.py +++ b/octavia/tests/unit/controller/worker/v2/test_controller_worker.py @@ -132,8 +132,8 @@ class TestControllerWorker(base.TestCase): 'amphora_flows.AmphoraFlows.get_create_amphora_flow', return_value='TEST') def test_create_amphora(self, - mock_api_get_session, mock_get_create_amp_flow, + mock_api_get_session, mock_dyn_log_listener, mock_taskflow_load, mock_pool_repo_get, @@ -155,7 +155,49 @@ class TestControllerWorker(base.TestCase): 'TEST', store={constants.BUILD_TYPE_PRIORITY: constants.LB_CREATE_SPARES_POOL_PRIORITY, - constants.FLAVOR: None})) + constants.FLAVOR: None, + constants.AVAILABILITY_ZONE: None})) + + _flow_mock.run.assert_called_once_with() + + _flow_mock.storage.fetch.assert_called_once_with('amphora') + + self.assertEqual(AMP_ID, amp) + + @mock.patch('octavia.db.repositories.AvailabilityZoneRepository.' + 'get_availability_zone_metadata_dict') + @mock.patch('octavia.controller.worker.v2.flows.' + 'amphora_flows.AmphoraFlows.get_create_amphora_flow', + return_value='TEST') + def test_create_amphora_with_az(self, + mock_get_create_amp_flow, + mock_get_az_metadata, + mock_api_get_session, + mock_dyn_log_listener, + mock_taskflow_load, + mock_pool_repo_get, + mock_member_repo_get, + mock_l7rule_repo_get, + mock_l7policy_repo_get, + mock_listener_repo_get, + mock_lb_repo_get, + mock_health_mon_repo_get, + mock_amp_repo_get): + + _flow_mock.reset_mock() + az = 'fake_az' + az_data = {constants.COMPUTE_ZONE: az} + mock_get_az_metadata.return_value = az_data + cw = controller_worker.ControllerWorker() + amp = cw.create_amphora(availability_zone=az) + mock_get_az_metadata.assert_called_once_with(_db_session, az) + (base_taskflow.BaseTaskFlowEngine._taskflow_load. 
+ assert_called_once_with( + 'TEST', + store={constants.BUILD_TYPE_PRIORITY: + constants.LB_CREATE_SPARES_POOL_PRIORITY, + constants.FLAVOR: None, + constants.AVAILABILITY_ZONE: az_data})) _flow_mock.run.assert_called_once_with() @@ -445,7 +487,8 @@ class TestControllerWorker(base.TestCase): constants.LOADBALANCER_ID: LB_ID, 'update_dict': {'topology': constants.TOPOLOGY_SINGLE}, constants.BUILD_TYPE_PRIORITY: constants.LB_CREATE_NORMAL_PRIORITY, - constants.FLAVOR: None + constants.FLAVOR: None, + constants.AVAILABILITY_ZONE: None, } lb_mock = mock.MagicMock() lb_mock.listeners = [] @@ -492,7 +535,8 @@ class TestControllerWorker(base.TestCase): constants.LOADBALANCER_ID: LB_ID, 'update_dict': {'topology': constants.TOPOLOGY_ACTIVE_STANDBY}, constants.BUILD_TYPE_PRIORITY: constants.LB_CREATE_NORMAL_PRIORITY, - constants.FLAVOR: None + constants.FLAVOR: None, + constants.AVAILABILITY_ZONE: None, } setattr(mock_lb_repo_get.return_value, 'topology', constants.TOPOLOGY_ACTIVE_STANDBY) @@ -539,7 +583,8 @@ class TestControllerWorker(base.TestCase): constants.LOADBALANCER_ID: LB_ID, 'update_dict': {'topology': constants.TOPOLOGY_SINGLE}, constants.BUILD_TYPE_PRIORITY: constants.LB_CREATE_NORMAL_PRIORITY, - constants.FLAVOR: None + constants.FLAVOR: None, + constants.AVAILABILITY_ZONE: None, } cw = controller_worker.ControllerWorker() @@ -592,7 +637,8 @@ class TestControllerWorker(base.TestCase): constants.LOADBALANCER_ID: LB_ID, 'update_dict': {'topology': constants.TOPOLOGY_ACTIVE_STANDBY}, constants.BUILD_TYPE_PRIORITY: constants.LB_CREATE_NORMAL_PRIORITY, - constants.FLAVOR: None + constants.FLAVOR: None, + constants.AVAILABILITY_ZONE: None, } cw = controller_worker.ControllerWorker() @@ -1197,6 +1243,8 @@ class TestControllerWorker(base.TestCase): _flow_mock.run.assert_called_once_with() + @mock.patch('octavia.db.repositories.AvailabilityZoneRepository.' + 'get_availability_zone_metadata_dict', return_value={}) @mock.patch('octavia.db.repositories.FlavorRepository.' 'get_flavor_metadata_dict', return_value={}) @mock.patch('octavia.controller.worker.v2.flows.' @@ -1207,6 +1255,7 @@ class TestControllerWorker(base.TestCase): mock_update, mock_get_failover_flow, mock_get_flavor_meta, + mock_get_az_meta, mock_api_get_session, mock_dyn_log_listener, mock_taskflow_load, @@ -1232,7 +1281,8 @@ class TestControllerWorker(base.TestCase): _amphora_mock.load_balancer_id, constants.BUILD_TYPE_PRIORITY: constants.LB_CREATE_FAILOVER_PRIORITY, - constants.FLAVOR: {} + constants.FLAVOR: {}, + constants.AVAILABILITY_ZONE: {} })) _flow_mock.run.assert_called_once_with() @@ -1355,7 +1405,8 @@ class TestControllerWorker(base.TestCase): constants.LOADBALANCER_ID: None, constants.BUILD_TYPE_PRIORITY: constants.LB_CREATE_FAILOVER_PRIORITY, - constants.FLAVOR: {} + constants.FLAVOR: {}, + constants.AVAILABILITY_ZONE: {} })) _flow_mock.run.assert_called_once_with() @@ -1433,6 +1484,8 @@ class TestControllerWorker(base.TestCase): mock_update.assert_called_with(_db_session, 123, provisioning_status=constants.ERROR) + @mock.patch('octavia.db.repositories.AvailabilityZoneRepository.' + 'get_availability_zone_metadata_dict', return_value={}) @mock.patch('octavia.db.repositories.FlavorRepository.' 'get_flavor_metadata_dict', return_value={}) @mock.patch('octavia.controller.worker.v2.flows.' 
@@ -1447,6 +1500,7 @@ class TestControllerWorker(base.TestCase): mock_get_lb_for_amphora, mock_get_update_listener_flow, mock_get_flavor_meta, + mock_get_az_meta, mock_api_get_session, mock_dyn_log_listener, mock_taskflow_load, @@ -1475,7 +1529,8 @@ class TestControllerWorker(base.TestCase): constants.BUILD_TYPE_PRIORITY: constants.LB_CREATE_FAILOVER_PRIORITY, constants.SERVER_GROUP_ID: "123", - constants.FLAVOR: {} + constants.FLAVOR: {}, + constants.AVAILABILITY_ZONE: {} })) _flow_mock.run.assert_called_once_with() diff --git a/releasenotes/notes/support-az-on-lb-create-562dcf639bb272ea.yaml b/releasenotes/notes/support-az-on-lb-create-562dcf639bb272ea.yaml new file mode 100644 index 0000000000..da92ce23dd --- /dev/null +++ b/releasenotes/notes/support-az-on-lb-create-562dcf639bb272ea.yaml @@ -0,0 +1,15 @@ +--- +features: + - | + The load balancer create command now accepts an availability_zone argument. + With the amphora driver, this will create the load balancer in the targeted + compute availability_zone in nova. + + When using spare pools, it will create spares in each AZ. For the amphora + driver, if no ``[nova] availability_zone`` is configured and availability + zones are used, results may be slightly unpredictable. + + Note (for the ``amphora`` driver): if an amphora changes availability + zone after its initial creation (not typically possible without outside + intervention), this feature may no longer function properly for that + load balancer.
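
The driver and endpoint tests above assert the new request shape end to end. As a rough illustration only (the ``build_create_lb_payload`` helper below is hypothetical, not actual Octavia code), the amphora provider driver's load balancer create call now carries an availability zone entry in its RPC payload alongside the flavor, defaulting to ``None`` when none is requested:

.. code-block:: python

    # Hypothetical sketch of the payload shape asserted in the amphora
    # driver tests above; the real driver builds an equivalent dict and
    # casts it over oslo.messaging to the controller worker.
    from octavia.common import constants as consts


    def build_create_lb_payload(loadbalancer_id, flavor=None,
                                availability_zone=None):
        # availability_zone here is the zone name stored on the load
        # balancer; per the controller worker tests above, the worker later
        # expands it into its metadata dict (for example
        # {consts.COMPUTE_ZONE: 'my-az'}) before the nova compute build.
        return {
            consts.LOAD_BALANCER_ID: loadbalancer_id,
            consts.FLAVOR: flavor,
            consts.AVAILABILITY_ZONE: availability_zone,
        }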