diff --git a/vmware_nsx/common/config.py b/vmware_nsx/common/config.py
index 7ef5689845..aec9fee82d 100644
--- a/vmware_nsx/common/config.py
+++ b/vmware_nsx/common/config.py
@@ -429,8 +429,10 @@ nsxv_opts = [
                       'deploy NSX Edges')),
     cfg.ListOpt('availability_zones',
                 default=[],
-                help=_('Optional parameter identifying the IDs of alternative '
-                       'resources to deploy NSX Edges')),
+                help=_('Optional parameter defining the availability zones '
+                       'for deploying NSX Edges with the format: <name>:'
+                       '<resource pool id>:<datastore id>:<(optional)HA '
+                       'datastore id>.')),
     cfg.StrOpt('datastore_id',
                deprecated_group="vcns",
                help=_('Optional parameter identifying the ID of datastore to '
diff --git a/vmware_nsx/db/migration/alembic_migrations/versions/EXPAND_HEAD b/vmware_nsx/db/migration/alembic_migrations/versions/EXPAND_HEAD
index 31c2766249..f4dd4dc895 100644
--- a/vmware_nsx/db/migration/alembic_migrations/versions/EXPAND_HEAD
+++ b/vmware_nsx/db/migration/alembic_migrations/versions/EXPAND_HEAD
@@ -1 +1 @@
-7e46906f8997
+86a55205337c
diff --git a/vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/86a55205337c_nsxv_availability_zone_router_mapping.py b/vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/86a55205337c_nsxv_availability_zone_router_mapping.py
new file mode 100644
index 0000000000..44158914ba
--- /dev/null
+++ b/vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/86a55205337c_nsxv_availability_zone_router_mapping.py
@@ -0,0 +1,38 @@
+# Copyright 2016 VMware, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""NSXv add availability zone to the router bindings table instead of
+the resource pool column
+
+Revision ID: 86a55205337c
+Revises: 7e46906f8997
+Create Date: 2016-07-12 09:18:44.450116
+"""
+
+# revision identifiers, used by Alembic.
+revision = '86a55205337c' +down_revision = '7e46906f8997' + +from alembic import op +import sqlalchemy as sa + +from vmware_nsx.common import config # noqa + + +def upgrade(): + op.alter_column('nsxv_router_bindings', 'resource_pool', + new_column_name='availability_zone', + existing_type=sa.String(36), + existing_nullable=True, + existing_server_default='default') diff --git a/vmware_nsx/db/nsxv_db.py b/vmware_nsx/db/nsxv_db.py index 06754ebf9a..ccda864325 100644 --- a/vmware_nsx/db/nsxv_db.py +++ b/vmware_nsx/db/nsxv_db.py @@ -54,7 +54,7 @@ def _apply_filters_to_query(query, model, filters, like_filters=None): def add_nsxv_router_binding(session, router_id, vse_id, lswitch_id, status, appliance_size=nsxv_constants.LARGE, edge_type=nsxv_constants.SERVICE_EDGE, - resource_pool=None): + availability_zone=None): with session.begin(subtransactions=True): binding = nsxv_models.NsxvRouterBinding( router_id=router_id, @@ -63,7 +63,7 @@ def add_nsxv_router_binding(session, router_id, vse_id, lswitch_id, status, status=status, appliance_size=appliance_size, edge_type=edge_type, - resource_pool=resource_pool) + availability_zone=availability_zone) session.add(binding) return binding @@ -137,10 +137,10 @@ def delete_nsxv_router_binding(session, router_id): session.delete(binding) -def get_edge_resource_pool(session, edge_id): +def get_edge_availability_zone(session, edge_id): binding = get_nsxv_router_binding_by_edge(session, edge_id) if binding: - return binding['resource_pool'] + return binding['availability_zone'] def get_edge_vnic_binding(session, edge_id, network_id): diff --git a/vmware_nsx/db/nsxv_models.py b/vmware_nsx/db/nsxv_models.py index d5940e944e..4e177cba0f 100644 --- a/vmware_nsx/db/nsxv_models.py +++ b/vmware_nsx/db/nsxv_models.py @@ -50,8 +50,8 @@ class NsxvRouterBinding(model_base.BASEV2, models_v2.HasStatusDescription, edge_type = sa.Column(sa.Enum(nsxv_constants.SERVICE_EDGE, nsxv_constants.VDR_EDGE, name='nsxv_router_bindings_edge_type')) - resource_pool = sa.Column(sa.String(36), - nullable=True) + availability_zone = sa.Column(sa.String(36), + nullable=True) class NsxvEdgeVnicBinding(model_base.BASEV2, models.TimestampMixin): diff --git a/vmware_nsx/plugins/nsx_v/availability_zones.py b/vmware_nsx/plugins/nsx_v/availability_zones.py new file mode 100644 index 0000000000..e5fce79378 --- /dev/null +++ b/vmware_nsx/plugins/nsx_v/availability_zones.py @@ -0,0 +1,89 @@ +# Copyright 2016 VMware, Inc. +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
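# Illustration (hypothetical IDs): each entry of the cfg.CONF.nsxv
# availability_zones list option documented in config.py above is a plain
# string of the form
#     <name>:<resource pool id>:<datastore id>[:<HA datastore id>]
# and is parsed by the ConfiguredAvailabilityZone class introduced below:
#
#     az = ConfiguredAvailabilityZone('az1:resgroup-9:datastore-10')
#     az.name            -> 'az1'
#     az.resource_pool   -> 'resgroup-9'
#     az.datastore_id    -> 'datastore-10'
#     az.ha_datastore_id -> None   (set only when a fourth field is given)
#
# Passing None instead of a config line falls back to the global
# resource_pool_id / datastore_id / ha_datastore_id options under the
# 'default' zone name.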
+
+from neutron_lib import exceptions as n_exc
+from oslo_config import cfg
+
+from vmware_nsx._i18n import _
+
+DEFAULT_NAME = 'default'
+
+
+class ConfiguredAvailabilityZone(object):
+
+    def __init__(self, config_line):
+        if config_line:
+            values = config_line.split(':')
+            if len(values) < 3 or len(values) > 4:
+                raise n_exc.Invalid(_("Invalid availability zones format"))
+
+            self.name = values[0]
+            # field name limit in the DB
+            if len(self.name) > 36:
+                raise n_exc.Invalid(_("Invalid availability zone name %s: "
+                                      "max name length is 36") % self.name)
+
+            self.resource_pool = values[1]
+            self.datastore_id = values[2]
+            self.ha_datastore_id = values[3] if len(values) == 4 else None
+        else:
+            # use the default configuration
+            self.name = DEFAULT_NAME
+            self.resource_pool = cfg.CONF.nsxv.resource_pool_id
+            self.datastore_id = cfg.CONF.nsxv.datastore_id
+            self.ha_datastore_id = cfg.CONF.nsxv.ha_datastore_id
+
+
+class ConfiguredAvailabilityZones(object):
+
+    def __init__(self):
+        self.availability_zones = {}
+
+        # Add the configured availability zones
+        for az in cfg.CONF.nsxv.availability_zones:
+            obj = ConfiguredAvailabilityZone(az)
+            self.availability_zones[obj.name] = obj
+
+        # add a default entry
+        obj = ConfiguredAvailabilityZone(None)
+        self.availability_zones[obj.name] = obj
+
+    def get_resources(self):
+        """Return a list of all the resources in all the availability zones
+        """
+        resources = []
+        for az in self.availability_zones.values():
+            resources.append(az.resource_pool)
+            resources.append(az.datastore_id)
+            if az.ha_datastore_id:
+                resources.append(az.ha_datastore_id)
+        return resources
+
+    def get_availability_zone(self, name):
+        """Return an availability zone object by its name
+        """
+        if name in self.availability_zones.keys():
+            return self.availability_zones[name]
+
+    def get_default_availability_zone(self):
+        """Return the default availability zone object
+        """
+        return self.availability_zones[DEFAULT_NAME]
+
+    def list_availability_zones(self):
+        """Return a list of availability zones names
+        """
+        return self.availability_zones.keys()
diff --git a/vmware_nsx/plugins/nsx_v/drivers/abstract_router_driver.py b/vmware_nsx/plugins/nsx_v/drivers/abstract_router_driver.py
index 8820ab0950..b9c1db5659 100644
--- a/vmware_nsx/plugins/nsx_v/drivers/abstract_router_driver.py
+++ b/vmware_nsx/plugins/nsx_v/drivers/abstract_router_driver.py
@@ -18,7 +18,6 @@ import six
 from neutron.db import l3_db
 from neutron.db import models_v2
-from neutron.extensions import availability_zone as az_ext
 
 from vmware_nsx._i18n import _
 from vmware_nsx.common import exceptions as nsxv_exc
 from vmware_nsx.plugins.nsx_v.vshield import edge_utils
@@ -122,17 +121,9 @@ class RouterBaseDriver(RouterAbstractDriver):
         if is_uplink:
             self.update_nat_rules(context, router, router_id)
 
-    def _get_resource_pool_from_hints_by_id(self, context, router_id):
+    def get_router_az(self, lrouter):
+        return self.plugin.get_router_az(lrouter)
+
+    def get_router_az_by_id(self, context, router_id):
         lrouter = self.plugin.get_router(context, router_id)
-        return self._get_resource_pool_from_hints(lrouter)
-
-    def _get_resource_pools_from_hints(self, lrouter):
-        pools = []
-        if az_ext.AZ_HINTS in lrouter:
-            for hint in lrouter[az_ext.AZ_HINTS]:
-                pools.append(self.plugin.get_res_pool_id_by_name(hint))
-        return pools
-
-    def _get_resource_pool_from_hints(self, lrouter):
-        pools = self._get_resource_pools_from_hints(lrouter)
-        return pools[0] if len(pools) > 0 else None
+        return
self.get_router_az(lrouter) diff --git a/vmware_nsx/plugins/nsx_v/drivers/distributed_router_driver.py b/vmware_nsx/plugins/nsx_v/drivers/distributed_router_driver.py index c32273b05f..f15d34cec6 100644 --- a/vmware_nsx/plugins/nsx_v/drivers/distributed_router_driver.py +++ b/vmware_nsx/plugins/nsx_v/drivers/distributed_router_driver.py @@ -94,9 +94,9 @@ class RouterDistributedDriver(router_driver.RouterBaseDriver): def create_router(self, context, lrouter, appliance_size=None, allow_metadata=True): - res_pool = self._get_resource_pool_from_hints(lrouter) + az = self.get_router_az(lrouter) self.edge_manager.create_lrouter(context, lrouter, dist=True, - res_pool=res_pool) + availability_zone=az) def update_router(self, context, router_id, router): r = router['router'] @@ -178,10 +178,9 @@ class RouterDistributedDriver(router_driver.RouterBaseDriver): else: # Connecting plr to the tlr if new_ext_net_id is not None. if not plr_id: - res_pool = self._get_resource_pool_from_hints_by_id( - context, router_id) + availability_zone = self.get_router_az(router) plr_id = self.edge_manager.create_plr_with_tlr_id( - context, router_id, router.get('name'), res_pool=res_pool) + context, router_id, router.get('name'), availability_zone) if new_ext_net_id != org_ext_net_id and orgnexthop: # network changed, so need to remove default gateway # and all static routes before vnic can be configured diff --git a/vmware_nsx/plugins/nsx_v/drivers/exclusive_router_driver.py b/vmware_nsx/plugins/nsx_v/drivers/exclusive_router_driver.py index 5d6224b360..8a55878fcb 100644 --- a/vmware_nsx/plugins/nsx_v/drivers/exclusive_router_driver.py +++ b/vmware_nsx/plugins/nsx_v/drivers/exclusive_router_driver.py @@ -36,10 +36,10 @@ class RouterExclusiveDriver(router_driver.RouterBaseDriver): def create_router(self, context, lrouter, appliance_size=None, allow_metadata=True): - res_pool = self._get_resource_pool_from_hints(lrouter) + availability_zone = self.get_router_az(lrouter) self.edge_manager.create_lrouter( context, lrouter, dist=False, appliance_size=appliance_size, - res_pool=res_pool) + availability_zone=availability_zone) if allow_metadata: self.plugin.metadata_proxy_handler.configure_router_edge( lrouter['id']) diff --git a/vmware_nsx/plugins/nsx_v/drivers/shared_router_driver.py b/vmware_nsx/plugins/nsx_v/drivers/shared_router_driver.py index 0b27e9fa41..f8df7b9b95 100644 --- a/vmware_nsx/plugins/nsx_v/drivers/shared_router_driver.py +++ b/vmware_nsx/plugins/nsx_v/drivers/shared_router_driver.py @@ -571,13 +571,11 @@ class RouterSharedDriver(router_driver.RouterBaseDriver): conflict_router_ids.extend(new_conflict_router_ids) conflict_router_ids = list(set(conflict_router_ids)) - res_pool = self._get_resource_pool_from_hints_by_id( - context, router_id) - + az = self.get_router_az_by_id(context, router_id) new = self.edge_manager.bind_router_on_available_edge( context, router_id, optional_router_ids, - conflict_router_ids, conflict_network_ids, intf_num, - res_pool) + conflict_router_ids, conflict_network_ids, + intf_num, az) # configure metadata service on the router. 
metadata_proxy_handler = self.plugin.metadata_proxy_handler if metadata_proxy_handler and new: diff --git a/vmware_nsx/plugins/nsx_v/plugin.py b/vmware_nsx/plugins/nsx_v/plugin.py index 56f884843a..e8f43d8bf0 100644 --- a/vmware_nsx/plugins/nsx_v/plugin.py +++ b/vmware_nsx/plugins/nsx_v/plugin.py @@ -93,6 +93,7 @@ from vmware_nsx.extensions import dns_search_domain as ext_dns_search_domain from vmware_nsx.extensions import routersize from vmware_nsx.extensions import secgroup_rule_local_ip_prefix from vmware_nsx.extensions import securitygrouplogging as sg_logging +from vmware_nsx.plugins.nsx_v import availability_zones as nsx_az from vmware_nsx.plugins.nsx_v import managers from vmware_nsx.plugins.nsx_v import md_proxy as nsx_v_md_proxy from vmware_nsx.plugins.nsx_v.vshield.common import ( @@ -197,8 +198,8 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin, self.dvs_id = cfg.CONF.nsxv.dvs_id self.nsx_sg_utils = securitygroup_utils.NsxSecurityGroupUtils( self.nsx_v) + self._availability_zones_data = nsx_az.ConfiguredAvailabilityZones() self._validate_config() - self._build_availability_zones_data() self.sg_container_id = self._create_security_group_container() self.default_section = self._create_cluster_default_fw_section() self._process_security_groups_rules_logging() @@ -737,7 +738,7 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin, #TODO(asarfaty): We may need to use the filters arg, but now it # is here only for overriding the original api result = {} - for az in self._availability_zones_data.keys(): + for az in self._availability_zones_data.list_availability_zones(): # Add this availability zone as a router & network resource for resource in ('router', 'network'): result[(az, resource)] = True @@ -763,20 +764,26 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin, # check that all hints appear in the predefined list of availability # zones diff = (set(availability_zones) - - set(self._availability_zones_data.keys())) + set(self._availability_zones_data.list_availability_zones())) if diff: raise az_ext.AvailabilityZoneNotFound( availability_zone=diff.pop()) - def get_network_resource_pool(self, context, network_id): - network = self.get_network(context, network_id) - if az_ext.AZ_HINTS in network: - for hint in network[az_ext.AZ_HINTS]: + def get_network_or_router_az(self, object): + if az_ext.AZ_HINTS in object: + for hint in object[az_ext.AZ_HINTS]: # For now we use only the first hint - return self.get_res_pool_id_by_name(hint) + return self.get_az_by_hint(hint) # return the default - return cfg.CONF.nsxv.resource_pool_id + return self.get_default_az() + + def get_network_az(self, context, network_id): + network = self.get_network(context, network_id) + return self.get_network_or_router_az(network) + + def get_router_az(self, router): + return self.get_network_or_router_az(router) def create_network(self, context, network): net_data = network['network'] @@ -2183,13 +2190,6 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin, super(NsxVPluginV2, self).delete_router(context, id) router_driver.delete_router(context, id) - def _get_availability_zone_by_edge(self, context, edge_id): - resource_pool = nsxv_db.get_edge_resource_pool( - context.session, edge_id) - if resource_pool: - av_zone = self.get_res_pool_name_by_id(resource_pool) - return av_zone - db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( attr.NETWORKS, ['_extend_availability_zone_hints']) @@ -2197,6 +2197,14 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin, 
net_res[az_ext.AZ_HINTS] = az_ext.convert_az_string_to_list( net_db[az_ext.AZ_HINTS]) + def _get_availability_zone_name_by_edge(self, context, edge_id): + az_name = nsxv_db.get_edge_availability_zone( + context.session, edge_id) + if az_name: + return az_name + # fallback + return nsx_az.DEFAULT_NAME + def get_network_availability_zones(self, context, net_db): """Return availability zones which a network belongs to.""" @@ -2205,7 +2213,8 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin, context.session, resource_id) if dhcp_edge_binding: edge_id = dhcp_edge_binding['edge_id'] - return [self._get_availability_zone_by_edge(context, edge_id)] + return [self._get_availability_zone_name_by_edge( + context, edge_id)] return [] def get_router_availability_zones(self, router): @@ -2213,7 +2222,8 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin, context = n_context.get_admin_context() edge_id = self._get_edge_id_by_rtr_id(context, router["id"]) if edge_id: - return [self._get_availability_zone_by_edge(context, edge_id)] + return [self._get_availability_zone_name_by_edge( + context, edge_id)] return [] def get_router(self, context, id, fields=None): @@ -3044,8 +3054,7 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin, ver = self.nsx_v.vcns.get_version() if version.LooseVersion(ver) < version.LooseVersion('6.2.0'): # Do not support availability zones hints below 6.2 - if (cfg.CONF.nsxv.availability_zones and - len(cfg.CONF.nsxv.availability_zones) > 0): + if cfg.CONF.nsxv.availability_zones: error = (_("Availability zones are not supported for version " "%s") % ver) raise nsx_exc.NsxPluginException(err_msg=error) @@ -3064,10 +3073,10 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin, for cluster in cfg.CONF.nsxv.cluster_moid: inventory.append((cluster, 'cluster_moid')) - # Add the availability zones resource pools - if cfg.CONF.nsxv.availability_zones: - for az in cfg.CONF.nsxv.availability_zones: - inventory.append((az, 'availability_zones')) + # Add the availability zones resources + az_resources = self._availability_zones_data.get_resources() + for res in az_resources: + inventory.append((res, 'availability_zones')) for moref, field in inventory: if moref and not self.nsx_v.vcns.validate_inventory(moref): @@ -3077,30 +3086,14 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin, def _handle_qos_notification(self, qos_policy, event_type): qos_utils.handle_qos_notification(qos_policy, event_type, self._dvs) - def _build_availability_zones_data(self): - self._availability_zones_data = {} - if not len(cfg.CONF.nsxv.availability_zones): - return + def get_az_by_hint(self, hint): + az = self._availability_zones_data.get_availability_zone(hint) + if not az: + raise az_ext.AvailabilityZoneNotFound(availability_zone=hint) + return az - # Add the availability zones resource pools - if cfg.CONF.nsxv.availability_zones: - for az in cfg.CONF.nsxv.availability_zones: - name = self.nsx_v.vcns.get_inventory_name(az) - self._availability_zones_data[name] = az - # Add the default resource_pool_id too - az = cfg.CONF.nsxv.resource_pool_id - name = self.nsx_v.vcns.get_inventory_name(az) - self._availability_zones_data[name] = az - - def get_res_pool_id_by_name(self, name): - if name in self._availability_zones_data.keys(): - return self._availability_zones_data[name] - raise az_ext.AvailabilityZoneNotFound(availability_zone=name) - - def get_res_pool_name_by_id(self, res_pool_id): - for name in self._availability_zones_data.keys(): - if res_pool_id == 
self._availability_zones_data[name]: - return name + def get_default_az(self): + return self._availability_zones_data.get_default_availability_zone() # Register the callback diff --git a/vmware_nsx/plugins/nsx_v/vshield/edge_appliance_driver.py b/vmware_nsx/plugins/nsx_v/vshield/edge_appliance_driver.py index c60f98148c..40d7afda77 100644 --- a/vmware_nsx/plugins/nsx_v/vshield/edge_appliance_driver.py +++ b/vmware_nsx/plugins/nsx_v/vshield/edge_appliance_driver.py @@ -91,15 +91,16 @@ class EdgeApplianceDriver(object): return edge - def _assemble_edge_appliances(self, resource_pool_id, datastore_id, - ha_datastore_id): + def _assemble_edge_appliances(self, availability_zone): appliances = [] - if datastore_id: + if availability_zone.datastore_id: appliances.append(self._assemble_edge_appliance( - resource_pool_id, datastore_id)) - if ha_datastore_id and cfg.CONF.nsxv.edge_ha: + availability_zone.resource_pool, + availability_zone.datastore_id)) + if availability_zone.ha_datastore_id and cfg.CONF.nsxv.edge_ha: appliances.append(self._assemble_edge_appliance( - resource_pool_id, ha_datastore_id)) + availability_zone.resource_pool, + availability_zone.ha_datastore_id)) return appliances def _assemble_edge_appliance(self, resource_pool_id, datastore_id): @@ -515,17 +516,14 @@ class EdgeApplianceDriver(object): def deploy_edge(self, resource_id, name, internal_network, jobdata=None, dist=False, wait_for_exec=False, loadbalancer_enable=True, appliance_size=nsxv_constants.LARGE, async=True, - res_pool=None): + availability_zone=None): task_name = 'deploying-%s' % name edge_name = name edge = self._assemble_edge( edge_name, datacenter_moid=self.datacenter_moid, deployment_container_id=self.deployment_container_id, appliance_size=appliance_size, remote_access=False, dist=dist) - res_pool = res_pool or self.resource_pool_id - appliances = self._assemble_edge_appliances(res_pool, - self.datastore_id, - self.ha_datastore_id) + appliances = self._assemble_edge_appliances(availability_zone) if appliances: edge['appliances']['appliances'] = appliances @@ -605,7 +603,7 @@ class EdgeApplianceDriver(object): def update_edge(self, router_id, edge_id, name, internal_network, jobdata=None, dist=False, loadbalancer_enable=True, appliance_size=nsxv_constants.LARGE, - set_errors=False, res_pool=None): + set_errors=False, availability_zone=None): """Update edge name.""" task_name = 'update-%s' % name edge_name = name @@ -614,10 +612,7 @@ class EdgeApplianceDriver(object): deployment_container_id=self.deployment_container_id, appliance_size=appliance_size, remote_access=False, dist=dist) edge['id'] = edge_id - res_pool = res_pool or self.resource_pool_id - appliances = self._assemble_edge_appliances(res_pool, - self.datastore_id, - self.ha_datastore_id) + appliances = self._assemble_edge_appliances(availability_zone) if appliances: edge['appliances']['appliances'] = appliances diff --git a/vmware_nsx/plugins/nsx_v/vshield/edge_utils.py b/vmware_nsx/plugins/nsx_v/vshield/edge_utils.py index 2034b33d33..8fe778c02d 100644 --- a/vmware_nsx/plugins/nsx_v/vshield/edge_utils.py +++ b/vmware_nsx/plugins/nsx_v/vshield/edge_utils.py @@ -42,6 +42,7 @@ from vmware_nsx.common import nsxv_constants from vmware_nsx.common import utils as c_utils from vmware_nsx.db import db as nsx_db from vmware_nsx.db import nsxv_db +from vmware_nsx.plugins.nsx_v import availability_zones as nsx_az from vmware_nsx.plugins.nsx_v.vshield.common import ( constants as vcns_const) from vmware_nsx.plugins.nsx_v.vshield.common import exceptions as 
nsxapi_exc @@ -102,15 +103,6 @@ def parse_backup_edge_pool_opt(): return edge_pool_dicts -def get_configured_res_pools(): - pools = [] - if cfg.CONF.nsxv.resource_pool_id: - pools.append(cfg.CONF.nsxv.resource_pool_id) - if cfg.CONF.nsxv.availability_zones: - pools.extend(cfg.CONF.nsxv.availability_zones) - return pools - - class EdgeManager(object): """Edge Appliance Management. EdgeManager provides a pool of edge appliances which we can use @@ -124,7 +116,7 @@ class EdgeManager(object): self.edge_pool_dicts = parse_backup_edge_pool_opt() self.nsxv_plugin = nsxv_manager.callbacks.plugin self.plugin = plugin - self._resource_pools = get_configured_res_pools() + self._availability_zones = nsx_az.ConfiguredAvailabilityZones() self.per_interface_rp_filter = self._get_per_edge_rp_filter_state() self.worker_pool = eventlet.GreenPool(WORKER_POOL_SIZE) self._check_backup_edge_pools() @@ -167,7 +159,7 @@ class EdgeManager(object): def _deploy_edge(self, context, lrouter, lswitch=None, appliance_size=nsxv_constants.COMPACT, edge_type=nsxv_constants.SERVICE_EDGE, async=True, - res_pool=None): + availability_zone=None): """Create an edge for logical router support.""" router_id = lrouter['id'] # deploy edge @@ -183,13 +175,13 @@ class EdgeManager(object): jobdata=jobdata, wait_for_exec=True, appliance_size=appliance_size, dist=(edge_type == nsxv_constants.VDR_EDGE), async=async, - res_pool=res_pool) + availability_zone=availability_zone) return task def _deploy_backup_edges_on_db(self, context, num, appliance_size=nsxv_constants.COMPACT, edge_type=nsxv_constants.SERVICE_EDGE, - res_pool=cfg.CONF.nsxv.resource_pool_id): + availability_zone=None): router_ids = [(vcns_const.BACKUP_ROUTER_PREFIX + _uuid())[:vcns_const.EDGE_NAME_LEN] for i in moves.range(num)] @@ -199,19 +191,19 @@ class EdgeManager(object): context.session, router_id, None, None, plugin_const.PENDING_CREATE, appliance_size=appliance_size, edge_type=edge_type, - resource_pool=res_pool) + availability_zone=availability_zone.name) return router_ids def _deploy_backup_edges_at_backend( self, context, router_ids, appliance_size=nsxv_constants.COMPACT, edge_type=nsxv_constants.SERVICE_EDGE, - res_pool=cfg.CONF.nsxv.resource_pool_id): + availability_zone=None): eventlet.spawn_n(self._pool_creator, context, router_ids, - appliance_size, edge_type, res_pool) + appliance_size, edge_type, availability_zone) def _pool_creator(self, context, router_ids, appliance_size, - edge_type, res_pool): + edge_type, availability_zone): pool = self.worker_pool for router_id in router_ids: fake_router = { @@ -220,7 +212,7 @@ class EdgeManager(object): pool.spawn_n(self._deploy_edge, context, fake_router, appliance_size=appliance_size, edge_type=edge_type, async=False, - res_pool=res_pool) + availability_zone=availability_zone) def _delete_edge(self, context, router_binding): if router_binding['status'] == plugin_const.ERROR: @@ -255,10 +247,9 @@ class EdgeManager(object): binding['router_id'], binding['edge_id'], jobdata=jobdata, dist=(binding['edge_type'] == nsxv_constants.VDR_EDGE)) - def _clean_all_error_edge_bindings( - self, context, - res_pool=cfg.CONF.nsxv.resource_pool_id): - filters = {'status': [plugin_const.ERROR], 'resource_pool': [res_pool]} + def _clean_all_error_edge_bindings(self, context, availability_zone): + filters = {'status': [plugin_const.ERROR], + 'availability_zone': [availability_zone.name]} like_filters = {'router_id': vcns_const.BACKUP_ROUTER_PREFIX + "%"} error_router_bindings = nsxv_db.get_nsxv_router_bindings( context.session, 
filters=filters, like_filters=like_filters) @@ -271,10 +262,10 @@ class EdgeManager(object): appliance_size=nsxv_constants.COMPACT, edge_type=nsxv_constants.SERVICE_EDGE, db_update_lock=False, - res_pool=cfg.CONF.nsxv.resource_pool_id): + availability_zone=None): filters = {'appliance_size': [appliance_size], 'edge_type': [edge_type], - 'resource_pool': [res_pool], + 'availability_zone': [availability_zone.name], 'status': [plugin_const.PENDING_CREATE, plugin_const.ACTIVE]} like_filters = {'router_id': vcns_const.BACKUP_ROUTER_PREFIX + "%"} @@ -283,8 +274,9 @@ class EdgeManager(object): def _check_backup_edge_pools(self): admin_ctx = q_context.get_admin_context() - for res_pool in self._resource_pools: - self._clean_all_error_edge_bindings(admin_ctx, res_pool=res_pool) + for az_name in self._availability_zones.list_availability_zones(): + az = self._availability_zones.get_availability_zone(az_name) + self._clean_all_error_edge_bindings(admin_ctx, az) for edge_type, v in self.edge_pool_dicts.items(): for edge_size in vcns_const.ALLOWED_EDGE_SIZES: if edge_size in v.keys(): @@ -293,24 +285,24 @@ class EdgeManager(object): edge_pool_range['minimum_pooled_edges'], edge_pool_range['maximum_pooled_edges'], appliance_size=edge_size, edge_type=edge_type, - res_pool=res_pool) + availability_zone=az) else: self._check_backup_edge_pool( 0, 0, appliance_size=edge_size, edge_type=edge_type, - res_pool=res_pool) + availability_zone=az) def _check_backup_edge_pool(self, minimum_pooled_edges, maximum_pooled_edges, appliance_size=nsxv_constants.COMPACT, edge_type=nsxv_constants.SERVICE_EDGE, - res_pool=cfg.CONF.nsxv.resource_pool_id): + availability_zone=None): """Check edge pool's status and return one available edge for use.""" admin_ctx = q_context.get_admin_context() backup_router_bindings = self._get_backup_edge_bindings( admin_ctx, appliance_size=appliance_size, edge_type=edge_type, - db_update_lock=True, res_pool=res_pool) + db_update_lock=True, availability_zone=availability_zone) backup_num = len(backup_router_bindings) if backup_num > maximum_pooled_edges: self._delete_backup_edges_on_db( @@ -323,22 +315,24 @@ class EdgeManager(object): router_ids.extend( self._deploy_backup_edges_on_db( admin_ctx, 1, appliance_size=appliance_size, - edge_type=edge_type, res_pool=res_pool)) + edge_type=edge_type, + availability_zone=availability_zone)) new_backup_num = len( self._get_backup_edge_bindings( admin_ctx, appliance_size=appliance_size, edge_type=edge_type, db_update_lock=True, - res_pool=res_pool)) + availability_zone=availability_zone)) if backup_num > maximum_pooled_edges: self._delete_backup_edges_at_backend( admin_ctx, backup_router_bindings[:backup_num - maximum_pooled_edges]) elif backup_num < minimum_pooled_edges: - self._deploy_backup_edges_at_backend(admin_ctx, - router_ids, - appliance_size=appliance_size, - edge_type=edge_type, - res_pool=res_pool) + self._deploy_backup_edges_at_backend( + admin_ctx, + router_ids, + appliance_size=appliance_size, + edge_type=edge_type, + availability_zone=availability_zone) def check_edge_active_at_backend(self, edge_id): try: @@ -350,12 +344,10 @@ class EdgeManager(object): def _get_available_router_binding(self, context, appliance_size=nsxv_constants.COMPACT, edge_type=nsxv_constants.SERVICE_EDGE, - res_pool=None): - if not res_pool: - res_pool = cfg.CONF.nsxv.resource_pool_id + availability_zone=None): backup_router_bindings = self._get_backup_edge_bindings( context, appliance_size=appliance_size, edge_type=edge_type, - res_pool=res_pool) + 
availability_zone=availability_zone) while backup_router_bindings: router_binding = random.choice(backup_router_bindings) if (router_binding['status'] == plugin_const.ACTIVE): @@ -566,7 +558,7 @@ class EdgeManager(object): def _allocate_edge_appliance(self, context, resource_id, name, appliance_size=nsxv_constants.COMPACT, dist=False, - res_pool=cfg.CONF.nsxv.resource_pool_id): + availability_zone=None): """Try to allocate one available edge from pool.""" edge_type = (nsxv_constants.VDR_EDGE if dist else nsxv_constants.SERVICE_EDGE) @@ -579,18 +571,19 @@ class EdgeManager(object): plugin_const.PENDING_CREATE, appliance_size=appliance_size, edge_type=edge_type, - resource_pool=res_pool) + availability_zone=availability_zone.name) self._deploy_edge(context, lrouter, appliance_size=appliance_size, edge_type=edge_type, async=False, - res_pool=res_pool) + availability_zone=availability_zone) return with locking.LockManager.get_lock('nsx-edge-request'): - self._clean_all_error_edge_bindings(context, res_pool=res_pool) + self._clean_all_error_edge_bindings( + context, availability_zone=availability_zone) available_router_binding = self._get_available_router_binding( context, appliance_size=appliance_size, edge_type=edge_type, - res_pool=res_pool) + availability_zone=availability_zone) if available_router_binding: # Update the status from ACTIVE to PENDING_UPDATE # in case of other threads select the same router binding @@ -605,11 +598,11 @@ class EdgeManager(object): plugin_const.PENDING_CREATE, appliance_size=appliance_size, edge_type=edge_type, - resource_pool=res_pool) + availability_zone=availability_zone.name) self._deploy_edge(context, lrouter, appliance_size=appliance_size, edge_type=edge_type, async=False, - res_pool=res_pool) + availability_zone=availability_zone) else: LOG.debug("Select edge: %(edge_id)s from pool for %(name)s", {'edge_id': available_router_binding['edge_id'], @@ -625,7 +618,7 @@ class EdgeManager(object): plugin_const.PENDING_CREATE, appliance_size=appliance_size, edge_type=edge_type, - resource_pool=res_pool) + availability_zone=availability_zone.name) LOG.debug("Select edge: %(edge_id)s from pool for %(name)s", {'edge_id': available_router_binding['edge_id'], 'name': name}) @@ -648,20 +641,21 @@ class EdgeManager(object): task = self.nsxv_manager.update_edge( resource_id, available_router_binding['edge_id'], name, None, appliance_size=appliance_size, dist=dist, - jobdata=jobdata, set_errors=True, res_pool=res_pool) + jobdata=jobdata, set_errors=True, + availability_zone=availability_zone) task.wait(task_const.TaskState.RESULT) backup_num = len(self._get_backup_edge_bindings( context, appliance_size=appliance_size, edge_type=edge_type, - db_update_lock=True, res_pool=res_pool)) + db_update_lock=True, availability_zone=availability_zone)) router_ids = self._deploy_backup_edges_on_db( context, edge_pool_range['minimum_pooled_edges'] - backup_num, appliance_size=appliance_size, edge_type=edge_type, - res_pool=res_pool) + availability_zone=availability_zone) self._deploy_backup_edges_at_backend( context, router_ids, appliance_size=appliance_size, edge_type=edge_type, - res_pool=res_pool) + availability_zone=availability_zone) def _free_edge_appliance(self, context, router_id): """Try to collect one edge to pool.""" @@ -672,7 +666,8 @@ class EdgeManager(object): return dist = (binding['edge_type'] == nsxv_constants.VDR_EDGE) edge_id = binding['edge_id'] - res_pool = nsxv_db.get_edge_resource_pool(context.session, edge_id) + availability_zone_name = 
nsxv_db.get_edge_availability_zone( + context.session, edge_id) edge_pool_range = self.edge_pool_dicts[binding['edge_type']].get( binding['appliance_size']) @@ -688,7 +683,7 @@ class EdgeManager(object): plugin_const.PENDING_UPDATE, appliance_size=binding['appliance_size'], edge_type=binding['edge_type'], - resource_pool=res_pool) + availability_zone=availability_zone_name) router_id = backup_router_id if (binding['status'] == plugin_const.ERROR or @@ -706,11 +701,15 @@ class EdgeManager(object): router_id, edge_id, jobdata=jobdata, dist=dist) return + availability_zone = self._availability_zones.get_availability_zone( + availability_zone_name) with locking.LockManager.get_lock('nsx-edge-request'): - self._clean_all_error_edge_bindings(context, res_pool=res_pool) + self._clean_all_error_edge_bindings( + context, availability_zone=availability_zone) backup_router_bindings = self._get_backup_edge_bindings( context, appliance_size=binding['appliance_size'], - edge_type=binding['edge_type'], res_pool=res_pool) + edge_type=binding['edge_type'], + availability_zone=availability_zone) backup_num = len(backup_router_bindings) # collect the edge to pool if pool not full if backup_num < edge_pool_range['maximum_pooled_edges']: @@ -718,7 +717,7 @@ class EdgeManager(object): task = self.nsxv_manager.update_edge( backup_router_id, edge_id, backup_router_id, None, appliance_size=binding['appliance_size'], dist=dist, - res_pool=res_pool) + availability_zone=availability_zone) task.wait(task_const.TaskState.RESULT) # Clean all edge vnic bindings @@ -744,13 +743,14 @@ class EdgeManager(object): self.nsxv_manager.delete_edge( router_id, edge_id, jobdata=jobdata, dist=dist) - def _allocate_dhcp_edge_appliance(self, context, resource_id, res_pool): + def _allocate_dhcp_edge_appliance(self, context, resource_id, + availability_zone): resource_name = (vcns_const.DHCP_EDGE_PREFIX + _uuid())[:vcns_const.EDGE_NAME_LEN] self._allocate_edge_appliance( context, resource_id, resource_name, appliance_size=vcns_const.SERVICE_SIZE_MAPPING['dhcp'], - res_pool=res_pool) + availability_zone=availability_zone) def _free_dhcp_edge_appliance(self, context, network_id): router_id = (vcns_const.DHCP_EDGE_PREFIX + network_id)[:36] @@ -764,15 +764,13 @@ class EdgeManager(object): def create_lrouter( self, context, lrouter, lswitch=None, dist=False, appliance_size=vcns_const.SERVICE_SIZE_MAPPING['router'], - res_pool=None): + availability_zone=None): """Create an edge for logical router support.""" - if not res_pool: - res_pool = cfg.CONF.nsxv.resource_pool_id router_name = self._build_lrouter_name(lrouter['id'], lrouter['name']) self._allocate_edge_appliance( context, lrouter['id'], router_name, appliance_size=appliance_size, - dist=dist, res_pool=res_pool) + dist=dist, availability_zone=availability_zone) def delete_lrouter(self, context, router_id, dist=False): self._free_edge_appliance(context, router_id) @@ -956,12 +954,12 @@ class EdgeManager(object): return new_id def _get_available_edges(self, context, network_id, conflicting_nets, - res_pool=cfg.CONF.nsxv.resource_pool_id): + availability_zone): if conflicting_nets is None: conflicting_nets = [] conflict_edge_ids = [] available_edge_ids = [] - filters = {'resource_pool': [res_pool]} + filters = {'availability_zone': [availability_zone.name]} router_bindings = nsxv_db.get_nsxv_router_bindings(context.session, filters=filters) all_dhcp_edges = {binding['router_id']: binding['edge_id'] for @@ -994,12 +992,12 @@ class EdgeManager(object): available_edge_ids.append(x) return 
(conflict_edge_ids, available_edge_ids) - def _get_used_edges(self, context, subnet, resource_pool): + def _get_used_edges(self, context, subnet, availability_zone): """Returns conflicting and available edges for the subnet.""" conflicting = self.plugin._get_conflicting_networks_for_subnet( context, subnet) return self._get_available_edges(context, subnet['network_id'], - conflicting, resource_pool) + conflicting, availability_zone) def remove_network_from_dhcp_edge(self, context, network_id, edge_id): old_binding = nsxv_db.get_edge_vnic_binding( @@ -1040,7 +1038,7 @@ class EdgeManager(object): old_tunnel_index, network_id) def reuse_existing_dhcp_edge(self, context, edge_id, resource_id, - network_id): + network_id, availability_zone): app_size = vcns_const.SERVICE_SIZE_MAPPING['dhcp'] # There may be edge cases when we are waiting for edges to deploy # and the underlying db session may hit a timeout. So this creates @@ -1050,15 +1048,14 @@ class EdgeManager(object): context.session, resource_id, edge_id, None, plugin_const.ACTIVE, appliance_size=app_size, - resource_pool=cfg.CONF.nsxv.resource_pool_id) + availability_zone=availability_zone.name) nsxv_db.allocate_edge_vnic_with_tunnel_index( context.session, edge_id, network_id) def allocate_new_dhcp_edge(self, context, network_id, resource_id, - res_pool): - if not res_pool: - res_pool = cfg.CONF.nsxv.resource_pool_id - self._allocate_dhcp_edge_appliance(context, resource_id, res_pool) + availability_zone): + self._allocate_dhcp_edge_appliance(context, resource_id, + availability_zone) with locking.LockManager.get_lock('nsx-edge-pool'): new_edge = nsxv_db.get_nsxv_router_binding(context.session, resource_id) @@ -1074,7 +1071,7 @@ class EdgeManager(object): If new edge was allocated, return resource_id, else return None """ - res_pool = self.plugin.get_network_resource_pool(context, network_id) + availability_zone = self.plugin.get_network_az(context, network_id) # Check if the network has one related dhcp edge resource_id = (vcns_const.DHCP_EDGE_PREFIX + network_id)[:36] dhcp_edge_binding = nsxv_db.get_nsxv_router_binding(context.session, @@ -1086,7 +1083,7 @@ class EdgeManager(object): edge_id = dhcp_edge_binding['edge_id'] (conflict_edge_ids, available_edge_ids) = self._get_used_edges(context, subnet, - res_pool) + availability_zone) LOG.debug("The available edges %s, the conflict edges %s " "at present is using edge %s", available_edge_ids, conflict_edge_ids, edge_id) @@ -1111,7 +1108,8 @@ class EdgeManager(object): LOG.debug("Select edge %s to support dhcp for " "network %s", new_id, network_id) self.reuse_existing_dhcp_edge( - context, new_id, resource_id, network_id) + context, new_id, resource_id, network_id, + availability_zone) else: allocate_new_edge = True else: @@ -1121,7 +1119,7 @@ class EdgeManager(object): with locking.LockManager.get_lock('nsx-edge-pool'): (conflict_edge_ids, available_edge_ids) = self._get_used_edges(context, subnet, - res_pool) + availability_zone) LOG.debug('The available edges %s, the conflict edges %s', available_edge_ids, conflict_edge_ids) # There is available one @@ -1133,7 +1131,8 @@ class EdgeManager(object): "%s", new_id, network_id) with locking.LockManager.get_lock(str(new_id)): self.reuse_existing_dhcp_edge( - context, new_id, resource_id, network_id) + context, new_id, resource_id, network_id, + availability_zone) else: allocate_new_edge = True else: @@ -1141,7 +1140,7 @@ class EdgeManager(object): if allocate_new_edge: self.allocate_new_dhcp_edge(context, network_id, resource_id, - 
res_pool) + availability_zone) # If a new Edge was allocated, return resource_id return resource_id @@ -1255,16 +1254,16 @@ class EdgeManager(object): # Find DHCP Edge which is associated with this VDR vdr_dhcp_binding = nsxv_db.get_vdr_dhcp_binding_by_vdr( context.session, vdr_router_id) + availability_zone = self.plugin.get_network_az(context, network_id) if vdr_dhcp_binding: dhcp_edge_id = vdr_dhcp_binding['dhcp_edge_id'] self.reuse_existing_dhcp_edge( - context, dhcp_edge_id, resource_id, network_id) + context, dhcp_edge_id, resource_id, network_id, + availability_zone) else: # Attach to DHCP Edge - resource_pool = self.plugin.get_network_resource_pool(context, - network_id) dhcp_edge_id = self.allocate_new_dhcp_edge( - context, network_id, resource_id, resource_pool) + context, network_id, resource_id, availability_zone) self.plugin.metadata_proxy_handler.configure_router_edge( resource_id, context) @@ -1464,7 +1463,7 @@ class EdgeManager(object): return plr_router_id def create_plr_with_tlr_id(self, context, router_id, router_name, - res_pool=None): + availability_zone): # Add an internal network preparing for connecting the VDR # to a PLR tlr_edge_id = nsxv_db.get_nsxv_router_binding( @@ -1494,7 +1493,8 @@ class EdgeManager(object): # Handle plr relative op plr_router = {'name': router_name, 'id': (vcns_const.PLR_EDGE_PREFIX + _uuid())[:36]} - self.create_lrouter(context, plr_router, res_pool=res_pool) + self.create_lrouter(context, plr_router, + availability_zone=availability_zone) binding = nsxv_db.get_nsxv_router_binding( context.session, plr_router['id']) plr_edge_id = binding['edge_id'] @@ -1573,12 +1573,10 @@ class EdgeManager(object): def bind_router_on_available_edge( self, context, target_router_id, optional_router_ids, conflict_router_ids, - conflict_network_ids, network_number, resource_pool): + conflict_network_ids, network_number, availability_zone): """Bind logical router on an available edge. Return True if the logical router is bound to a new edge. 
""" - if not resource_pool: - resource_pool = cfg.CONF.nsxv.resource_pool_id with locking.LockManager.get_lock('nsx-edge-router'): optional_edge_ids = [] conflict_edge_ids = [] @@ -1586,7 +1584,7 @@ class EdgeManager(object): binding = nsxv_db.get_nsxv_router_binding( context.session, router_id) if (binding and binding.status == plugin_const.ACTIVE and - binding.resource_pool == resource_pool and + binding.availability_zone == availability_zone.name and binding.edge_id not in optional_edge_ids): optional_edge_ids.append(binding.edge_id) @@ -1626,14 +1624,14 @@ class EdgeManager(object): edge_binding.status, edge_binding.appliance_size, edge_binding.edge_type, - resource_pool=resource_pool) + availability_zone=availability_zone.name) else: router_name = ('shared' + '-' + _uuid())[ :vcns_const.EDGE_NAME_LEN] self._allocate_edge_appliance( context, target_router_id, router_name, appliance_size=vcns_const.SERVICE_SIZE_MAPPING['router'], - res_pool=resource_pool) + availability_zone=availability_zone) return True def unbind_router_on_edge(self, context, router_id): @@ -1766,7 +1764,7 @@ class EdgeManager(object): def create_lrouter(nsxv_manager, context, lrouter, lswitch=None, dist=False, - res_pool=None): + availability_zone=None): """Create an edge for logical router support.""" router_id = lrouter['id'] router_name = lrouter['name'] + '-' + router_id @@ -1776,7 +1774,7 @@ def create_lrouter(nsxv_manager, context, lrouter, lswitch=None, dist=False, context.session, router_id, None, None, plugin_const.PENDING_CREATE, appliance_size=appliance_size, - resource_pool=res_pool) + availability_zone=availability_zone.name) # deploy edge jobdata = { @@ -1792,7 +1790,7 @@ def create_lrouter(nsxv_manager, context, lrouter, lswitch=None, dist=False, task = nsxv_manager.deploy_edge( router_id, router_name, internal_network=None, dist=dist, jobdata=jobdata, appliance_size=appliance_size, - res_pool=res_pool) + availability_zone=availability_zone) task.wait(task_const.TaskState.RESULT) diff --git a/vmware_nsx/plugins/nsx_v/vshield/vcns_driver.py b/vmware_nsx/plugins/nsx_v/vshield/vcns_driver.py index 44eca000cc..efb2d9b9d9 100644 --- a/vmware_nsx/plugins/nsx_v/vshield/vcns_driver.py +++ b/vmware_nsx/plugins/nsx_v/vshield/vcns_driver.py @@ -46,9 +46,6 @@ class VcnsDriver(edge_appliance_driver.EdgeApplianceDriver, self.insecure = cfg.CONF.nsxv.insecure self.datacenter_moid = cfg.CONF.nsxv.datacenter_moid self.deployment_container_id = cfg.CONF.nsxv.deployment_container_id - self.resource_pool_id = cfg.CONF.nsxv.resource_pool_id - self.datastore_id = cfg.CONF.nsxv.datastore_id - self.ha_datastore_id = cfg.CONF.nsxv.ha_datastore_id self.external_network = cfg.CONF.nsxv.external_network self._pid = None self._task_manager = None diff --git a/vmware_nsx/tests/unit/nsx_v/test_plugin.py b/vmware_nsx/tests/unit/nsx_v/test_plugin.py index 1a076e50a5..c77a63e329 100644 --- a/vmware_nsx/tests/unit/nsx_v/test_plugin.py +++ b/vmware_nsx/tests/unit/nsx_v/test_plugin.py @@ -59,6 +59,7 @@ from vmware_nsx.extensions import routersize as router_size from vmware_nsx.extensions import routertype as router_type from vmware_nsx.extensions import securitygrouplogging from vmware_nsx.extensions import vnicindex as ext_vnic_idx +from vmware_nsx.plugins.nsx_v import availability_zones as nsx_az from vmware_nsx.plugins.nsx_v.drivers import ( exclusive_router_driver as ex_router_driver) from vmware_nsx.plugins.nsx_v.drivers import ( @@ -629,12 +630,12 @@ class TestNetworksV2(test_plugin.TestNetworksV2, NsxVPluginV2TestCase): ctx, data) 
def test_create_network_with_az_hint(self): + az_name = 'az7' + az_config = az_name + ':respool-7:datastore-7' + cfg.CONF.set_override('availability_zones', [az_config], group="nsxv") p = manager.NeutronManager.get_plugin() + p._availability_zones_data = nsx_az.ConfiguredAvailabilityZones() ctx = context.get_admin_context() - alter_pool_id = 'respool-7' - alter_pool_name = 'rs-7' - p._availability_zones_data = {'default': self.default_res_pool, - alter_pool_name: alter_pool_id} data = {'network': { 'name': 'test-qos', @@ -642,12 +643,12 @@ class TestNetworksV2(test_plugin.TestNetworksV2, NsxVPluginV2TestCase): 'port_security_enabled': False, 'admin_state_up': True, 'shared': False, - 'availability_zone_hints': [alter_pool_name] + 'availability_zone_hints': [az_name] }} # network creation should succeed net = p.create_network(ctx, data) - self.assertEqual([alter_pool_name], + self.assertEqual([az_name], net['availability_zone_hints']) # the availability zone is still empty until subnet creation self.assertEqual([], @@ -3082,25 +3083,25 @@ class TestExclusiveRouterTestCase(L3NatTest, L3NatTestCaseBase, router) def test_create_router_with_az_hint(self): + az_name = 'az7' + az_config = az_name + ':respool-7:datastore-7' + cfg.CONF.set_override('availability_zones', [az_config], group="nsxv") p = manager.NeutronManager.get_plugin() - alter_pool_id = 'respool-7' - alter_pool_name = 'rs-7' - p._availability_zones_data = {'default': self.default_res_pool, - alter_pool_name: alter_pool_id} + p._availability_zones_data = nsx_az.ConfiguredAvailabilityZones() p._get_edge_id_by_rtr_id = p.real_get_edge router = {'router': {'admin_state_up': True, 'name': 'e161be1d-0d0d-4046-9823-5a593d94f72c', 'tenant_id': context.get_admin_context().tenant_id, 'router_type': 'exclusive', - 'availability_zone_hints': [alter_pool_name]}} + 'availability_zone_hints': [az_name]}} # router creation should succeed returned_router = p.create_router(context.get_admin_context(), router) - self.assertEqual([alter_pool_name], + self.assertEqual([az_name], returned_router['availability_zone_hints']) - self.assertEqual([alter_pool_name], + self.assertEqual([az_name], returned_router['availability_zones']) @@ -3376,11 +3377,11 @@ class TestVdrTestCase(L3NatTest, L3NatTestCaseBase, def _test_create_rotuer_with_az_hint(self, with_hint): # init the availability zones in the plugin + az_name = 'az7' + az_config = az_name + ':respool-7:datastore-7' + cfg.CONF.set_override('availability_zones', [az_config], group="nsxv") p = manager.NeutronManager.get_plugin() - pool_id = 'respool-7' - pool_name = 'rs-7' - p._availability_zones_data = {'default': self.default_res_pool, - pool_name: pool_id} + p._availability_zones_data = nsx_az.ConfiguredAvailabilityZones() # create a router with/without hints router = {'router': {'admin_state_up': True, @@ -3388,12 +3389,12 @@ class TestVdrTestCase(L3NatTest, L3NatTestCaseBase, 'tenant_id': context.get_admin_context().tenant_id, 'distributed': True}} if with_hint: - router['router']['availability_zone_hints'] = [pool_name] + router['router']['availability_zone_hints'] = [az_name] returned_router = p.create_router(context.get_admin_context(), router) # availability zones is still empty because the router is not attached if with_hint: - self.assertEqual([pool_name], + self.assertEqual([az_name], returned_router['availability_zone_hints']) else: self.assertEqual([], @@ -3401,10 +3402,10 @@ class TestVdrTestCase(L3NatTest, L3NatTestCaseBase, edge_id = edge_utils.get_router_edge_id( 
context.get_admin_context(), returned_router['id']) - res_pool = nsxv_db.get_edge_resource_pool( + res_az = nsxv_db.get_edge_availability_zone( context.get_admin_context().session, edge_id) - expected_pool = pool_id if with_hint else self.default_res_pool - self.assertEqual(expected_pool, res_pool) + expected_az = az_name if with_hint else 'default' + self.assertEqual(expected_az, res_az) def test_create_rotuer_with_az_hint(self): self._test_create_rotuer_with_az_hint(True) @@ -4512,11 +4513,11 @@ class TestSharedRouterTestCase(L3NatTest, L3NatTestCaseBase, def _test_create_rotuer_with_az_hint(self, with_hint): # init the availability zones in the plugin + az_name = 'az7' + az_config = az_name + ':respool-7:datastore-7' + cfg.CONF.set_override('availability_zones', [az_config], group="nsxv") p = manager.NeutronManager.get_plugin() - pool_id = 'respool-7' - pool_name = 'rs-7' - p._availability_zones_data = {'default': self.default_res_pool, - pool_name: pool_id} + p._availability_zones_data = nsx_az.ConfiguredAvailabilityZones() # create a router with/without hints router = {'router': {'admin_state_up': True, @@ -4524,12 +4525,12 @@ class TestSharedRouterTestCase(L3NatTest, L3NatTestCaseBase, 'tenant_id': context.get_admin_context().tenant_id, 'router_type': 'shared'}} if with_hint: - router['router']['availability_zone_hints'] = [pool_name] + router['router']['availability_zone_hints'] = [az_name] returned_router = p.create_router(context.get_admin_context(), router) # availability zones is still empty because the router is not attached if with_hint: - self.assertEqual([pool_name], + self.assertEqual([az_name], returned_router['availability_zone_hints']) else: self.assertEqual([], @@ -4546,10 +4547,10 @@ class TestSharedRouterTestCase(L3NatTest, L3NatTestCaseBase, None) edge_id = edge_utils.get_router_edge_id( context.get_admin_context(), router_id) - res_pool = nsxv_db.get_edge_resource_pool( + res_az = nsxv_db.get_edge_availability_zone( context.get_admin_context().session, edge_id) - expected_pool = pool_id if with_hint else self.default_res_pool - self.assertEqual(expected_pool, res_pool) + expected_az = az_name if with_hint else 'default' + self.assertEqual(expected_az, res_az) def test_create_rotuer_with_az_hint(self): self._test_create_rotuer_with_az_hint(True) diff --git a/vmware_nsx/tests/unit/nsx_v/vshield/test_edge_utils.py b/vmware_nsx/tests/unit/nsx_v/vshield/test_edge_utils.py index 5c5407a2dc..2a255d8c4a 100644 --- a/vmware_nsx/tests/unit/nsx_v/vshield/test_edge_utils.py +++ b/vmware_nsx/tests/unit/nsx_v/vshield/test_edge_utils.py @@ -25,6 +25,7 @@ from neutron_lib import exceptions as n_exc from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.common import nsxv_constants from vmware_nsx.db import nsxv_db +from vmware_nsx.plugins.nsx_v import availability_zones as nsx_az from vmware_nsx.plugins.nsx_v.vshield.common import ( constants as vcns_const) from vmware_nsx.plugins.nsx_v.vshield import edge_utils @@ -38,7 +39,7 @@ EDGE_CREATING = 'creating-' EDGE_ERROR1 = 'error1-' EDGE_ERROR2 = 'error2-' EDGE_DELETING = 'deleting-' -DEFAULT_RES_POOL = 'respool-28' +DEFAULT_AZ = 'default' class EdgeUtilsTestCaseMixin(testlib_api.SqlTestCase): @@ -57,8 +58,8 @@ class EdgeUtilsTestCaseMixin(testlib_api.SqlTestCase): self.ctx = context.get_admin_context() self.addCleanup(nsxv_manager_p.stop) self.fake_jobdata = {'router_id': 'fake_id', 'context': self.ctx} - cfg.CONF.set_override("resource_pool_id", DEFAULT_RES_POOL, - group="nsxv") + self.az = 
(nsx_az.ConfiguredAvailabilityZones(). + get_default_availability_zone()) def _create_router(self, name='router1'): return {'name': name, @@ -81,12 +82,13 @@ class EdgeUtilsTestCaseMixin(testlib_api.SqlTestCase): binding['edge_id'], None, binding['status'], appliance_size=binding['appliance_size'], edge_type=binding['edge_type'], - resource_pool=binding['resource_pool']) + availability_zone=binding['availability_zone']) class DummyPlugin(object): - def get_network_resource_pool(self, context, network_id): - return cfg.CONF.nsxv.resource_pool_id + def get_network_az(self, context, network_id): + return (nsx_az.ConfiguredAvailabilityZones(). + get_default_availability_zone()) class EdgeDHCPManagerTestCase(EdgeUtilsTestCaseMixin): @@ -104,19 +106,19 @@ class EdgeDHCPManagerTestCase(EdgeUtilsTestCaseMixin): 'router_id': 'backup-11111111-1111', 'appliance_size': 'compact', 'edge_type': 'service', - 'resource_pool': DEFAULT_RES_POOL}, + 'availability_zone': DEFAULT_AZ}, {'status': plugin_const.PENDING_DELETE, 'edge_id': 'edge-2', 'router_id': 'dhcp-22222222-2222', 'appliance_size': 'compact', 'edge_type': 'service', - 'resource_pool': DEFAULT_RES_POOL}, + 'availability_zone': DEFAULT_AZ}, {'status': plugin_const.PENDING_DELETE, 'edge_id': 'edge-3', 'router_id': 'backup-33333333-3333', 'appliance_size': 'compact', 'edge_type': 'service', - 'resource_pool': DEFAULT_RES_POOL}] + 'availability_zone': DEFAULT_AZ}] self._populate_vcns_router_binding(fake_edge_pool) fake_network = self._create_network() fake_subnet = self._create_subnet(fake_network['id']) @@ -132,7 +134,7 @@ class EdgeDHCPManagerTestCase(EdgeUtilsTestCaseMixin): resource_id, 'edge-1', mock.ANY, None, jobdata=jobdata, appliance_size=vcns_const.SERVICE_SIZE_MAPPING['dhcp'], dist=False, set_errors=True, - res_pool=cfg.CONF.nsxv.resource_pool_id) + availability_zone=mock.ANY) def test_get_random_available_edge(self): available_edge_ids = ['edge-1', 'edge-2'] @@ -200,10 +202,10 @@ class EdgeUtilsTestCase(EdgeUtilsTestCaseMixin): self.nsxv_manager.deploy_edge.reset_mock() edge_utils.create_lrouter(self.nsxv_manager, self.ctx, lrouter, lswitch=None, dist=False, - res_pool=DEFAULT_RES_POOL) + availability_zone=self.az) self.nsxv_manager.deploy_edge.assert_called_once_with( lrouter['id'], (lrouter['name'] + '-' + lrouter['id']), - internal_network=None, dist=False, res_pool=DEFAULT_RES_POOL, + internal_network=None, dist=False, availability_zone=self.az, jobdata={'router_id': lrouter['id'], 'lrouter': lrouter, 'lswitch': None, @@ -378,104 +380,95 @@ class EdgeManagerTestCase(EdgeUtilsTestCaseMixin): 'nsxv') self.assertRaises(n_exc.Invalid, edge_utils.parse_backup_edge_pool_opt) - def _create_available_router_bindings( - self, num, size=nsxv_constants.LARGE, - edge_type=nsxv_constants.SERVICE_EDGE, - resource_pool=DEFAULT_RES_POOL): - id_prefix = EDGE_AVAIL + size + '-' + edge_type - return [{'status': plugin_const.ACTIVE, + def _create_router_bindings(self, num, status, id_prefix, size, + edge_type, availability_zone): + if not availability_zone: + availability_zone = self.az + return [{'status': status, 'edge_id': id_prefix + '-edge-' + str(i), 'router_id': (vcns_const.BACKUP_ROUTER_PREFIX + id_prefix + str(i)), 'appliance_size': size, 'edge_type': edge_type, - 'resource_pool': resource_pool} + 'availability_zone': availability_zone.name} for i in moves.range(num)] + def _create_available_router_bindings( + self, num, size=nsxv_constants.LARGE, + edge_type=nsxv_constants.SERVICE_EDGE, + availability_zone=None): + status = plugin_const.ACTIVE 
+ id_prefix = EDGE_AVAIL + size + '-' + edge_type + return self._create_router_bindings( + num, status, id_prefix, size, edge_type, + availability_zone) + def _create_creating_router_bindings( self, num, size=nsxv_constants.LARGE, edge_type=nsxv_constants.SERVICE_EDGE, - resource_pool=DEFAULT_RES_POOL): + availability_zone=None): + status = plugin_const.PENDING_CREATE id_prefix = EDGE_CREATING + size + '-' + edge_type - return [{'status': plugin_const.PENDING_CREATE, - 'edge_id': id_prefix + '-edge-' + str(i), - 'router_id': (vcns_const.BACKUP_ROUTER_PREFIX + - id_prefix + str(i)), - 'appliance_size': size, - 'edge_type': edge_type, - 'resource_pool': resource_pool} - for i in moves.range(num)] + return self._create_router_bindings( + num, status, id_prefix, size, edge_type, + availability_zone) def _create_error_router_bindings( self, num, status=plugin_const.ERROR, size=nsxv_constants.LARGE, edge_type=nsxv_constants.SERVICE_EDGE, - resource_pool=DEFAULT_RES_POOL): + availability_zone=None): id_prefix = EDGE_ERROR1 + size + '-' + edge_type - return [{'status': status, - 'edge_id': id_prefix + '-edge-' + str(i), - 'router_id': (vcns_const.BACKUP_ROUTER_PREFIX + - id_prefix + str(i)), - 'appliance_size': size, - 'edge_type': edge_type, - 'resource_pool': resource_pool} - for i in moves.range(num)] + return self._create_router_bindings( + num, status, id_prefix, size, edge_type, + availability_zone) def _create_error_router_bindings_at_backend( self, num, status=plugin_const.ACTIVE, size=nsxv_constants.LARGE, edge_type=nsxv_constants.SERVICE_EDGE, - resource_pool=DEFAULT_RES_POOL): + availability_zone=None): id_prefix = EDGE_ERROR2 + size + '-' + edge_type - return [{'status': status, - 'edge_id': id_prefix + '-edge-' + str(i), - 'router_id': (vcns_const.BACKUP_ROUTER_PREFIX + - id_prefix + str(i)), - 'appliance_size': size, - 'edge_type': edge_type, - 'resource_pool': resource_pool} - for i in moves.range(num)] + return self._create_router_bindings( + num, status, id_prefix, size, edge_type, + availability_zone) def _create_deleting_router_bindings( self, num, size=nsxv_constants.LARGE, edge_type=nsxv_constants.SERVICE_EDGE, - resource_pool=DEFAULT_RES_POOL): + availability_zone=None): + status = plugin_const.PENDING_DELETE id_prefix = EDGE_DELETING + size + '-' + edge_type - return [{'status': plugin_const.PENDING_DELETE, - 'edge_id': id_prefix + '-edge-' + str(i), - 'router_id': (vcns_const.BACKUP_ROUTER_PREFIX + - id_prefix + str(i)), - 'appliance_size': size, - 'edge_type': edge_type, - 'resource_pool': resource_pool} - for i in moves.range(num)] + return self._create_router_bindings( + num, status, id_prefix, size, edge_type, + availability_zone) def _create_edge_pools(self, avail, creating, error, error_at_backend, deleting, size=nsxv_constants.LARGE, - edge_type=nsxv_constants.SERVICE_EDGE, - resource_pool=DEFAULT_RES_POOL): + edge_type=nsxv_constants.SERVICE_EDGE): """Create a backup edge pool with different status of edges. Backup edges would be edges with avail, creating and error_at_backend, while available edges would only be edges with avail status. 
""" + availability_zone = self.az return ( self._create_error_router_bindings( error, size=size, edge_type=edge_type, - resource_pool=resource_pool) + + availability_zone=availability_zone) + self._create_deleting_router_bindings( deleting, size=size, edge_type=edge_type, - resource_pool=resource_pool) + + availability_zone=availability_zone) + self._create_error_router_bindings_at_backend( error_at_backend, size=size, edge_type=edge_type, - resource_pool=resource_pool) + + availability_zone=availability_zone) + self._create_creating_router_bindings( creating, size=size, edge_type=edge_type, - resource_pool=resource_pool) + + availability_zone=availability_zone) + self._create_available_router_bindings( avail, size=size, edge_type=edge_type, - resource_pool=resource_pool)) + availability_zone=availability_zone)) def _create_backup_router_bindings( self, avail, creating, error, error_at_backend, deleting, @@ -483,24 +476,26 @@ class EdgeManagerTestCase(EdgeUtilsTestCaseMixin): error_at_backend_status=plugin_const.PENDING_DELETE, size=nsxv_constants.LARGE, edge_type=nsxv_constants.SERVICE_EDGE, - resource_pool=DEFAULT_RES_POOL): + availability_zone=None): + if not availability_zone: + availability_zone = self.az return ( self._create_error_router_bindings( error, status=error_status, size=size, edge_type=edge_type, - resource_pool=resource_pool) + + availability_zone=availability_zone) + self._create_error_router_bindings_at_backend( error_at_backend, status=error_at_backend_status, size=size, edge_type=edge_type, - resource_pool=resource_pool) + + availability_zone=availability_zone) + self._create_creating_router_bindings( creating, size=size, edge_type=edge_type, - resource_pool=resource_pool) + + availability_zone=availability_zone) + self._create_available_router_bindings( avail, size=size, edge_type=edge_type, - resource_pool=resource_pool) + + availability_zone=availability_zone) + self._create_deleting_router_bindings( deleting, size=size, edge_type=edge_type, - resource_pool=resource_pool)) + availability_zone=availability_zone)) def _verify_router_bindings(self, exp_bindings, act_db_bindings): exp_dict = dict(zip([binding['router_id'] @@ -510,7 +505,7 @@ class EdgeManagerTestCase(EdgeUtilsTestCaseMixin): 'status': binding['status'], 'appliance_size': binding['appliance_size'], 'edge_type': binding['edge_type'], - 'resource_pool': binding['resource_pool']} + 'availability_zone': binding['availability_zone']} for binding in act_db_bindings] act_dict = dict(zip([binding['router_id'] for binding in act_bindings], act_bindings)) @@ -527,7 +522,7 @@ class EdgeManagerTestCase(EdgeUtilsTestCaseMixin): error_at_backend_status=plugin_const.ACTIVE, size=nsxv_constants.LARGE) backup_bindings = self.edge_manager._get_backup_edge_bindings(self.ctx, - appliance_size=nsxv_constants.LARGE, res_pool=DEFAULT_RES_POOL) + appliance_size=nsxv_constants.LARGE, availability_zone=self.az) self._verify_router_bindings(expect_backup_bindings, backup_bindings) def test_get_available_router_bindings(self): @@ -540,14 +535,15 @@ class EdgeManagerTestCase(EdgeUtilsTestCaseMixin): expect_backup_bindings = self._create_backup_router_bindings( 1, 2, 3, 0, 5, error_status=plugin_const.ERROR) binding = self.edge_manager._get_available_router_binding( - self.ctx, appliance_size=appliance_size, edge_type=edge_type) + self.ctx, appliance_size=appliance_size, edge_type=edge_type, + availability_zone=self.az) router_bindings = [ binding_db for binding_db in nsxv_db.get_nsxv_router_bindings( self.ctx.session) if 
(binding_db['appliance_size'] == appliance_size and binding_db['edge_type'] == edge_type and - binding_db['resource_pool'] == DEFAULT_RES_POOL)] + binding_db['availability_zone'] == 'default')] self._verify_router_bindings(expect_backup_bindings, router_bindings) edge_id = (EDGE_AVAIL + appliance_size + '-' + edge_type + '-edge-' + str(0)) @@ -567,7 +563,7 @@ class EdgeManagerTestCase(EdgeUtilsTestCaseMixin): self.edge_manager._check_backup_edge_pool( 0, 3, appliance_size=appliance_size, edge_type=edge_type, - res_pool=DEFAULT_RES_POOL) + availability_zone=self.az) router_bindings = [ binding for binding in nsxv_db.get_nsxv_router_bindings(self.ctx.session) @@ -588,7 +584,7 @@ class EdgeManagerTestCase(EdgeUtilsTestCaseMixin): self.edge_manager._check_backup_edge_pool( 5, 10, appliance_size=appliance_size, edge_type=edge_type, - res_pool=DEFAULT_RES_POOL) + availability_zone=self.az) router_bindings = [ binding for binding in nsxv_db.get_nsxv_router_bindings(self.ctx.session) @@ -599,7 +595,7 @@ class EdgeManagerTestCase(EdgeUtilsTestCaseMixin): self.assertEqual(2, len(router_bindings)) edge_utils.eventlet.spawn_n.assert_called_with( mock.ANY, mock.ANY, binding_ids, appliance_size, - edge_type, DEFAULT_RES_POOL) + edge_type, self.az) def test_check_backup_edge_pools_with_empty_conf(self): pool_edges = (self._create_edge_pools(1, 2, 3, 4, 5) + @@ -686,7 +682,8 @@ class EdgeManagerTestCase(EdgeUtilsTestCaseMixin): def test_allocate_edge_appliance_with_empty(self): self.edge_manager._clean_all_error_edge_bindings = mock.Mock() self.edge_manager._allocate_edge_appliance( - self.ctx, 'fake_id', 'fake_name') + self.ctx, 'fake_id', 'fake_name', + availability_zone=self.az) assert not self.edge_manager._clean_all_error_edge_bindings.called def test_allocate_large_edge_appliance_with_default(self): @@ -699,14 +696,15 @@ class EdgeManagerTestCase(EdgeUtilsTestCaseMixin): self._populate_vcns_router_binding(pool_edges) self.edge_manager._allocate_edge_appliance( self.ctx, 'fake_id', 'fake_name', - appliance_size=nsxv_constants.LARGE) + appliance_size=nsxv_constants.LARGE, + availability_zone=self.az) edge_id = (EDGE_AVAIL + nsxv_constants.LARGE + '-' + nsxv_constants.SERVICE_EDGE + '-edge-' + str(0)) self.nsxv_manager.update_edge.assert_has_calls( [mock.call('fake_id', edge_id, 'fake_name', None, jobdata=self.fake_jobdata, set_errors=True, appliance_size=nsxv_constants.LARGE, dist=False, - res_pool=None)]) + availability_zone=self.az)]) def test_allocate_compact_edge_appliance_with_default(self): self.edge_manager.edge_pool_dicts = self.default_edge_pool_dicts @@ -718,14 +716,15 @@ class EdgeManagerTestCase(EdgeUtilsTestCaseMixin): self._populate_vcns_router_binding(pool_edges) self.edge_manager._allocate_edge_appliance( self.ctx, 'fake_id', 'fake_name', - appliance_size=nsxv_constants.COMPACT) + appliance_size=nsxv_constants.COMPACT, + availability_zone=self.az) edge_id = (EDGE_AVAIL + nsxv_constants.COMPACT + '-' + nsxv_constants.SERVICE_EDGE + '-edge-' + str(0)) self.nsxv_manager.update_edge.assert_has_calls( [mock.call('fake_id', edge_id, 'fake_name', None, jobdata=self.fake_jobdata, set_errors=True, appliance_size=nsxv_constants.COMPACT, dist=False, - res_pool=None)]) + availability_zone=self.az)]) def test_allocate_large_edge_appliance_with_vdr(self): self.edge_manager.edge_pool_dicts = self.vdr_edge_pool_dicts @@ -737,19 +736,21 @@ class EdgeManagerTestCase(EdgeUtilsTestCaseMixin): self._populate_vcns_router_binding(pool_edges) self.edge_manager._allocate_edge_appliance( self.ctx, 'fake_id', 
'fake_name', dist=True, - appliance_size=nsxv_constants.LARGE) + appliance_size=nsxv_constants.LARGE, + availability_zone=self.az) edge_id = (EDGE_AVAIL + nsxv_constants.LARGE + '-' + nsxv_constants.VDR_EDGE + '-edge-' + str(0)) self.nsxv_manager.update_edge.assert_has_calls( [mock.call('fake_id', edge_id, 'fake_name', None, jobdata=self.fake_jobdata, set_errors=True, appliance_size=nsxv_constants.LARGE, dist=True, - res_pool=None)]) + availability_zone=self.az)]) def test_free_edge_appliance_with_empty(self): self.edge_manager._clean_all_error_edge_bindings = mock.Mock() self.edge_manager._allocate_edge_appliance( - self.ctx, 'fake_id', 'fake_name') + self.ctx, 'fake_id', 'fake_name', + availability_zone=self.az) self.edge_manager._free_edge_appliance( self.ctx, 'fake_id') assert not self.edge_manager._clean_all_error_edge_bindings.called @@ -757,14 +758,15 @@ class EdgeManagerTestCase(EdgeUtilsTestCaseMixin): def test_free_edge_appliance_with_default(self): self.edge_manager.edge_pool_dicts = self.default_edge_pool_dicts self.edge_manager._allocate_edge_appliance( - self.ctx, 'fake_id', 'fake_name') + self.ctx, 'fake_id', 'fake_name', + availability_zone=self.az) self.edge_manager._free_edge_appliance( self.ctx, 'fake_id') assert not self.nsxv_manager.delete_edge.called self.nsxv_manager.update_edge.assert_has_calls( [mock.call(mock.ANY, mock.ANY, mock.ANY, None, appliance_size=nsxv_constants.COMPACT, dist=False, - res_pool=None)]) + availability_zone=mock.ANY)]) def test_free_edge_appliance_with_default_with_full(self): self.edge_pool_dicts = { @@ -775,7 +777,8 @@ class EdgeManagerTestCase(EdgeUtilsTestCaseMixin): 'maximum_pooled_edges': 3}}, nsxv_constants.VDR_EDGE: {}} self.edge_manager._allocate_edge_appliance( - self.ctx, 'fake_id', 'fake_name') + self.ctx, 'fake_id', 'fake_name', + availability_zone=self.az) self.edge_manager._free_edge_appliance( self.ctx, 'fake_id') assert self.nsxv_manager.delete_edge.called diff --git a/vmware_nsx/tests/unit/nsx_v/vshield/test_vcns_driver.py b/vmware_nsx/tests/unit/nsx_v/vshield/test_vcns_driver.py index ae5874b995..e0540ec56e 100644 --- a/vmware_nsx/tests/unit/nsx_v/vshield/test_vcns_driver.py +++ b/vmware_nsx/tests/unit/nsx_v/vshield/test_vcns_driver.py @@ -20,6 +20,7 @@ from neutron.tests import base from oslo_config import cfg import six +from vmware_nsx.plugins.nsx_v import availability_zones as nsx_az from vmware_nsx.plugins.nsx_v.vshield.common import ( constants as vcns_const) from vmware_nsx.plugins.nsx_v.vshield.tasks import ( @@ -326,6 +327,8 @@ class VcnsDriverTestCase(base.BaseTestCase): self.vcns_driver = vcns_driver.VcnsDriver(self) + self.az = (nsx_az.ConfiguredAvailabilityZones(). 
+                   get_default_availability_zone())
         self.edge_id = None
         self.result = None
 
@@ -338,7 +341,8 @@ class VcnsDriverTestCase(base.BaseTestCase):
 
     def _deploy_edge(self):
         task = self.vcns_driver.deploy_edge(
-            'router-id', 'myedge', 'internal-network', {}, wait_for_exec=True)
+            'router-id', 'myedge', 'internal-network', {}, wait_for_exec=True,
+            availability_zone=self.az)
         self.assertEqual(self.edge_id, 'edge-1')
         task.wait(ts_const.TaskState.RESULT)
         return task
@@ -394,7 +398,7 @@ class VcnsDriverTestCase(base.BaseTestCase):
         jobdata = {}
         task = self.vcns_driver.deploy_edge(
             'router-id', 'myedge', 'internal-network', jobdata=jobdata,
-            wait_for_exec=True)
+            wait_for_exec=True, availability_zone=self.az)
         self.assertEqual(self.edge_id, 'edge-1')
         task.wait(ts_const.TaskState.RESULT)
         self.assertEqual(task.status, ts_const.TaskStatus.COMPLETED)
@@ -405,15 +409,17 @@ class VcnsDriverTestCase(base.BaseTestCase):
                    "router_id": "fake_router_id"}
         self.vcns_driver.deploy_edge(
             'router-id', 'myedge', 'internal-network', jobdata=jobdata,
-            wait_for_exec=True, async=False)
+            wait_for_exec=True, async=False, availability_zone=self.az)
         status = self.vcns_driver.get_edge_status('edge-1')
         self.assertEqual(status, vcns_const.RouterStatus.ROUTER_STATUS_ACTIVE)
 
     def test_deploy_edge_fail(self):
         task1 = self.vcns_driver.deploy_edge(
-            'router-1', 'myedge', 'internal-network', {}, wait_for_exec=True)
+            'router-1', 'myedge', 'internal-network', {}, wait_for_exec=True,
+            availability_zone=self.az)
         task2 = self.vcns_driver.deploy_edge(
-            'router-2', 'myedge', 'internal-network', {}, wait_for_exec=True)
+            'router-2', 'myedge', 'internal-network', {}, wait_for_exec=True,
+            availability_zone=self.az)
         task1.wait(ts_const.TaskState.RESULT)
         task2.wait(ts_const.TaskState.RESULT)
         self.assertEqual(task2.status, ts_const.TaskStatus.ERROR)
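
Illustrative sketch (not part of the patch): the consolidated _create_router_bindings test helper above builds one fake backup-edge binding per edge and stamps it with the zone's name, which is what the availability_zone column that replaced resource_pool now stores. A minimal standalone approximation is shown below; FakeAvailabilityZone and the 'backup-' router-id prefix are assumptions standing in for the configured zone object and vcns_const.BACKUP_ROUTER_PREFIX.

import collections

# Stand-in for the zone object returned by ConfiguredAvailabilityZones();
# only the .name attribute is used by the helper.
FakeAvailabilityZone = collections.namedtuple('FakeAvailabilityZone', ['name'])


def create_router_bindings(num, status, id_prefix, size, edge_type,
                           availability_zone):
    # Mirror of the test helper: one binding dict per fake edge, recording
    # the zone by name in the 'availability_zone' field.
    return [{'status': status,
             'edge_id': id_prefix + '-edge-' + str(i),
             'router_id': 'backup-' + id_prefix + str(i),
             'appliance_size': size,
             'edge_type': edge_type,
             'availability_zone': availability_zone.name}
            for i in range(num)]


# Usage example: three ACTIVE large service-edge bindings in the default zone.
bindings = create_router_bindings(3, 'ACTIVE', 'available-large-service',
                                  'large', 'service',
                                  FakeAvailabilityZone(name='default'))
assert all(b['availability_zone'] == 'default' for b in bindings)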