diff --git a/vmware_nsx/common/config.py b/vmware_nsx/common/config.py
index 7b46f23869..c6415172db 100644
--- a/vmware_nsx/common/config.py
+++ b/vmware_nsx/common/config.py
@@ -729,6 +729,10 @@ nsxv_opts = [
                 default=False,
                 help=_("Use default block all rule when no security groups "
                        "are set on a port and port security is enabled")),
+    cfg.BoolOpt('use_routers_as_lbaas_platform',
+                default=False,
+                help=_("Use subnet's exclusive router as a platform for "
+                       "LBaaS")),
 ]
 
 # define the configuration of each NSX-V availability zone.
diff --git a/vmware_nsx/services/lbaas/nsx_v/lbaas_common.py b/vmware_nsx/services/lbaas/nsx_v/lbaas_common.py
index 5b504f6a42..f2dff9e439 100644
--- a/vmware_nsx/services/lbaas/nsx_v/lbaas_common.py
+++ b/vmware_nsx/services/lbaas/nsx_v/lbaas_common.py
@@ -39,6 +39,27 @@ def get_lb_resource_id(lb_id):
     return (RESOURCE_ID_PFX + lb_id)[:36]
 
 
+def get_lbaas_edge_id_for_subnet(context, plugin, subnet_id, tenant_id):
+    """
+    Grab the id of an Edge appliance that is connected to subnet_id.
+    """
+    subnet = plugin.get_subnet(context, subnet_id)
+    net_id = subnet.get('network_id')
+    filters = {'network_id': [net_id],
+               'device_owner': ['network:router_interface'],
+               'tenant_id': [tenant_id]}
+    attached_routers = plugin.get_ports(context.elevated(),
+                                        filters=filters,
+                                        fields=['device_id'])
+
+    for attached_router in attached_routers:
+        router = plugin.get_router(context, attached_router['device_id'])
+        if router.get('router_type') == 'exclusive':
+            rtr_bindings = nsxv_db.get_nsxv_router_binding(context.session,
+                                                           router['id'])
+            return rtr_bindings['edge_id']
+
+
 def get_lb_edge_name(context, lb_id):
     """Look for the resource name of the edge hosting the LB.
 
diff --git a/vmware_nsx/services/lbaas/nsx_v/v2/loadbalancer_mgr.py b/vmware_nsx/services/lbaas/nsx_v/v2/loadbalancer_mgr.py
index 849ed0f7b7..0836d61ce7 100644
--- a/vmware_nsx/services/lbaas/nsx_v/v2/loadbalancer_mgr.py
+++ b/vmware_nsx/services/lbaas/nsx_v/v2/loadbalancer_mgr.py
@@ -19,6 +19,7 @@ from neutron_lib.callbacks import registry
 from neutron_lib.callbacks import resources
 from neutron_lib import constants
 from neutron_lib import exceptions as n_exc
+from oslo_config import cfg
 from oslo_log import helpers as log_helpers
 from oslo_log import log as logging
 from oslo_utils import excutils
@@ -60,10 +61,18 @@ class EdgeLoadBalancerManager(base_mgr.EdgeLoadbalancerBaseManager):
 
     @log_helpers.log_method_call
     def create(self, context, lb):
-        lb_size = self._get_lb_flavor_size(context, lb.flavor_id)
-        edge_id = lb_common.get_lbaas_edge_id(
-            context, self.core_plugin, lb.id, lb.vip_address, lb.vip_subnet_id,
-            lb.tenant_id, lb_size)
+        if cfg.CONF.nsxv.use_routers_as_lbaas_platform:
+            edge_id = lb_common.get_lbaas_edge_id_for_subnet(
+                context, self.core_plugin, lb.vip_subnet_id, lb.tenant_id)
+            if not edge_id:
+                msg = _(
+                    'No suitable Edge found for subnet %s') % lb.vip_subnet_id
+                raise n_exc.BadRequest(resource='edge-lbaas', msg=msg)
+        else:
+            lb_size = self._get_lb_flavor_size(context, lb.flavor_id)
+            edge_id = lb_common.get_lbaas_edge_id(
+                context, self.core_plugin, lb.id, lb.vip_address,
+                lb.vip_subnet_id, lb.tenant_id, lb_size)
 
         if not edge_id:
             msg = _('Failed to allocate Edge on subnet %(sub)s for '
@@ -72,14 +81,22 @@ class EdgeLoadBalancerManager(base_mgr.EdgeLoadbalancerBaseManager):
             raise n_exc.BadRequest(resource='edge-lbaas', msg=msg)
 
         try:
-            lb_common.enable_edge_acceleration(self.vcns, edge_id)
+            if cfg.CONF.nsxv.use_routers_as_lbaas_platform:
+                if not nsxv_db.get_nsxv_lbaas_loadbalancer_binding_by_edge(
+                        context.session, edge_id):
+                    lb_common.enable_edge_acceleration(self.vcns, edge_id)
+                lb_common.add_vip_as_secondary_ip(self.vcns, edge_id,
+                                                  lb.vip_address)
+            else:
+                lb_common.enable_edge_acceleration(self.vcns, edge_id)
 
             edge_fw_rule_id = lb_common.add_vip_fw_rule(
                 self.vcns, edge_id, lb.id, lb.vip_address)
             # set LB default rule
-            lb_common.set_lb_firewall_default_rule(self.vcns, edge_id,
-                                                   'accept')
+            if not cfg.CONF.nsxv.use_routers_as_lbaas_platform:
+                lb_common.set_lb_firewall_default_rule(self.vcns, edge_id,
+                                                       'accept')
             nsxv_db.add_nsxv_lbaas_loadbalancer_binding(
                 context.session, lb.id, edge_id, edge_fw_rule_id,
diff --git a/vmware_nsx/services/lbaas/nsx_v/v2/member_mgr.py b/vmware_nsx/services/lbaas/nsx_v/v2/member_mgr.py
index 918d32a391..a1745d0e79 100644
--- a/vmware_nsx/services/lbaas/nsx_v/v2/member_mgr.py
+++ b/vmware_nsx/services/lbaas/nsx_v/v2/member_mgr.py
@@ -13,6 +13,7 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
+from oslo_config import cfg
 from oslo_log import helpers as log_helpers
 from oslo_log import log as logging
 from oslo_utils import excutils
@@ -61,8 +62,10 @@ class EdgeMemberManager(base_mgr.EdgeLoadbalancerBaseManager):
         edge_pool_id = pool_binding['edge_pool_id']
 
         with locking.LockManager.get_lock(edge_id):
-            if not lb_common.is_lb_on_router_edge(
-                    context.elevated(), self.core_plugin, edge_id):
+            if (not cfg.CONF.nsxv.use_routers_as_lbaas_platform and
+                    not lb_common.is_lb_on_router_edge(context.elevated(),
+                                                       self.core_plugin,
+                                                       edge_id)):
                 # Verify that Edge appliance is connected to the member's
                 # subnet (only if this is a dedicated loadbalancer edge)
                 if not lb_common.get_lb_interface(
@@ -153,24 +156,26 @@ class EdgeMemberManager(base_mgr.EdgeLoadbalancerBaseManager):
             context.session, lb_id, member.pool_id)
         edge_id = lb_binding['edge_id']
 
-        with locking.LockManager.get_lock(edge_id):
-            # we should remove LB subnet interface if no members are attached
-            # and this is not the LB's VIP interface
-            remove_interface = True
-            if member.subnet_id == member.pool.loadbalancer.vip_subnet_id:
-                remove_interface = False
-            else:
-                for m in member.pool.members:
-                    if m.subnet_id == member.subnet_id and m.id != member.id:
-                        remove_interface = False
-            if remove_interface:
-                lb_common.delete_lb_interface(context, self.core_plugin, lb_id,
-                                              member.subnet_id)
+        if not cfg.CONF.nsxv.use_routers_as_lbaas_platform:
+            with locking.LockManager.get_lock(edge_id):
+                # we should remove LB subnet interface if no members are
+                # attached and this is not the LB's VIP interface
+                remove_interface = True
+                if member.subnet_id == member.pool.loadbalancer.vip_subnet_id:
+                    remove_interface = False
+                else:
+                    for m in member.pool.members:
+                        if (m.subnet_id == member.subnet_id and
+                                m.id != member.id):
+                            remove_interface = False
+                if remove_interface:
+                    lb_common.delete_lb_interface(context, self.core_plugin,
+                                                  lb_id, member.subnet_id)
 
-        if not pool_binding:
-            self.lbv2_driver.member.successful_completion(
-                context, member, delete=True)
-            return
+            if not pool_binding:
+                self.lbv2_driver.member.successful_completion(
+                    context, member, delete=True)
+                return
 
         edge_pool_id = pool_binding['edge_pool_id']
         edge_pool = self.vcns.get_pool(edge_id, edge_pool_id)[1]
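To exercise the new code path, the option added in config.py above would be enabled in the [nsxv] section of the NSX-V plugin configuration. A minimal sketch, assuming the usual nsx.ini location (the file path is an assumption; the option name and its [nsxv] group come from the diff itself):

    [nsxv]
    # Assumed example: place LBaaS services on the VIP subnet's exclusive
    # router edge instead of deploying a dedicated loadbalancer edge.
    use_routers_as_lbaas_platform = True

With the flag left at its default (False), the driver keeps the existing behaviour of allocating a dedicated LB edge via lb_common.get_lbaas_edge_id().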