diff --git a/devstack/tools/nsxp_cleanup.py b/devstack/tools/nsxp_cleanup.py
index 8a58718845..0d2b3d7934 100755
--- a/devstack/tools/nsxp_cleanup.py
+++ b/devstack/tools/nsxp_cleanup.py
@@ -365,6 +365,65 @@ class NSXClient(object):
             except exceptions.ManagerError as e:
                 print("Failed to delete service %s: %s" % (srv['id'], e))
 
+    def _cleanup_lb_resource(self, service, service_name):
+        r_list = self.get_os_resources(service.list())
+
+        print("Number of %s to be deleted: %d" % (service_name, len(r_list)))
+        for r in r_list:
+            try:
+                service.delete(
+                    r['id'])
+            except Exception as e:
+                print("ERROR: Failed to delete %s %s, error %s" %
+                      (r['resource_type'], r['id'], e))
+
+    def cleanup_lb_virtual_servers(self):
+        self._cleanup_lb_resource(self.nsxpolicy.load_balancer.virtual_server,
+                                  'LB virtual servers')
+
+    def cleanup_lb_server_pools(self):
+        self._cleanup_lb_resource(self.nsxpolicy.load_balancer.lb_pool,
+                                  'LB pools')
+
+    def cleanup_lb_profiles(self):
+        lb_svc = self.nsxpolicy.load_balancer
+        self._cleanup_lb_resource(lb_svc.lb_http_profile,
+                                  'LB HTTP app profiles')
+        self._cleanup_lb_resource(lb_svc.lb_fast_tcp_profile,
+                                  'LB fast TCP app profiles')
+        self._cleanup_lb_resource(lb_svc.lb_fast_udp_profile,
+                                  'LB UDP app profiles')
+        self._cleanup_lb_resource(lb_svc.client_ssl_profile,
+                                  'LB SSL client profiles')
+        self._cleanup_lb_resource(lb_svc.lb_cookie_persistence_profile,
+                                  'LB cookie persistence profiles')
+        self._cleanup_lb_resource(lb_svc.lb_source_ip_persistence_profile,
+                                  'LB source IP persistence profiles')
+
+    def cleanup_lb_monitors(self):
+        lb_svc = self.nsxpolicy.load_balancer
+        self._cleanup_lb_resource(lb_svc.lb_monitor_profile_http,
+                                  'LB HTTP monitor profiles')
+        self._cleanup_lb_resource(lb_svc.lb_monitor_profile_https,
+                                  'LB HTTPS monitor profiles')
+        self._cleanup_lb_resource(lb_svc.lb_monitor_profile_udp,
+                                  'LB UDP monitor profiles')
+        self._cleanup_lb_resource(lb_svc.lb_monitor_profile_icmp,
+                                  'LB ICMP monitor profiles')
+        self._cleanup_lb_resource(lb_svc.lb_monitor_profile_tcp,
+                                  'LB TCP monitor profiles')
+
+    def cleanup_lb_services(self):
+        self._cleanup_lb_resource(self.nsxpolicy.load_balancer.lb_service,
+                                  'LB services')
+
+    def cleanup_load_balancers(self):
+        self.cleanup_lb_virtual_servers()
+        self.cleanup_lb_profiles()
+        self.cleanup_lb_services()
+        self.cleanup_lb_server_pools()
+        self.cleanup_lb_monitors()
+
     def cleanup_all(self):
         """
         Per domain cleanup steps:
@@ -381,6 +440,7 @@ class NSXClient(object):
         print("Cleaning up openstack global resources")
         self.cleanup_segments()
+        self.cleanup_load_balancers()
         self.cleanup_nsx_logical_dhcp_servers()
         self.cleanup_tier1_routers()
         self.cleanup_rules_services()
diff --git a/doc/source/devstack.rst b/doc/source/devstack.rst
index 5110b0c8c5..e3dacab347 100644
--- a/doc/source/devstack.rst
+++ b/doc/source/devstack.rst
@@ -311,6 +311,52 @@ Add neutron-fwaas repo as an external repository and configure following flags i
     [service_providers]
     service_provider = FIREWALL_V2:fwaas_db:neutron_fwaas.services.firewall.service_drivers.agents.agents.FirewallAgentDriver:default
 
+LBaaS v2 Driver
+~~~~~~~~~~~~~~~
+
+Add lbaas repo as an external repository and configure following flags in ``local.conf``::
+
+    [[local|localrc]]
+    enable_service q-lbaasv2
+    Q_SERVICE_PLUGIN_CLASSES+=,vmware_nsx_lbaasv2
+
+Configure the service provider::
+
+    [[post-config|$NEUTRON_CONF]]
+    [service_providers]
+    service_provider = LOADBALANCERV2:VMWareEdge:neutron_lbaas.drivers.vmware.edge_driver_v2.EdgeLoadBalancerDriverV2:default
+
+    [DEFAULT]
+    api_extensions_path = $DEST/neutron-lbaas/neutron_lbaas/extensions
+
+Octavia
+~~~~~~~
+
+Add octavia and python-octaviaclient repos as external repositories and configure following flags in ``local.conf``::
+
+    [[local|localrc]]
+    OCTAVIA_NODE=api
+    DISABLE_AMP_IMAGE_BUILD=True
+    LIBS_FROM_GIT=python-openstackclient,python-octaviaclient
+    enable_plugin octavia https://git.openstack.org/openstack/octavia.git
+    enable_plugin octavia-dashboard https://git.openstack.org/openstack/octavia-dashboard
+    enable_service octavia
+    enable_service o-api
+
+    [[post-config|$OCTAVIA_CONF]]
+    [DEFAULT]
+    verbose = True
+    debug = True
+
+    [api_settings]
+    default_provider_driver=vmwareedge
+    enabled_provider_drivers=vmwareedge:NSX
+
+    [oslo_messaging]
+    topic=vmwarensxv_edge_lb
+
+    [controller_worker]
+    network_driver = network_noop_driver
+
 NSX-TVD
 -------
diff --git a/vmware_nsx/plugins/nsx_p/plugin.py b/vmware_nsx/plugins/nsx_p/plugin.py
index 81502065f4..9c9d0bd75c 100644
--- a/vmware_nsx/plugins/nsx_p/plugin.py
+++ b/vmware_nsx/plugins/nsx_p/plugin.py
@@ -76,17 +76,18 @@ from vmware_nsx.plugins.nsx_p import availability_zones as nsxp_az
 from vmware_nsx.plugins.nsx_v3 import utils as v3_utils
 from vmware_nsx.services.fwaas.common import utils as fwaas_utils
 from vmware_nsx.services.fwaas.nsx_p import fwaas_callbacks_v2
+from vmware_nsx.services.lbaas import lb_const
+from vmware_nsx.services.lbaas.nsx_p.v2 import lb_driver_v2
 from vmware_nsx.services.qos.common import utils as qos_com_utils
 from vmware_nsx.services.qos.nsx_v3 import driver as qos_driver
 from vmware_nsx.services.qos.nsx_v3 import pol_utils as qos_utils
 from vmware_nsxlib.v3 import exceptions as nsx_lib_exc
 from vmware_nsxlib.v3 import nsx_constants as nsxlib_consts
-from vmware_nsxlib.v3 import security
-from vmware_nsxlib.v3 import utils as nsxlib_utils
-
 from vmware_nsxlib.v3.policy import constants as policy_constants
 from vmware_nsxlib.v3.policy import core_defs as policy_defs
+from vmware_nsxlib.v3 import security
+from vmware_nsxlib.v3 import utils as nsxlib_utils
 
 LOG = log.getLogger(__name__)
 NSX_P_SECURITY_GROUP_TAG = 'os-security-group'
@@ -197,6 +198,7 @@ class NsxPolicyPlugin(nsx_plugin_common.NsxPluginV3Base):
         self._prepare_default_rules()
         self._init_segment_profiles()
         self._init_dhcp_metadata()
+        self.lbv2_driver = self._init_lbv2_driver()
 
         # Init QoS
         qos_driver.register(qos_utils.PolicyQosNotificationsHandler())
@@ -342,6 +344,13 @@ class NsxPolicyPlugin(nsx_plugin_common.NsxPluginV3Base):
     def plugin_type():
         return projectpluginmap.NsxPlugins.NSX_P
 
+    def _init_lbv2_driver(self):
+        # Get LBaaSv2 driver during plugin initialization. If the platform
+        # has a version that doesn't support native loadbalancing, the driver
+        # will return a NotImplementedManager class.
+ LOG.debug("Initializing LBaaSv2.0 nsxp driver") + return lb_driver_v2.EdgeLoadbalancerDriverV2() + @staticmethod def is_tvd_plugin(): return False @@ -1225,11 +1234,20 @@ class NsxPolicyPlugin(nsx_plugin_common.NsxPluginV3Base): if not router: router = self._get_router(context, router_id) snat_exist = router.enable_snat - # TODO(asarfaty) - add lbaas/octavia support here - lb_exist = False fw_exist = self._router_has_edge_fw_rules(context, router) + lb_exist = False + if not (fw_exist or snat_exist): + lb_exist = self.service_router_has_loadbalancers(router_id) return snat_exist or lb_exist or fw_exist + def service_router_has_loadbalancers(self, router_id): + tags_to_search = [{'scope': lb_const.LR_ROUTER_TYPE, 'tag': router_id}] + router_lb_services = self.nsxpolicy.search_by_tags( + tags_to_search, + self.nsxpolicy.load_balancer.lb_service.entry_def.resource_type() + )['results'] + return True if router_lb_services else False + def verify_sr_at_backend(self, router_id): """Check if the backend Tier1 has a service router or not""" if self.nsxpolicy.tier1.get_edge_cluster_path(router_id): diff --git a/vmware_nsx/services/lbaas/base_mgr.py b/vmware_nsx/services/lbaas/base_mgr.py index 723cd61683..feeb59442d 100644 --- a/vmware_nsx/services/lbaas/base_mgr.py +++ b/vmware_nsx/services/lbaas/base_mgr.py @@ -48,7 +48,10 @@ class LoadbalancerBaseManager(object): if not self._core_plugin: self._core_plugin = ( self._get_plugin(plugin_const.CORE)) - + if self._core_plugin.is_tvd_plugin(): + # get the plugin that match this driver + self._core_plugin = self._core_plugin.get_plugin_by_type( + self._plugin_id) return self._core_plugin @property @@ -64,36 +67,23 @@ class EdgeLoadbalancerBaseManager(LoadbalancerBaseManager): def __init__(self, vcns_driver): super(EdgeLoadbalancerBaseManager, self).__init__() + self._plugin_id = projectpluginmap.NsxPlugins.NSX_V self.vcns_driver = vcns_driver @property def vcns(self): return self.vcns_driver.vcns - @property - def core_plugin(self): - if not self._core_plugin: - self._core_plugin = ( - self._get_plugin(plugin_const.CORE)) - if self._core_plugin.is_tvd_plugin(): - # get the plugin that match this driver - self._core_plugin = self._core_plugin.get_plugin_by_type( - projectpluginmap.NsxPlugins.NSX_V) - return self._core_plugin - class Nsxv3LoadbalancerBaseManager(LoadbalancerBaseManager): def __init__(self): super(Nsxv3LoadbalancerBaseManager, self).__init__() + self._plugin_id = projectpluginmap.NsxPlugins.NSX_T - @property - def core_plugin(self): - if not self._core_plugin: - self._core_plugin = ( - self._get_plugin(plugin_const.CORE)) - if self._core_plugin.is_tvd_plugin(): - # get the plugin that match this driver - self._core_plugin = self._core_plugin.get_plugin_by_type( - projectpluginmap.NsxPlugins.NSX_T) - return self._core_plugin + +class NsxpLoadbalancerBaseManager(LoadbalancerBaseManager): + + def __init__(self): + super(NsxpLoadbalancerBaseManager, self).__init__() + self._plugin_id = projectpluginmap.NsxPlugins.NSX_P diff --git a/vmware_nsx/services/lbaas/lb_common.py b/vmware_nsx/services/lbaas/lb_common.py new file mode 100644 index 0000000000..7088d70eca --- /dev/null +++ b/vmware_nsx/services/lbaas/lb_common.py @@ -0,0 +1,55 @@ +# Copyright 2019 VMware, Inc. +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +from neutron_lib import exceptions as n_exc + +from vmware_nsx._i18n import _ +from vmware_nsx.services.lbaas import lb_const + + +def validate_session_persistence(pool, listener, completor, old_pool=None): + sp = pool.get('session_persistence') + if not listener or not sp: + # safety first! + return + # L4 listeners only allow source IP persistence + if (listener['protocol'] == lb_const.LB_PROTOCOL_TCP and + sp['type'] != lb_const.LB_SESSION_PERSISTENCE_SOURCE_IP): + completor(success=False) + msg = (_("Invalid session persistence type %(sp_type)s for " + "pool on listener %(lst_id)s with %(proto)s protocol") % + {'sp_type': sp['type'], + 'lst_id': listener['id'], + 'proto': listener['protocol']}) + raise n_exc.BadRequest(resource='lbaas-pool', msg=msg) + # Cannot switch (yet) on update from source IP to cookie based, and + # vice versa + cookie_pers_types = (lb_const.LB_SESSION_PERSISTENCE_HTTP_COOKIE, + lb_const.LB_SESSION_PERSISTENCE_APP_COOKIE) + if old_pool: + oldsp = old_pool.get('session_persistence') + if not oldsp: + return + if ((sp['type'] == lb_const.LB_SESSION_PERSISTENCE_SOURCE_IP and + oldsp['type'] in cookie_pers_types) or + (sp['type'] in cookie_pers_types and + oldsp['type'] == lb_const.LB_SESSION_PERSISTENCE_SOURCE_IP)): + completor(success=False) + msg = (_("Cannot update session persistence type to " + "%(sp_type)s for pool on listener %(lst_id)s " + "from %(old_sp_type)s") % + {'sp_type': sp['type'], + 'lst_id': listener['id'], + 'old_sp_type': oldsp['type']}) + raise n_exc.BadRequest(resource='lbaas-pool', msg=msg) diff --git a/vmware_nsx/services/lbaas/nsx_p/__init__.py b/vmware_nsx/services/lbaas/nsx_p/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/vmware_nsx/services/lbaas/nsx_p/implementation/__init__.py b/vmware_nsx/services/lbaas/nsx_p/implementation/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/vmware_nsx/services/lbaas/nsx_p/implementation/healthmonitor_mgr.py b/vmware_nsx/services/lbaas/nsx_p/implementation/healthmonitor_mgr.py new file mode 100644 index 0000000000..1a9065e100 --- /dev/null +++ b/vmware_nsx/services/lbaas/nsx_p/implementation/healthmonitor_mgr.py @@ -0,0 +1,158 @@ +# Copyright 2018 VMware, Inc. +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from neutron_lib import exceptions as n_exc +from oslo_log import helpers as log_helpers +from oslo_log import log as logging +from oslo_utils import excutils + +from vmware_nsx._i18n import _ +from vmware_nsx.services.lbaas import base_mgr +from vmware_nsx.services.lbaas import lb_const +from vmware_nsx.services.lbaas.nsx_v3.implementation import lb_utils +from vmware_nsxlib.v3 import exceptions as nsxlib_exc +from vmware_nsxlib.v3 import utils + +LOG = logging.getLogger(__name__) + + +class EdgeHealthMonitorManagerFromDict(base_mgr.NsxpLoadbalancerBaseManager): + + def _get_monitor_policy_client(self, hm): + lb_client = self.core_plugin.nsxpolicy.load_balancer + if hm['type'] == lb_const.LB_HEALTH_MONITOR_TCP: + return lb_client.lb_monitor_profile_tcp + elif hm['type'] == lb_const.LB_HEALTH_MONITOR_HTTP: + return lb_client.lb_monitor_profile_http + elif hm['type'] == lb_const.LB_HEALTH_MONITOR_HTTPS: + return lb_client.lb_monitor_profile_https + elif hm['type'] == lb_const.LB_HEALTH_MONITOR_PING: + return lb_client.lb_monitor_profile_icmp + else: + msg = (_('Cannot create health monitor %(monitor)s with ' + 'type %(type)s') % {'monitor': hm['id'], + 'type': hm['type']}) + raise n_exc.InvalidInput(error_message=msg) + + def _build_monitor_args(self, hm): + body = { + 'interval': hm['delay'], + 'fall_count': hm['max_retries'], + 'timeout': hm['timeout'], + 'name': utils.get_name_and_uuid(hm['name'] or 'monitor', hm['id']) + } + if hm['type'] in [lb_const.LB_HEALTH_MONITOR_HTTP, + lb_const.LB_HEALTH_MONITOR_HTTPS]: + if hm['http_method']: + body['request_method'] = hm['http_method'] + if hm['url_path']: + body['request_url'] = hm['url_path'] + if hm['expected_codes']: + codes = hm['expected_codes'].split(",") + body['response_status_codes'] = [ + int(code) for code in codes] + return body + + @log_helpers.log_method_call + def create(self, context, hm, completor): + pool_id = hm['pool']['id'] + pool_client = self.core_plugin.nsxpolicy.load_balancer.lb_pool + monitor_client = self._get_monitor_policy_client(hm) + tags = lb_utils.get_tags(self.core_plugin, hm['id'], + lb_const.LB_HM_TYPE, + hm['tenant_id'], context.project_name) + monitor_body = self._build_monitor_args(hm) + lb_monitor = None + try: + lb_monitor = monitor_client.create_or_overwrite( + lb_monitor_profile_id=hm['id'], + tags=tags, **monitor_body) + except nsxlib_exc.ManagerError: + with excutils.save_and_reraise_exception(): + completor(success=False) + + if pool_id and lb_monitor: + try: + hm_path = monitor_client.get_path(hm['id']) + pool_client.add_monitor_to_pool(pool_id, [hm_path]) + except nsxlib_exc.ManagerError: + completor(success=False) + msg = _('Failed to attach monitor %(monitor)s to pool ' + '%(pool)s') % {'monitor': hm['id'], + 'pool': pool_id} + raise n_exc.BadRequest(resource='lbaas-hm', msg=msg) + else: + completor(success=False) + msg = _('Failed to attach monitor %(monitor)s to pool ' + '%(pool)s: NSX pool was not found on the DB') % { + 'monitor': hm['id'], + 'pool': pool_id} + raise n_exc.BadRequest(resource='lbaas-hm', msg=msg) + + completor(success=True) + + @log_helpers.log_method_call + def update(self, context, old_hm, new_hm, completor): + monitor_client = self._get_monitor_policy_client(new_hm) + try: + monitor_body = self._build_monitor_args(new_hm) + monitor_name = utils.get_name_and_uuid(new_hm['name'] or 'monitor', + new_hm['id']) + monitor_client.update(new_hm['id'], name=monitor_name, + **monitor_body) + except nsxlib_exc.ManagerError as exc: + completor(success=False) + msg = _('Failed to 
update monitor %(monitor)s with exception'
+                    ' %(exc)s') % {'monitor': new_hm['id'], 'exc': exc}
+            raise n_exc.BadRequest(resource='lbaas-hm', msg=msg)
+
+        completor(success=True)
+
+    @log_helpers.log_method_call
+    def delete(self, context, hm, completor):
+        pool_id = hm['pool']['id']
+        pool_client = self.core_plugin.nsxpolicy.load_balancer.lb_pool
+        monitor_client = self._get_monitor_policy_client(hm)
+
+        try:
+            hm_path = monitor_client.get_path(hm['id'])
+            pool_client.remove_monitor_from_pool(pool_id,
+                                                 hm_path)
+        except nsxlib_exc.ResourceNotFound:
+            pass
+        except nsxlib_exc.ManagerError as exc:
+            completor(success=False)
+            msg = _('Failed to remove monitor %(monitor)s from pool %(pool)s '
+                    'with exception from nsx %(exc)s') % {
+                'monitor': hm['id'],
+                'pool': pool_id,
+                'exc': exc}
+            raise n_exc.BadRequest(resource='lbaas-hm', msg=msg)
+
+        try:
+            monitor_client.delete(hm['id'])
+        except nsxlib_exc.ResourceNotFound:
+            pass
+        except nsxlib_exc.ManagerError as exc:
+            completor(success=False)
+            msg = _('Failed to delete monitor %(monitor)s from backend with '
+                    'exception %(exc)s') % {'monitor': hm['id'], 'exc': exc}
+            raise n_exc.BadRequest(resource='lbaas-hm', msg=msg)
+
+        completor(success=True)
+
+    @log_helpers.log_method_call
+    def delete_cascade(self, context, hm, completor):
+        self.delete(context, hm, completor)
diff --git a/vmware_nsx/services/lbaas/nsx_p/implementation/l7policy_mgr.py b/vmware_nsx/services/lbaas/nsx_p/implementation/l7policy_mgr.py
new file mode 100644
index 0000000000..b8a7c70343
--- /dev/null
+++ b/vmware_nsx/services/lbaas/nsx_p/implementation/l7policy_mgr.py
@@ -0,0 +1,91 @@
+# Copyright 2018 VMware, Inc.
+# All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from neutron_lib import exceptions as n_exc
+from oslo_log import helpers as log_helpers
+from oslo_log import log as logging
+from oslo_utils import excutils
+
+from vmware_nsx._i18n import _
+from vmware_nsx.common import exceptions as nsx_exc
+from vmware_nsx.services.lbaas import base_mgr
+from vmware_nsx.services.lbaas.nsx_p.implementation import lb_utils
+from vmware_nsxlib.v3 import exceptions as nsxlib_exc
+from vmware_nsxlib.v3 import utils
+
+LOG = logging.getLogger(__name__)
+
+
+class EdgeL7PolicyManagerFromDict(base_mgr.NsxpLoadbalancerBaseManager):
+    @log_helpers.log_method_call
+    def create(self, context, policy, completor):
+        vs_client = self.core_plugin.nsxpolicy.load_balancer.virtual_server
+        policy_name = utils.get_name_and_uuid(policy['name'] or 'policy',
+                                              policy['id'])
+        rule_body = lb_utils.convert_l7policy_to_lb_rule(policy)
+        try:
+            vs_client.add_lb_rule(policy['listener_id'],
+                                  name=policy_name,
+                                  position=policy.get('position', 0) - 1,
+                                  **rule_body)
+        except nsxlib_exc.ManagerError:
+            with excutils.save_and_reraise_exception():
+                completor(success=False)
+                LOG.error('Failed to add rule %(rule)s to virtual server '
+                          '%(vs)s at NSX backend',
+                          {'rule': policy['id'], 'vs': policy['listener_id']})
+
+        completor(success=True)
+
+    @log_helpers.log_method_call
+    def update(self, context, old_policy, new_policy, completor):
+        vs_client = self.core_plugin.nsxpolicy.load_balancer.virtual_server
+        policy_name = utils.get_name_and_uuid(old_policy['name'] or 'policy',
+                                              old_policy['id'])
+        rule_body = lb_utils.convert_l7policy_to_lb_rule(new_policy)
+        try:
+            vs_client.update_lb_rule(
+                new_policy['listener_id'],
+                name=policy_name,
+                position=new_policy.get('position', 0) - 1,
+                **rule_body)
+
+        except Exception as e:
+            with excutils.save_and_reraise_exception():
+                completor(success=False)
+                LOG.error('Failed to update L7policy %(policy)s: '
+                          '%(err)s', {'policy': old_policy['id'], 'err': e})
+        completor(success=True)
+
+    def delete(self, context, policy, completor):
+        vs_client = self.core_plugin.nsxpolicy.load_balancer.virtual_server
+        policy_name = utils.get_name_and_uuid(policy['name'] or 'policy',
+                                              policy['id'])
+        try:
+            vs_client.remove_lb_rule(policy['listener_id'],
+                                     policy_name)
+        except nsx_exc.NsxResourceNotFound:
+            pass
+        except nsxlib_exc.ManagerError:
+            completor(success=False)
+            msg = (_('Failed to delete L7 policy: %(policy)s') %
+                   {'policy': policy['id']})
+            raise n_exc.BadRequest(resource='lbaas-l7policy', msg=msg)
+        completor(success=True)
+
+    @log_helpers.log_method_call
+    def delete_cascade(self, context, policy, completor):
+        self.delete(context, policy, completor)
diff --git a/vmware_nsx/services/lbaas/nsx_p/implementation/l7rule_mgr.py b/vmware_nsx/services/lbaas/nsx_p/implementation/l7rule_mgr.py
new file mode 100644
index 0000000000..4bb1412f70
--- /dev/null
+++ b/vmware_nsx/services/lbaas/nsx_p/implementation/l7rule_mgr.py
@@ -0,0 +1,67 @@
+# Copyright 2018 VMware, Inc.
+# All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_log import helpers as log_helpers +from oslo_log import log as logging +from oslo_utils import excutils + +from vmware_nsx.services.lbaas import base_mgr +from vmware_nsx.services.lbaas.nsx_p.implementation import lb_utils +from vmware_nsxlib.v3 import utils + +LOG = logging.getLogger(__name__) + + +class EdgeL7RuleManagerFromDict(base_mgr.NsxpLoadbalancerBaseManager): + @log_helpers.log_method_call + def _update_l7rule_change(self, rule, completor, delete=False): + vs_client = self.core_plugin.nsxpolicy.load_balancer.virtual_server + policy = rule['policy'] + policy_name = utils.get_name_and_uuid(policy['name'] or 'policy', + policy['id']) + if delete: + lb_utils.remove_rule_from_policy(rule) + else: + lb_utils.update_rule_in_policy(rule) + rule_body = lb_utils.convert_l7policy_to_lb_rule(rule['policy']) + try: + vs_client.update_lb_rule(policy['listener_id'], + name=policy_name, + position=policy.get('position', 0) - 1, + **rule_body) + except Exception as e: + with excutils.save_and_reraise_exception(): + completor(success=False) + LOG.error('Failed to update L7policy %(policy)s: ' + '%(err)s', {'policy': policy['id'], 'err': e}) + + completor(success=True) + + @log_helpers.log_method_call + def create(self, context, rule, completor): + self._update_l7rule_change(rule, completor) + + @log_helpers.log_method_call + def update(self, context, old_rule, new_rule, completor): + self._update_l7rule_change(new_rule, completor) + + @log_helpers.log_method_call + def delete(self, context, rule, completor): + self._update_l7rule_change(rule, completor, delete=True) + + @log_helpers.log_method_call + def delete_cascade(self, context, rule, completor): + # No action should be taken on rules delete cascade + pass diff --git a/vmware_nsx/services/lbaas/nsx_p/implementation/lb_const.py b/vmware_nsx/services/lbaas/nsx_p/implementation/lb_const.py new file mode 100644 index 0000000000..c28835cfe5 --- /dev/null +++ b/vmware_nsx/services/lbaas/nsx_p/implementation/lb_const.py @@ -0,0 +1,18 @@ +# Copyright 2019 VMware, Inc. +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +LB_SELECT_POOL_ACTION = 'LBSelectPoolAction' +LB_HTTP_REDIRECT_ACTION = 'LBHttpRedirectAction' +LB_REJECT_ACTION = 'LBHttpRejectAction' diff --git a/vmware_nsx/services/lbaas/nsx_p/implementation/lb_utils.py b/vmware_nsx/services/lbaas/nsx_p/implementation/lb_utils.py new file mode 100644 index 0000000000..6173694158 --- /dev/null +++ b/vmware_nsx/services/lbaas/nsx_p/implementation/lb_utils.py @@ -0,0 +1,146 @@ +# Copyright 2019 VMware, Inc. +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from neutron_lib import exceptions as n_exc +from oslo_config import cfg +from oslo_log import helpers as log_helpers + +from vmware_nsx._i18n import _ +from vmware_nsx.services.lbaas import lb_const +from vmware_nsx.services.lbaas.nsx_p.implementation import lb_const as p_const +from vmware_nsx.services.lbaas.nsx_v3.implementation import lb_utils + +ADV_RULE_NAME = 'LB external VIP advertisement' + + +@log_helpers.log_method_call +def get_rule_match_conditions(policy): + match_conditions = [] + # values in rule have already been validated in LBaaS API, + # we won't need to valid anymore in driver, and just get + # the LB rule mapping from the dict. + for rule in policy['rules']: + match_type = lb_const.LB_RULE_MATCH_TYPE[rule['compare_type']] + if rule['type'] == lb_const.L7_RULE_TYPE_COOKIE: + header_value = rule['key'] + '=' + rule['value'] + match_conditions.append( + {'type': 'LBHttpRequestHeaderCondition', + 'match_type': match_type, + 'header_name': 'Cookie', + 'header_value': header_value}) + elif rule['type'] == lb_const.L7_RULE_TYPE_FILE_TYPE: + match_conditions.append( + {'type': 'LBHttpRequestUriCondition', + 'match_type': match_type, + 'uri': '*.' + rule['value']}) + elif rule['type'] == lb_const.L7_RULE_TYPE_HEADER: + match_conditions.append( + {'type': 'LBHttpRequestHeaderCondition', + 'match_type': match_type, + 'header_name': rule['key'], + 'header_value': rule['value']}) + elif rule['type'] == lb_const.L7_RULE_TYPE_HOST_NAME: + match_conditions.append( + {'type': 'LBHttpRequestHeaderCondition', + 'match_type': match_type, + 'header_name': 'Host', + 'header_value': rule['value']}) + elif rule['type'] == lb_const.L7_RULE_TYPE_PATH: + match_conditions.append( + {'type': 'LBHttpRequestUriCondition', + 'match_type': match_type, + 'uri': rule['value']}) + else: + msg = (_('l7rule type %(type)s is not supported in LBaaS') % + {'type': rule['type']}) + raise n_exc.BadRequest(resource='lbaas-l7rule', msg=msg) + return match_conditions + + +@log_helpers.log_method_call +def get_rule_actions(l7policy): + if l7policy['action'] == lb_const.L7_POLICY_ACTION_REDIRECT_TO_POOL: + if l7policy['redirect_pool_id']: + lb_pool_id = l7policy['redirect_pool_id'] + actions = [{'type': p_const.LB_SELECT_POOL_ACTION, + 'pool_id': lb_pool_id}] + else: + msg = _('Failed to get LB pool binding from nsx db') + raise n_exc.BadRequest(resource='lbaas-l7rule-create', + msg=msg) + elif l7policy['action'] == lb_const.L7_POLICY_ACTION_REDIRECT_TO_URL: + actions = [{'type': p_const.LB_HTTP_REDIRECT_ACTION, + 'redirect_status': lb_const.LB_HTTP_REDIRECT_STATUS, + 'redirect_url': l7policy['redirect_url']}] + elif l7policy['action'] == lb_const.L7_POLICY_ACTION_REJECT: + actions = [{'type': p_const.LB_REJECT_ACTION, + 'reply_status': lb_const.LB_HTTP_REJECT_STATUS}] + else: + msg = (_('Invalid l7policy action: %(action)s') % + {'action': l7policy['action']}) + raise n_exc.BadRequest(resource='lbaas-l7rule-create', + msg=msg) + return actions + + +@log_helpers.log_method_call +def convert_l7policy_to_lb_rule(policy): + return { + 'match_conditions': get_rule_match_conditions(policy), + 'actions': 
get_rule_actions(policy), + 'phase': lb_const.LB_RULE_HTTP_FORWARDING, + 'match_strategy': 'ALL' + } + + +@log_helpers.log_method_call +def remove_rule_from_policy(rule): + l7rules = rule['policy']['rules'] + rule['policy']['rules'] = [r for r in l7rules if r['id'] != rule['id']] + + +@log_helpers.log_method_call +def update_rule_in_policy(rule): + remove_rule_from_policy(rule) + rule['policy']['rules'].append(rule) + + +@log_helpers.log_method_call +def update_router_lb_vip_advertisement(context, core_plugin, router_id): + router = core_plugin.get_router(context, router_id) + + # Add a rule to advertise external vips on the router + + # TODO(kobis): Code below should be executed when platform supports + # + # external_subnets = core_plugin._find_router_gw_subnets( + # context.elevated(), router) + # external_cidrs = [s['cidr'] for s in external_subnets] + # if external_cidrs: + # core_plugin.nsxpolicy.tier1.add_advertisement_rule( + # router_id, + # ADV_RULE_NAME, + # p_constants.ADV_RULE_PERMIT, + # p_constants.ADV_RULE_OPERATOR_GE, + # [p_constants.ADV_RULE_TIER1_LB_VIP], + # external_cidrs) + if cfg.CONF.nsx_p.allow_passthrough: + lb_utils.update_router_lb_vip_advertisement( + context, core_plugin, router, + core_plugin.nsxpolicy.tier1.get_realized_id(router_id)) + else: + msg = (_('Failed to set loadbalancer advertisement rule for router %s') + % router_id) + raise n_exc.BadRequest(resource='lbaas-loadbalancer', msg=msg) diff --git a/vmware_nsx/services/lbaas/nsx_p/implementation/listener_mgr.py b/vmware_nsx/services/lbaas/nsx_p/implementation/listener_mgr.py new file mode 100644 index 0000000000..f098846782 --- /dev/null +++ b/vmware_nsx/services/lbaas/nsx_p/implementation/listener_mgr.py @@ -0,0 +1,231 @@ +# Copyright 2018 VMware, Inc. +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from neutron_lib import exceptions as n_exc +from oslo_log import helpers as log_helpers +from oslo_log import log as logging +from oslo_utils import excutils + +from vmware_nsx._i18n import _ +from vmware_nsx.common import exceptions as nsx_exc +from vmware_nsx.services.lbaas import base_mgr +from vmware_nsx.services.lbaas import lb_const +from vmware_nsx.services.lbaas.nsx_v3.implementation import lb_utils +from vmware_nsxlib.v3 import exceptions as nsxlib_exc +from vmware_nsxlib.v3.policy import core_resources +from vmware_nsxlib.v3 import utils + +LOG = logging.getLogger(__name__) + + +class EdgeListenerManagerFromDict(base_mgr.NsxpLoadbalancerBaseManager): + @log_helpers.log_method_call + def _get_listener_tags(self, context, listener): + tags = lb_utils.get_tags(self.core_plugin, listener['id'], + lb_const.LB_LISTENER_TYPE, + listener['tenant_id'], + context.project_name) + tags.append({ + 'scope': 'os-lbaas-lb-name', + 'tag': listener['loadbalancer']['name'][:utils.MAX_TAG_LEN]}) + tags.append({ + 'scope': 'os-lbaas-lb-id', + 'tag': listener['loadbalancer_id']}) + return tags + + @log_helpers.log_method_call + def _upload_certificate(self, listener_id, cert_href, tags, + certificate=None): + cert_client = self.core_plugin.nsxpolicy.certificate + + passphrase = certificate.get_private_key_passphrase() + if not passphrase: + passphrase = core_resources.IGNORE + cert_client.create_or_overwrite( + cert_href, certificate_id=listener_id, + pem_encoded=certificate.get_certificate(), + private_key=certificate.get_private_key(), + passphrase=passphrase, + tags=tags) + + return { + 'client_ssl_profile_binding': { + 'ssl_profile_id': self.core_plugin.client_ssl_profile, + 'default_certificate_id': listener_id + } + } + + @log_helpers.log_method_call + def _get_virtual_server_kwargs(self, context, listener, vs_name, tags, + certificate=None): + # If loadbalancer vip_port already has floating ip, use floating + # IP as the virtual server VIP address. Else, use the loadbalancer + # vip_address directly on virtual server. 
+        filters = {'port_id': [listener['loadbalancer']['vip_port_id']]}
+        floating_ips = self.core_plugin.get_floatingips(context,
+                                                        filters=filters)
+        if floating_ips:
+            lb_vip_address = floating_ips[0]['floating_ip_address']
+        else:
+            lb_vip_address = listener['loadbalancer']['vip_address']
+        kwargs = {'virtual_server_id': listener['id'],
+                  'ip_address': lb_vip_address,
+                  'ports': [listener['protocol_port']],
+                  'application_profile_id': listener['id'],
+                  'lb_service_id': listener['loadbalancer_id'],
+                  'description': listener.get('description')}
+        if vs_name:
+            kwargs['name'] = vs_name
+        if tags:
+            kwargs['tags'] = tags
+        if listener['connection_limit'] != -1:
+            kwargs['max_concurrent_connections'] = listener['connection_limit']
+        if listener['default_pool_id']:
+            kwargs['pool_id'] = listener['default_pool_id']
+        if certificate:
+            ssl_profile_binding = self._upload_certificate(
+                listener['id'], listener['default_tls_container_id'], tags,
+                certificate=certificate)
+            if (listener['protocol'] == lb_const.LB_PROTOCOL_TERMINATED_HTTPS
+                    and ssl_profile_binding):
+                kwargs.update(ssl_profile_binding)
+        return kwargs
+
+    def _get_nsxlib_app_profile(self, nsxlib_lb, listener):
+        if (listener['protocol'] == lb_const.LB_PROTOCOL_HTTP or
+                listener['protocol'] == lb_const.LB_PROTOCOL_TERMINATED_HTTPS):
+            app_client = nsxlib_lb.lb_http_profile
+        elif (listener['protocol'] == lb_const.LB_PROTOCOL_TCP or
+              listener['protocol'] == lb_const.LB_PROTOCOL_HTTPS):
+            app_client = nsxlib_lb.lb_fast_tcp_profile
+        else:
+            msg = (_('Cannot create listener %(listener)s with '
+                     'protocol %(protocol)s') %
+                   {'listener': listener['id'],
+                    'protocol': listener['protocol']})
+            raise n_exc.BadRequest(resource='lbaas-listener', msg=msg)
+
+        return app_client
+
+    @log_helpers.log_method_call
+    def create(self, context, listener, completor,
+               certificate=None):
+        nsxlib_lb = self.core_plugin.nsxpolicy.load_balancer
+        vs_client = nsxlib_lb.virtual_server
+        vs_name = utils.get_name_and_uuid(listener['name'] or 'listener',
+                                          listener['id'])
+        tags = self._get_listener_tags(context, listener)
+        app_client = self._get_nsxlib_app_profile(nsxlib_lb, listener)
+        try:
+            app_client.create_or_overwrite(
+                lb_app_profile_id=listener['id'], name=vs_name, tags=tags)
+            kwargs = self._get_virtual_server_kwargs(
+                context, listener, vs_name, tags, certificate)
+            vs_client.create_or_overwrite(**kwargs)
+        except nsxlib_exc.ManagerError:
+            completor(success=False)
+            msg = _('Failed to create virtual server at NSX backend')
+            raise n_exc.BadRequest(resource='lbaas-listener', msg=msg)
+
+        completor(success=True)
+
+    @log_helpers.log_method_call
+    def update(self, context, old_listener, new_listener, completor,
+               certificate=None):
+        nsxlib_lb = self.core_plugin.nsxpolicy.load_balancer
+        vs_client = nsxlib_lb.virtual_server
+        app_client = self._get_nsxlib_app_profile(nsxlib_lb, old_listener)
+
+        vs_name = None
+        tags = None
+        if new_listener['name'] != old_listener['name']:
+            vs_name = utils.get_name_and_uuid(
+                new_listener['name'] or 'listener',
+                new_listener['id'])
+            tags = self._get_listener_tags(context, new_listener)
+
+        try:
+            vs_id = new_listener['id']
+            app_profile_id = new_listener['id']
+            updated_kwargs = self._get_virtual_server_kwargs(
+                context, new_listener, vs_name, tags,
+                certificate=certificate)
+            vs_client.update(vs_id, **updated_kwargs)
+            if vs_name:
+                app_client.update(app_profile_id, display_name=vs_name,
+                                  tags=tags)
+        except Exception as e:
+            with excutils.save_and_reraise_exception():
+                completor(success=False)
+                
LOG.error('Failed to update listener %(listener)s with ' + 'error %(error)s', + {'listener': old_listener['id'], 'error': e}) + completor(success=True) + + @log_helpers.log_method_call + def delete(self, context, listener, completor): + nsxlib_lb = self.core_plugin.nsxpolicy.load_balancer + vs_client = nsxlib_lb.virtual_server + app_client = self._get_nsxlib_app_profile(nsxlib_lb, listener) + + vs_id = listener['id'] + app_profile_id = listener['id'] + + try: + vs_client.delete(vs_id) + except nsx_exc.NsxResourceNotFound: + LOG.error("virtual server not found on nsx: %(vs)s", {'vs': vs_id}) + except nsxlib_exc.ManagerError: + completor(success=False) + msg = (_('Failed to delete virtual server: %(vs)s') % + {'vs': vs_id}) + raise n_exc.BadRequest(resource='lbaas-listener', msg=msg) + + try: + app_client.delete(app_profile_id) + except nsx_exc.NsxResourceNotFound: + LOG.error("application profile not found on nsx: %s", + app_profile_id) + + except nsxlib_exc.ManagerError: + completor(success=False) + msg = (_('Failed to delete application profile: %(app)s') % + {'app': app_profile_id}) + raise n_exc.BadRequest(resource='lbaas-listener', msg=msg) + + # Delete imported NSX cert if there is any + if listener['default_tls_container_id']: + cert_client = self.core_plugin.nsxpolicy.certificate + try: + cert_client.delete(listener['id']) + except nsx_exc.NsxResourceNotFound: + LOG.error("Certificate not found on nsx: %s", listener['id']) + + except nsxlib_exc.ManagerError: + completor(success=False) + msg = (_('Failed to delete certificate: %(crt)s') % + {'crt': listener['id']}) + raise n_exc.BadRequest(resource='lbaas-listener', msg=msg) + + completor(success=True) + + @log_helpers.log_method_call + def delete_cascade(self, context, listener, completor): + self.delete(context, listener, completor) + + +def stats_getter(context, core_plugin, ignore_list=None): + """Update Octavia statistics for each listener (virtual server)""" + #TODO(kobis): Implement diff --git a/vmware_nsx/services/lbaas/nsx_p/implementation/loadbalancer_mgr.py b/vmware_nsx/services/lbaas/nsx_p/implementation/loadbalancer_mgr.py new file mode 100644 index 0000000000..48ac856bbe --- /dev/null +++ b/vmware_nsx/services/lbaas/nsx_p/implementation/loadbalancer_mgr.py @@ -0,0 +1,227 @@ +# Copyright 2018 VMware, Inc. +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from neutron_lib import exceptions as n_exc +from oslo_log import helpers as log_helpers +from oslo_log import log as logging +from oslo_utils import excutils + +from vmware_nsx._i18n import _ +from vmware_nsx.services.lbaas import base_mgr +from vmware_nsx.services.lbaas import lb_const +from vmware_nsx.services.lbaas.nsx_p.implementation import lb_utils as p_utils +from vmware_nsx.services.lbaas.nsx_v3.implementation import lb_utils +from vmware_nsxlib.v3 import exceptions as nsxlib_exc +from vmware_nsxlib.v3 import utils + +LOG = logging.getLogger(__name__) + + +class EdgeLoadBalancerManagerFromDict(base_mgr.NsxpLoadbalancerBaseManager): + + @log_helpers.log_method_call + def _validate_lb_network(self, context, lb): + router_id = lb_utils.get_router_from_network( + context, self.core_plugin, lb['vip_subnet_id']) + + return router_id + + @log_helpers.log_method_call + def _get_info_from_fip(self, context, fip): + filters = {'floating_ip_address': [fip]} + floating_ips = self.core_plugin.get_floatingips(context, + filters=filters) + if floating_ips: + return (floating_ips[0]['fixed_ip_address'], + floating_ips[0]['router_id']) + else: + msg = (_('Member IP %(fip)s is an external IP, and is expected to ' + 'be a floating IP') % {'fip': fip}) + raise n_exc.BadRequest(resource='lbaas-vip', msg=msg) + + @log_helpers.log_method_call + def create(self, context, lb, completor): + lb_id = lb['id'] + + network = lb_utils.get_network_from_subnet( + context, self.core_plugin, lb['vip_subnet_id']) + + router_id = self._validate_lb_network(context, lb) + if not router_id and not network.get('router:external'): + completor(success=False) + msg = (_('Cannot create a loadbalancer %(lb_id)s on subnet. ' + '%(subnet)s is neither public nor connected to the LB ' + 'router') % + {'lb_id': lb_id, 'subnet': lb['vip_subnet_id']}) + raise n_exc.BadRequest(resource='lbaas-subnet', msg=msg) + + if router_id and not self.core_plugin.service_router_has_services( + context, router_id): + self.core_plugin.create_service_router(context, router_id) + + lb_name = utils.get_name_and_uuid(lb['name'] or 'lb', + lb_id) + tags = lb_utils.get_tags(self.core_plugin, router_id, + lb_const.LR_ROUTER_TYPE, + lb['tenant_id'], context.project_name) + + lb_size = lb_utils.get_lb_flavor_size(self.flavor_plugin, context, + lb.get('flavor_id')) + + service_client = self.core_plugin.nsxpolicy.load_balancer.lb_service + try: + if network.get('router:external'): + connectivity_path = None + else: + connectivity_path = self.core_plugin.nsxpolicy.tier1.get_path( + router_id) + service_client.create_or_overwrite( + lb_name, lb_service_id=lb['id'], description=lb['description'], + tags=tags, size=lb_size, connectivity_path=connectivity_path) + + # Add rule to advertise external vips + p_utils.update_router_lb_vip_advertisement( + context, self.core_plugin, router_id) + except Exception as e: + with excutils.save_and_reraise_exception(): + completor(success=False) + LOG.error('Failed to create loadbalancer %(lb)s for lb with ' + 'exception %(e)s', {'lb': lb['id'], 'e': e}) + + completor(success=True) + + @log_helpers.log_method_call + def update(self, context, old_lb, new_lb, completor): + completor(success=True) + + @log_helpers.log_method_call + def delete(self, context, lb, completor): + service_client = self.core_plugin.nsxpolicy.load_balancer.lb_service + router_id = lb_utils.get_router_from_network( + context, self.core_plugin, lb['vip_subnet_id']) + + if router_id: + try: + service_client.delete(lb['id']) + + if not 
self.core_plugin.service_router_has_services(context,
+                                                                    router_id):
+                    self.core_plugin.delete_service_router(router_id)
+            except Exception as e:
+                with excutils.save_and_reraise_exception():
+                    completor(success=False)
+                    LOG.error('Failed to delete loadbalancer %(lb)s for lb '
+                              'with error %(err)s',
+                              {'lb': lb['id'], 'err': e})
+        else:
+            LOG.warning('Router not found for loadbalancer %s', lb['id'])
+        completor(success=True)
+
+    @log_helpers.log_method_call
+    def delete_cascade(self, context, lb, completor):
+        """Delete all backend and DB resources of this loadbalancer"""
+        self.delete(context, lb, completor)
+
+    @log_helpers.log_method_call
+    def refresh(self, context, lb):
+        # TODO(kobis): implement
+        pass
+
+    @log_helpers.log_method_call
+    def _get_lb_virtual_servers(self, context, lb):
+        # Get all virtual servers that belong to this loadbalancer
+        vs_list = [vs['id'] for vs in lb['listeners']]
+        return vs_list
+
+    @log_helpers.log_method_call
+    def stats(self, context, lb):
+        # Since multiple LBaaS loadbalancer can share the same LB service,
+        # get the corresponding virtual servers' stats instead of LB service.
+        stats = {'active_connections': 0,
+                 'bytes_in': 0,
+                 'bytes_out': 0,
+                 'total_connections': 0}
+
+        service_client = self.core_plugin.nsxpolicy.load_balancer.lb_service
+        vs_list = self._get_lb_virtual_servers(context, lb)
+        try:
+            rsp = service_client.get_stats(lb['id'])
+            if rsp:
+                for vs in rsp.get('virtual_servers', []):
+                    # Skip the virtual server that doesn't belong
+                    # to this loadbalancer
+                    if vs['virtual_server_id'] not in vs_list:
+                        continue
+                    vs_stats = vs.get('statistics', {})
+                    for stat in lb_const.LB_STATS_MAP:
+                        lb_stat = lb_const.LB_STATS_MAP[stat]
+                        stats[stat] += vs_stats.get(lb_stat, 0)
+
+        except nsxlib_exc.ManagerError:
+            msg = _('Failed to retrieve stats from LB service '
+                    'for loadbalancer %(lb)s') % {'lb': lb['id']}
+            raise n_exc.BadRequest(resource='lbaas-lb', msg=msg)
+        return stats
+
+    @log_helpers.log_method_call
+    def get_operating_status(self, context, id, with_members=False):
+        service_client = self.core_plugin.nsxpolicy.load_balancer.lb_service
+        try:
+            service_status = service_client.get_status(id)
+            if not isinstance(service_status, dict):
+                service_status = {}
+
+            vs_statuses = service_client.get_status(id)
+            if not isinstance(vs_statuses, dict):
+                vs_statuses = {}
+        except nsxlib_exc.ManagerError:
+            LOG.warning("LB service %(lbs)s is not found",
+                        {'lbs': id})
+            return {}
+
+        # get the loadbalancer status from the LB service
+        lb_status = self._nsx_status_to_lb_status(
+            service_status.get('service_status'))
+        statuses = {lb_const.LOADBALANCERS: [{'id': id, 'status': lb_status}],
+                    lb_const.LISTENERS: [],
+                    lb_const.POOLS: [],
+                    lb_const.MEMBERS: []}
+
+        # Add the listeners statuses from the virtual servers statuses
+        for vs in vs_statuses.get('results', []):
+            vs_status = self._nsx_status_to_lb_status(vs.get('status'))
+            vs_id = vs.get('virtual_server_id')
+            statuses[lb_const.LISTENERS].append(
+                {'id': vs_id, 'status': vs_status})
+
+        return statuses
+
+    @log_helpers.log_method_call
+    def _nsx_status_to_lb_status(self, nsx_status):
+        if not nsx_status:
+            # default fallback
+            return lb_const.ONLINE
+
+        # Statuses that are considered ONLINE:
+        if nsx_status.upper() in ['UP', 'UNKNOWN', 'PARTIALLY_UP',
+                                  'NO_STANDBY']:
+            return lb_const.ONLINE
+        # Statuses that are considered OFFLINE:
+        if nsx_status.upper() in ['PRIMARY_DOWN', 'DETACHED', 'DOWN', 'ERROR']:
+            return lb_const.OFFLINE
+        if nsx_status.upper() == 'DISABLED':
+            return lb_const.DISABLED
+
+        # default 
fallback + LOG.debug("NSX LB status %s - interpreted as ONLINE", nsx_status) + return lb_const.ONLINE diff --git a/vmware_nsx/services/lbaas/nsx_p/implementation/member_mgr.py b/vmware_nsx/services/lbaas/nsx_p/implementation/member_mgr.py new file mode 100644 index 0000000000..17eabaafe1 --- /dev/null +++ b/vmware_nsx/services/lbaas/nsx_p/implementation/member_mgr.py @@ -0,0 +1,163 @@ +# Copyright 2018 VMware, Inc. +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from neutron_lib import exceptions as n_exc +from oslo_log import helpers as log_helpers +from oslo_log import log as logging +from oslo_utils import excutils + +from vmware_nsx._i18n import _ +from vmware_nsx.services.lbaas import base_mgr +from vmware_nsx.services.lbaas.nsx_p.implementation import lb_utils as p_utils +from vmware_nsx.services.lbaas.nsx_v3.implementation import lb_utils + +LOG = logging.getLogger(__name__) + + +class EdgeMemberManagerFromDict(base_mgr.NsxpLoadbalancerBaseManager): + @log_helpers.log_method_call + def _get_info_from_fip(self, context, fip): + filters = {'floating_ip_address': [fip]} + floating_ips = self.core_plugin.get_floatingips(context, + filters=filters) + if floating_ips: + return floating_ips[0]['fixed_ip_address'] + else: + msg = (_('Member IP %(fip)s is an external IP, and is expected to ' + 'be a floating IP') % {'fip': fip}) + raise n_exc.BadRequest(resource='lbaas-vip', msg=msg) + + def _validate_member_lb_connectivity(self, context, member, completor): + lb = member['pool'].get('loadbalancer') + + if not lb: + msg = (_('Member %s loadbalancer object is missing') % + member['id']) + raise n_exc.BadRequest(resource='lbaas-vip', msg=msg) + + subnet_id = lb.get('vip_subnet_id') + network = lb_utils.get_network_from_subnet( + context, self.core_plugin, subnet_id) + + if not network.get('router:external'): + return + + # If VIP is attached to an external network, loadbalancer_mgr might not + # attach it to a router. If not, set the LB service connectivity path + # to the member subnet's router. 
+ service_client = self.core_plugin.nsxpolicy.load_balancer.lb_service + service = service_client.get(lb['id']) + if not service.get('connectivity_path'): + router_id = lb_utils.get_router_from_network( + context, self.core_plugin, member['subnet_id']) + if not self.core_plugin.service_router_has_services(context, + router_id): + self.core_plugin.create_service_router(context, router_id) + + connectivity_path = self.core_plugin.nsxpolicy.tier1.get_path( + router_id) + try: + service_client.update(lb['id'], + connectivity_path=connectivity_path) + p_utils.update_router_lb_vip_advertisement( + context, self.core_plugin, router_id) + except Exception as e: + with excutils.save_and_reraise_exception(): + completor(success=False) + LOG.error('Failed to set connectivity for loadbalancer ' + '%(lb)s on subnet %(sub)s with error %(err)s', + {'lb': lb['id'], + 'sub': member['subnet_id'], + 'err': e}) + + @log_helpers.log_method_call + def create(self, context, member, completor): + pool_client = self.core_plugin.nsxpolicy.load_balancer.lb_pool + network = lb_utils.get_network_from_subnet( + context, self.core_plugin, member['subnet_id']) + + self._validate_member_lb_connectivity(context, member, completor) + + if network.get('router:external'): + fixed_ip = self._get_info_from_fip(context, member['address']) + else: + fixed_ip = member['address'] + pool_id = member['pool']['id'] + try: + pool_client.create_pool_member_and_add_to_pool( + pool_id, fixed_ip, + port=member['protocol_port'], + display_name=member['name'][:218] + '_' + member['id'], + weight=member['weight']) + except Exception as e: + with excutils.save_and_reraise_exception(): + completor(success=False) + LOG.error('Failed to create member %(member)s on pool %(pool)s' + ': %(err)s', + {'member': member['id'], + 'pool': pool_id, 'err': e}) + completor(success=True) + + @log_helpers.log_method_call + def update(self, context, old_member, new_member, completor): + network = lb_utils.get_network_from_subnet( + context, self.core_plugin, new_member['subnet_id']) + if network.get('router:external'): + fixed_ip = self._get_info_from_fip(context, new_member['address']) + else: + fixed_ip = new_member['address'] + pool_id = new_member['pool']['id'] + pool_client = self.core_plugin.nsxpolicy.load_balancer.lb_pool + try: + pool_client.update_pool_member( + pool_id, fixed_ip, port=new_member['protocol_port'], + display_name=new_member['name'][:219] + '_' + new_member['id'], + weight=new_member['weight']) + + except Exception as e: + with excutils.save_and_reraise_exception(): + completor(success=False) + LOG.error('Failed to update member %(member)s on pool %(pool)s' + ': %(err)s', + {'member': new_member['id'], + 'pool': pool_id, 'err': e}) + completor(success=True) + + @log_helpers.log_method_call + def delete(self, context, member, completor): + network = lb_utils.get_network_from_subnet( + context, self.core_plugin, member['subnet_id']) + if network.get('router:external'): + fixed_ip = self._get_info_from_fip(context, member['address']) + else: + fixed_ip = member['address'] + pool_id = member['pool']['id'] + pool_client = self.core_plugin.nsxpolicy.load_balancer.lb_pool + try: + pool_client.remove_pool_member( + pool_id, fixed_ip, port=member['protocol_port']) + except Exception as e: + with excutils.save_and_reraise_exception(): + completor(success=False) + LOG.error('Failed to create member %(member)s on pool %(pool)s' + ': %(err)s', + {'member': member['id'], + 'pool': pool_id, 'err': e}) + completor(success=True) + + 
@log_helpers.log_method_call + def delete_cascade(self, context, member, completor): + # No action should be taken on members delete cascade + pass diff --git a/vmware_nsx/services/lbaas/nsx_p/implementation/pool_mgr.py b/vmware_nsx/services/lbaas/nsx_p/implementation/pool_mgr.py new file mode 100644 index 0000000000..77c56949ae --- /dev/null +++ b/vmware_nsx/services/lbaas/nsx_p/implementation/pool_mgr.py @@ -0,0 +1,289 @@ +# Copyright 2018 VMware, Inc. +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import functools + +from neutron_lib import exceptions as n_exc +from oslo_log import helpers as log_helpers +from oslo_log import log as logging +from oslo_utils import excutils + +from vmware_nsx._i18n import _ +from vmware_nsx.services.lbaas import base_mgr +from vmware_nsx.services.lbaas import lb_common +from vmware_nsx.services.lbaas import lb_const +from vmware_nsx.services.lbaas.nsx_v3.implementation import lb_utils +from vmware_nsxlib.v3 import exceptions as nsxlib_exc +from vmware_nsxlib.v3 import load_balancer as nsxlib_lb +from vmware_nsxlib.v3 import utils + +LOG = logging.getLogger(__name__) + + +class EdgePoolManagerFromDict(base_mgr.NsxpLoadbalancerBaseManager): + def _get_pool_kwargs(self, pool_id, name=None, tags=None, algorithm=None, + description=None): + kwargs = { + 'lb_pool_id': pool_id, + 'snat_translation': {'type': "LBSnatAutoMap"}} + if name: + kwargs['name'] = name + if tags: + kwargs['tags'] = tags + if algorithm: + kwargs['algorithm'] = algorithm + if description: + kwargs['description'] = description + return kwargs + + def _get_pool_tags(self, context, pool): + return lb_utils.get_tags(self.core_plugin, pool['id'], + lb_const.LB_POOL_TYPE, pool['tenant_id'], + context.project_name) + + def _remove_persistence(self, pool, vs_data): + sp = pool.get('session_persistence') + lb_client = self.core_plugin.nsxlib.load_balancer + pp_client = None + if not sp: + LOG.debug("No session persistence info for pool %s", pool['id']) + elif sp['type'] == lb_const.LB_SESSION_PERSISTENCE_HTTP_COOKIE: + pp_client = lb_client.lb_cookie_persistence_profile + elif sp['type'] == lb_const.LB_SESSION_PERSISTENCE_APP_COOKIE: + pp_client = lb_client.lb_cookie_persistence_profile + else: + pp_client = lb_client.lb_source_ip_persistence_profile + + persistence_profile_id = vs_data.get('persistence_profile_id') + if persistence_profile_id: + pp_client.delete(persistence_profile_id) + + def _process_vs_update(self, context, pool, pool_id, listener, completor): + vs_client = self.core_plugin.nsxpolicy.load_balancer.virtual_server + try: + # Process pool persistence profile and + # create/update/delete profile for virtual server + vs_data = vs_client.get(listener['id']) + if pool and pool_id: + (persistence_profile_id, + post_process_func) = self._setup_session_persistence( + pool, self._get_pool_tags(context, pool), + listener, vs_data) + else: + post_process_func = functools.partial( + self._remove_persistence, pool, vs_data) + persistence_profile_id = None + except 
nsxlib_exc.ManagerError:
+            with excutils.save_and_reraise_exception():
+                completor(success=False)
+                LOG.error("Failed to configure session persistence "
+                          "profile for pool %(pool_id)s",
+                          {'pool_id': pool['id']})
+        try:
+            # Update persistence profile and pool on virtual server
+            vs_client.update(
+                listener['id'],
+                pool_id=pool_id,
+                lb_persistence_profile_id=persistence_profile_id)
+
+            LOG.debug("Updated NSX virtual server %(vs_id)s with "
+                      "pool %(pool_id)s and persistence profile %(prof)s",
+                      {'vs_id': listener['id'], 'pool_id': pool['id'],
+                       'prof': persistence_profile_id})
+            if post_process_func:
+                post_process_func()
+        except nsxlib_exc.ManagerError:
+            with excutils.save_and_reraise_exception():
+                completor(success=False)
+                LOG.error('Failed to attach pool %s to virtual '
+                          'server %s', pool['id'], listener['id'])
+
+    def _setup_session_persistence(self, pool, pool_tags, listener, vs_data):
+        sp = pool.get('session_persistence')
+        pers_type = None
+        cookie_name = None
+        cookie_mode = None
+        lb_client = self.core_plugin.nsxlib.load_balancer
+        pp_client = None
+        if not sp:
+            LOG.debug("No session persistence info for pool %s", pool['id'])
+        elif sp['type'] == lb_const.LB_SESSION_PERSISTENCE_HTTP_COOKIE:
+            pp_client = lb_client.lb_cookie_persistence_profile
+            pers_type = nsxlib_lb.PersistenceProfileTypes.COOKIE
+            cookie_name = sp.get('cookie_name')
+            if not cookie_name:
+                cookie_name = lb_const.SESSION_PERSISTENCE_DEFAULT_COOKIE_NAME
+            cookie_mode = "INSERT"
+        elif sp['type'] == lb_const.LB_SESSION_PERSISTENCE_APP_COOKIE:
+            pp_client = lb_client.lb_cookie_persistence_profile
+            pers_type = nsxlib_lb.PersistenceProfileTypes.COOKIE
+            # In this case cookie name is mandatory
+            cookie_name = sp['cookie_name']
+            cookie_mode = "REWRITE"
+        else:
+            pp_client = lb_client.lb_source_ip_persistence_profile
+            pers_type = nsxlib_lb.PersistenceProfileTypes.SOURCE_IP
+
+        if pers_type:
+            # There is a profile to create or update
+            pp_kwargs = {
+                'persistence_profile_id': pool['id'],
+                'name': "persistence_%s" % utils.get_name_and_uuid(
+                    pool['name'] or 'pool', pool['id'], maxlen=235),
+                'tags': self._build_persistence_profile_tags(
+                    pool_tags, listener)
+            }
+            if cookie_name:
+                pp_kwargs['cookie_name'] = cookie_name
+                pp_kwargs['cookie_mode'] = cookie_mode
+
+        persistence_profile_id = vs_data.get('persistence_profile_id')
+        if persistence_profile_id:
+            # NOTE: removal of the persistence profile must be executed
+            # after the virtual server has been updated
+            if pers_type:
+                # Update existing profile
+                LOG.debug("Updating persistence profile %(profile_id)s for "
+                          "listener %(listener_id)s with pool %(pool_id)s",
+                          {'profile_id': persistence_profile_id,
+                           'listener_id': listener['id'],
+                           'pool_id': pool['id']})
+                pp_client.update(persistence_profile_id, **pp_kwargs)
+                return persistence_profile_id, None
+            else:
+                # Prepare removal of persistence profile
+                return (None, functools.partial(self._remove_persistence,
+                                                pool, vs_data))
+        elif pers_type:
+            # Create persistence profile
+            pp_data = pp_client.create(**pp_kwargs)
+            LOG.debug("Created persistence profile %(profile_id)s for "
+                      "listener %(listener_id)s with pool %(pool_id)s",
+                      {'profile_id': pp_data['id'],
+                       'listener_id': listener['id'],
+                       'pool_id': pool['id']})
+            return pp_data['id'], None
+        return None, None
+
+    @log_helpers.log_method_call
+    def create(self, context, pool, completor):
+        pool_client = self.core_plugin.nsxpolicy.load_balancer.lb_pool
+
+        pool_name = utils.get_name_and_uuid(pool['name'] or 'pool', pool['id'])
+        tags = 
+    @log_helpers.log_method_call
+    def create(self, context, pool, completor):
+        pool_client = self.core_plugin.nsxpolicy.load_balancer.lb_pool
+
+        pool_name = utils.get_name_and_uuid(pool['name'] or 'pool', pool['id'])
+        tags = self._get_pool_tags(context, pool)
+
+        description = pool.get('description')
+        lb_algorithm = lb_const.LB_POOL_ALGORITHM_MAP.get(pool['lb_algorithm'])
+        # NOTE(salv-orlando): Guard against accidental compat breakages
+        try:
+            listener = pool['listener'] or pool['listeners'][0]
+        except IndexError:
+            # If listeners is an empty list we hit this exception
+            listener = None
+        # Perform additional validation for session persistence before
+        # creating resources in the backend
+        lb_common.validate_session_persistence(pool, listener, completor)
+        try:
+            kwargs = self._get_pool_kwargs(pool['id'], pool_name, tags,
+                                           lb_algorithm, description)
+            pool_client.create_or_overwrite(**kwargs)
+        except nsxlib_exc.ManagerError:
+            completor(success=False)
+            msg = (_('Failed to create pool on NSX backend: %(pool)s') %
+                   {'pool': pool['id']})
+            raise n_exc.BadRequest(resource='lbaas-pool', msg=msg)
+
+        # The pool object can be created with either --listener or
+        # --loadbalancer option. If listener is present, the virtual server
+        # will be updated with the pool. Otherwise, just return. The binding
+        # will be added later when the pool is associated with layer7 rule.
+        # FIXME(salv-orlando): This two-step process can leave a zombie pool
+        # on NSX if the VS update operation fails
+        if listener:
+            self._process_vs_update(context, pool, pool['id'], listener,
+                                    completor)
+        completor(success=True)
+
+    @log_helpers.log_method_call
+    def update(self, context, old_pool, new_pool, completor):
+        pool_client = self.core_plugin.nsxpolicy.load_balancer.lb_pool
+
+        pool_name = None
+        tags = None
+        lb_algorithm = None
+        description = None
+        if new_pool['name'] != old_pool['name']:
+            pool_name = utils.get_name_and_uuid(new_pool['name'] or 'pool',
+                                                new_pool['id'])
+            tags = self._get_pool_tags(context, new_pool)
+        if new_pool['lb_algorithm'] != old_pool['lb_algorithm']:
+            lb_algorithm = lb_const.LB_POOL_ALGORITHM_MAP.get(
+                new_pool['lb_algorithm'])
+        if new_pool.get('description') != old_pool.get('description'):
+            description = new_pool['description']
+        # NOTE(salv-orlando): Guard against accidental compat breakages
+        try:
+            listener = new_pool['listener'] or new_pool['listeners'][0]
+        except IndexError:
+            # If listeners is an empty list we hit this exception
+            listener = None
+        # Perform additional validation for session persistence before
+        # operating on resources in the backend
+        lb_common.validate_session_persistence(new_pool, listener, completor,
+                                               old_pool=old_pool)
+
+        try:
+            # _get_pool_kwargs() expects the pool id as its first argument
+            # and returns it as 'lb_pool_id', so it is not passed again
+            # positionally to the client update call.
+            kwargs = self._get_pool_kwargs(new_pool['id'], pool_name, tags,
+                                           lb_algorithm, description)
+            pool_client.update(**kwargs)
+            if (listener and new_pool['session_persistence'] !=
+                old_pool['session_persistence']):
+                self._process_vs_update(context, new_pool, new_pool['id'],
+                                        listener, completor)
+            completor(success=True)
+        except Exception as e:
+            with excutils.save_and_reraise_exception():
+                completor(success=False)
+                LOG.error('Failed to update pool %(pool)s with '
+                          'error %(error)s',
+                          {'pool': old_pool['id'], 'error': e})
+
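+    # The create/update/delete handlers share the same completor contract:
+    # completor is a callback injected by the LBaaS/Octavia glue layer and
+    # should be invoked exactly once per operation, e.g.
+    #     completor(success=True)    # typically maps to an ACTIVE status
+    #     completor(success=False)   # typically maps to an ERROR status
+    # The exact status transition is handled by the caller; the mapping
+    # shown here is assumed, not defined in this module.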
+    @log_helpers.log_method_call
+    def delete(self, context, pool, completor):
+        pool_client = self.core_plugin.nsxpolicy.load_balancer.lb_pool
+
+        # NOTE(salv-orlando): Guard against accidental compat breakages
+        try:
+            listener = pool['listener'] or pool['listeners'][0]
+        except IndexError:
+            # If listeners is an empty list we hit this exception
+            listener = None
+        if listener:
+            self._process_vs_update(context, pool, None, listener, completor)
+        try:
+            pool_client.delete(pool['id'])
+        except nsxlib_exc.ResourceNotFound:
+            pass
+        except nsxlib_exc.ManagerError:
+            completor(success=False)
+            msg = (_('Failed to delete lb pool from nsx: %(pool)s') %
+                   {'pool': pool['id']})
+            raise n_exc.BadRequest(resource='lbaas-pool', msg=msg)
+
+        completor(success=True)
+
+    @log_helpers.log_method_call
+    def delete_cascade(self, context, pool, completor):
+        self.delete(context, pool, completor)
diff --git a/vmware_nsx/services/lbaas/nsx_p/v2/__init__.py b/vmware_nsx/services/lbaas/nsx_p/v2/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/vmware_nsx/services/lbaas/nsx_p/v2/lb_driver_v2.py b/vmware_nsx/services/lbaas/nsx_p/v2/lb_driver_v2.py
new file mode 100644
index 0000000000..023f8f3b27
--- /dev/null
+++ b/vmware_nsx/services/lbaas/nsx_p/v2/lb_driver_v2.py
@@ -0,0 +1,143 @@
+# Copyright 2017 VMware, Inc.
+# All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from neutron_lib.callbacks import events
+from neutron_lib.callbacks import registry
+from neutron_lib.callbacks import resources
+from neutron_lib import constants as n_consts
+from oslo_log import helpers as log_helpers
+from oslo_log import log as logging
+
+from vmware_nsx.services.lbaas import base_mgr
+from vmware_nsx.services.lbaas import lb_helper
+from vmware_nsx.services.lbaas import lb_translators
+from vmware_nsx.services.lbaas.nsx_p.implementation import healthmonitor_mgr
+from vmware_nsx.services.lbaas.nsx_p.implementation import l7policy_mgr
+from vmware_nsx.services.lbaas.nsx_p.implementation import l7rule_mgr
+from vmware_nsx.services.lbaas.nsx_p.implementation import listener_mgr
+from vmware_nsx.services.lbaas.nsx_p.implementation import loadbalancer_mgr
+from vmware_nsx.services.lbaas.nsx_p.implementation import member_mgr
+from vmware_nsx.services.lbaas.nsx_p.implementation import pool_mgr
+from vmware_nsx.services.lbaas.octavia import constants as oct_const
+
+LOG = logging.getLogger(__name__)
+
+
+class NotImplementedManager(object):
+    """Helper class to make any subclass of LoadBalancerBaseDriver explode if
+    it is missing any of the required object managers.
+    """
+
+    def create(self, context, obj):
+        raise NotImplementedError()
+
+    def update(self, context, old_obj, obj):
+        raise NotImplementedError()
+
+    def delete(self, context, obj):
+        raise NotImplementedError()
+
+
+class EdgeLoadbalancerDriverV2(base_mgr.LoadbalancerBaseManager):
+    @log_helpers.log_method_call
+    def __init__(self):
+        super(EdgeLoadbalancerDriverV2, self).__init__()
+
+        # Init all LBaaS objects
+        # Note(asarfaty): self.lbv2_driver is not yet defined at init time
+        # so lambda is used to retrieve it later.
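+        # Each wrapper below bundles a dict-based manager, the translator
+        # that converts the LBaaS object into that dict, and a lazy getter
+        # for the matching lbv2_driver manager used for completion callbacks.
+        # Roughly (illustration only; the exact wiring lives in lb_helper):
+        #     self.pool.create(context, pool_obj)
+        #         -> pool_dict = lb_translators.lb_pool_obj_to_dict(pool_obj)
+        #         -> EdgePoolManagerFromDict().create(context, pool_dict,
+        #                completor=<built from self.lbv2_driver.pool>)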
+        self.loadbalancer = lb_helper.LBaaSNSXObjectManagerWrapper(
+            "loadbalancer",
+            loadbalancer_mgr.EdgeLoadBalancerManagerFromDict(),
+            lb_translators.lb_loadbalancer_obj_to_dict,
+            lambda: self.lbv2_driver.load_balancer)
+
+        self.listener = lb_helper.LBaaSNSXObjectManagerWrapper(
+            "listener",
+            listener_mgr.EdgeListenerManagerFromDict(),
+            lb_translators.lb_listener_obj_to_dict,
+            lambda: self.lbv2_driver.listener)
+
+        self.pool = lb_helper.LBaaSNSXObjectManagerWrapper(
+            "pool",
+            pool_mgr.EdgePoolManagerFromDict(),
+            lb_translators.lb_pool_obj_to_dict,
+            lambda: self.lbv2_driver.pool)
+
+        self.member = lb_helper.LBaaSNSXObjectManagerWrapper(
+            "member",
+            member_mgr.EdgeMemberManagerFromDict(),
+            lb_translators.lb_member_obj_to_dict,
+            lambda: self.lbv2_driver.member)
+
+        self.healthmonitor = lb_helper.LBaaSNSXObjectManagerWrapper(
+            "healthmonitor",
+            healthmonitor_mgr.EdgeHealthMonitorManagerFromDict(),
+            lb_translators.lb_hm_obj_to_dict,
+            lambda: self.lbv2_driver.health_monitor)
+
+        self.l7policy = lb_helper.LBaaSNSXObjectManagerWrapper(
+            "l7policy",
+            l7policy_mgr.EdgeL7PolicyManagerFromDict(),
+            lb_translators.lb_l7policy_obj_to_dict,
+            lambda: self.lbv2_driver.l7policy)
+
+        self.l7rule = lb_helper.LBaaSNSXObjectManagerWrapper(
+            "l7rule",
+            l7rule_mgr.EdgeL7RuleManagerFromDict(),
+            lb_translators.lb_l7rule_obj_to_dict,
+            lambda: self.lbv2_driver.l7rule)
+
+        self._subscribe_router_delete_callback()
+
+    def _subscribe_router_delete_callback(self):
+        # Check if there is any LB attachment for the NSX router.
+        # These callbacks are subscribed here to prevent router/GW/interface
+        # deletion if the router still has an LB service attached to it.
+
+        # Note(asarfaty): These callbacks are used by Octavia as well, even
+        # though they are registered only here
+        registry.subscribe(self._check_lb_service_on_router,
+                           resources.ROUTER, events.BEFORE_DELETE)
+        registry.subscribe(self._check_lb_service_on_router,
+                           resources.ROUTER_GATEWAY, events.BEFORE_DELETE)
+        registry.subscribe(self._check_lb_service_on_router_interface,
+                           resources.ROUTER_INTERFACE, events.BEFORE_DELETE)
+
+    def _unsubscribe_router_delete_callback(self):
+        registry.unsubscribe(self._check_lb_service_on_router,
+                             resources.ROUTER, events.BEFORE_DELETE)
+        registry.unsubscribe(self._check_lb_service_on_router,
+                             resources.ROUTER_GATEWAY, events.BEFORE_DELETE)
+        registry.unsubscribe(self._check_lb_service_on_router_interface,
+                             resources.ROUTER_INTERFACE, events.BEFORE_DELETE)
+
+    def _get_lb_ports(self, context, subnet_ids):
+        dev_owner_v2 = n_consts.DEVICE_OWNER_LOADBALANCERV2
+        dev_owner_oct = oct_const.DEVICE_OWNER_OCTAVIA
+        filters = {'device_owner': [dev_owner_v2, dev_owner_oct],
+                   'fixed_ips': {'subnet_id': subnet_ids}}
+        return self.loadbalancer.core_plugin.get_ports(
+            context, filters=filters)
+
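+    # The two checks below are placeholders in this change. A possible
+    # implementation (an assumption, not part of this patch) would use
+    # _get_lb_ports() to look for LBaaS/Octavia ports on the router's
+    # subnets and raise a callback exception to veto the deletion, e.g.:
+    #     if self._get_lb_ports(context.elevated(), router_subnet_ids):
+    #         raise <callback-failure exception>
+    # where router_subnet_ids and the exception type are assumptions.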
+    def _check_lb_service_on_router(self, resource, event, trigger,
+                                    payload=None):
+        """Prevent removing a router GW or deleting a router used by LB"""
+        pass
+
+    def _check_lb_service_on_router_interface(
+            self, resource, event, trigger, payload=None):
+        # Prevent removing the interface of an LB subnet from a router
+        pass
diff --git a/vmware_nsx/services/lbaas/nsx_v3/implementation/pool_mgr.py b/vmware_nsx/services/lbaas/nsx_v3/implementation/pool_mgr.py
index d234de3090..d00e5b1653 100644
--- a/vmware_nsx/services/lbaas/nsx_v3/implementation/pool_mgr.py
+++ b/vmware_nsx/services/lbaas/nsx_v3/implementation/pool_mgr.py
@@ -24,6 +24,7 @@ from vmware_nsx._i18n import _
 from vmware_nsx.common import exceptions as nsx_exc
 from vmware_nsx.db import db as nsx_db
 from vmware_nsx.services.lbaas import base_mgr
+from vmware_nsx.services.lbaas import lb_common
 from vmware_nsx.services.lbaas import lb_const
 from vmware_nsx.services.lbaas.nsx_v3.implementation import lb_utils
 from vmware_nsxlib.v3 import exceptions as nsxlib_exc
@@ -67,44 +68,6 @@ class EdgePoolManagerFromDict(base_mgr.Nsxv3LoadbalancerBaseManager):
                 'tag': listener['id']})
         return tags
 
-    @log_helpers.log_method_call
-    def _validate_session_persistence(self, pool, listener, completor,
-                                      old_pool=None):
-        sp = pool.get('session_persistence')
-        if not listener or not sp:
-            # safety first!
-            return
-        # L4 listeners only allow source IP persistence
-        if (listener['protocol'] == lb_const.LB_PROTOCOL_TCP and
-                sp['type'] != lb_const.LB_SESSION_PERSISTENCE_SOURCE_IP):
-            completor(success=False)
-            msg = (_("Invalid session persistence type %(sp_type)s for "
-                     "pool on listener %(lst_id)s with %(proto)s protocol") %
-                   {'sp_type': sp['type'],
-                    'lst_id': listener['id'],
-                    'proto': listener['protocol']})
-            raise n_exc.BadRequest(resource='lbaas-pool', msg=msg)
-        # Cannot switch (yet) on update from source IP to cookie based, and
-        # vice versa
-        cookie_pers_types = (lb_const.LB_SESSION_PERSISTENCE_HTTP_COOKIE,
-                             lb_const.LB_SESSION_PERSISTENCE_APP_COOKIE)
-        if old_pool:
-            oldsp = old_pool.get('session_persistence')
-            if not oldsp:
-                return
-            if ((sp['type'] == lb_const.LB_SESSION_PERSISTENCE_SOURCE_IP and
-                 oldsp['type'] in cookie_pers_types) or
-                (sp['type'] in cookie_pers_types and
-                 oldsp['type'] == lb_const.LB_SESSION_PERSISTENCE_SOURCE_IP)):
-                completor(success=False)
-                msg = (_("Cannot update session persistence type to "
-                         "%(sp_type)s for pool on listener %(lst_id)s "
-                         "from %(old_sp_type)s") %
-                       {'sp_type': sp['type'],
-                        'lst_id': listener['id'],
-                        'old_sp_type': oldsp['type']})
-                raise n_exc.BadRequest(resource='lbaas-pool', msg=msg)
-
     @log_helpers.log_method_call
     def _setup_session_persistence(self, pool, pool_tags,
                                    listener, vs_data):
@@ -244,7 +207,7 @@ class EdgePoolManagerFromDict(base_mgr.Nsxv3LoadbalancerBaseManager):
             listener = None
         # Perform additional validation for session persistence before
         # creating resources in the backend
-        self._validate_session_persistence(pool, listener, completor)
+        lb_common.validate_session_persistence(pool, listener, completor)
         try:
             kwargs = self._get_pool_kwargs(pool_name, tags, lb_algorithm,
                                            description)
@@ -317,8 +280,8 @@ class EdgePoolManagerFromDict(base_mgr.Nsxv3LoadbalancerBaseManager):
 
         # Perform additional validation for session persistence before
        # operating on resources in the backend
-        self._validate_session_persistence(new_pool, listener, completor,
-                                           old_pool=old_pool)
+        lb_common.validate_session_persistence(new_pool, listener, completor,
+                                               old_pool=old_pool)
         try:
             lb_pool_id = binding['lb_pool_id']