Remove neutron-lbaas support & dependencies
Commit Ia4f4b335295c0e6add79fe0db5dd31b4327fdb54 removed all the neutron-lbaas code from the master (Train) branch.

Change-Id: I9035f6238773aad0591436c856550b7a5e01e687
parent 8c37986e84
commit d753ec6945
@@ -12,7 +12,6 @@
       - openstack/neutron
       - openstack/networking-l2gw
       - openstack/networking-sfc
-      - openstack/neutron-lbaas
       - x/vmware-nsxlib
       - openstack/neutron-fwaas
       - openstack/neutron-dynamic-routing
@@ -25,7 +24,6 @@
       - openstack/neutron
       - openstack/networking-l2gw
       - openstack/networking-sfc
-      - openstack/neutron-lbaas
       - x/vmware-nsxlib
       - openstack/neutron-fwaas
       - openstack/neutron-dynamic-routing
@@ -38,7 +36,6 @@
       - openstack/neutron
       - openstack/networking-l2gw
       - openstack/networking-sfc
-      - openstack/neutron-lbaas
       - x/vmware-nsxlib
       - openstack/neutron-fwaas
       - openstack/neutron-dynamic-routing
@@ -54,7 +51,6 @@
       - openstack/neutron
       - openstack/networking-l2gw
       - openstack/networking-sfc
-      - openstack/neutron-lbaas
       - x/vmware-nsxlib
       - openstack/neutron-fwaas
       - openstack/neutron-dynamic-routing
@@ -67,7 +63,6 @@
       - openstack/neutron
       - openstack/networking-l2gw
       - openstack/networking-sfc
-      - openstack/neutron-lbaas
       - x/vmware-nsxlib
       - openstack/neutron-fwaas
       - openstack/neutron-dynamic-routing
@@ -80,7 +75,6 @@
       - openstack/neutron
       - openstack/networking-l2gw
       - openstack/networking-sfc
-      - openstack/neutron-lbaas
       - x/vmware-nsxlib
       - openstack/neutron-fwaas
       - openstack/neutron-dynamic-routing
@@ -96,7 +90,6 @@
       - openstack/neutron
       - openstack/networking-l2gw
       - openstack/networking-sfc
-      - openstack/neutron-lbaas
       - x/vmware-nsxlib
       - openstack/neutron-fwaas
       - openstack/neutron-dynamic-routing
@@ -31,7 +31,7 @@ function _nsxv_ini_set {


 function install_neutron_projects {
-    pkg_list="networking-l2gw networking-sfc neutron-lbaas neutron-fwaas neutron-dynamic-routing neutron-vpnaas octavia octavia-lib vmware-nsxlib"
+    pkg_list="networking-l2gw networking-sfc neutron-fwaas neutron-dynamic-routing neutron-vpnaas octavia octavia-lib vmware-nsxlib"
     for pkg in `echo $pkg_list`
     do
         pkg_renamed=`echo $pkg | sed 's/-/_/g'`
@@ -1,11 +1,13 @@
 # The order of packages is significant, because pip processes them in the order
 # of appearance. Changing the order has an impact on the overall integration
 # process, which may cause wedges in the gate later.
-sphinx!=1.6.6,!=1.6.7,>=1.6.2 # BSD
+sphinx!=1.6.6,!=1.6.7,>=1.6.2,<2.0.0;python_version=='2.7' # BSD
+sphinx!=1.6.6,!=1.6.7,>=1.6.2;python_version>='3.4' # BSD
 oslosphinx>=4.7.0 # Apache-2.0
-openstackdocstheme>=1.18.1 # Apache-2.0
-oslotest>=3.2.0 # Apache-2.0
 reno>=2.5.0 # Apache-2.0
+
 fixtures>=3.0.0 # Apache-2.0/BSD
 testresources>=2.0.0 # Apache-2.0/BSD
 testscenarios>=0.4 # Apache-2.0/BSD
+openstackdocstheme>=1.18.1 # Apache-2.0
+oslotest>=3.2.0 # Apache-2.0
@@ -8,24 +8,6 @@ configuration file(s) run ./stack.sh
 NSX-V
 -----

-LBaaS v2 Driver
-~~~~~~~~~~~~~~~
-
-Add lbaas repo as an external repository and configure following flags in ``local.conf``::
-
-    [[local]|[localrc]]
-    enable_service q-lbaasv2
-    Q_SERVICE_PLUGIN_CLASSES+=,vmware_nsx_lbaasv2
-
-Configure the service provider::
-    [[post-config|$NEUTRON_CONF]]
-    [service_providers]
-    service_provider = LOADBALANCERV2:VMWareEdge:neutron_lbaas.drivers.vmware.edge_driver_v2.EdgeLoadBalancerDriverV2:default
-
-    [[post-config|$NEUTRON_CONF]]
-    [DEFAULT]
-    api_extensions_path = $DEST/neutron-lbaas/neutron_lbaas/extensions
-
 QoS Driver
 ~~~~~~~~~~

@@ -217,23 +199,6 @@ Add neutron-fwaas repo as an external repository and configure following flags i
     [service_providers]
     service_provider = FIREWALL_V2:fwaas_db:neutron_fwaas.services.firewall.service_drivers.agents.agents.FirewallAgentDriver:default

-LBaaS v2 Driver
-~~~~~~~~~~~~~~~
-
-Add lbaas repo as an external repository and configure following flags in ``local.conf``::
-
-    [[local]|[localrc]]
-    enable_service q-lbaasv2
-    Q_SERVICE_PLUGIN_CLASSES+=,vmware_nsx_lbaasv2
-
-Configure the service provider::
-    [[post-config|$NEUTRON_CONF]]
-    [service_providers]
-    service_provider = LOADBALANCERV2:VMWareEdge:neutron_lbaas.drivers.vmware.edge_driver_v2.EdgeLoadBalancerDriverV2:default
-
-    [DEFAULT]
-    api_extensions_path = $DEST/neutron-lbaas/neutron_lbaas/extensions
-
 Neutron VPNaaS
 ~~~~~~~~~~~~~~

@@ -311,23 +276,6 @@ Add neutron-fwaas repo as an external repository and configure following flags i
     [service_providers]
     service_provider = FIREWALL_V2:fwaas_db:neutron_fwaas.services.firewall.service_drivers.agents.agents.FirewallAgentDriver:default

-LBaaS v2 Driver
-~~~~~~~~~~~~~~~
-
-Add lbaas repo as an external repository and configure following flags in ``local.conf``::
-
-    [[local]|[localrc]]
-    enable_service q-lbaasv2
-    Q_SERVICE_PLUGIN_CLASSES+=,vmware_nsx_lbaasv2
-
-Configure the service provider::
-    [[post-config|$NEUTRON_CONF]]
-    [service_providers]
-    service_provider = LOADBALANCERV2:VMWareEdge:neutron_lbaas.drivers.vmware.edge_driver_v2.EdgeLoadBalancerDriverV2:default
-
-    [DEFAULT]
-    api_extensions_path = $DEST/neutron-lbaas/neutron_lbaas/extensions
-
 Octavia
 ~~~~~~~

@@ -361,24 +309,6 @@ Add octavia and python-octaviaclient repos as external repositories and configur
 NSX-TVD
 -------

-LBaaS v2 Driver
-~~~~~~~~~~~~~~~
-
-Add lbaas repo as an external repository and configure following flags in ``local.conf``::
-
-    [[local]|[localrc]]
-    enable_service q-lbaasv2
-    Q_SERVICE_PLUGIN_CLASSES+=,vmware_nsxtvd_lbaasv2
-
-Configure the service provider::
-    [[post-config|$NEUTRON_LBAAS_CONF]]
-    [service_providers]
-    service_provider = LOADBALANCERV2:VMWareEdge:neutron_lbaas.drivers.vmware.edge_driver_v2.EdgeLoadBalancerDriverV2:default
-
-    [[post-config|$NEUTRON_CONF]]
-    [DEFAULT]
-    api_extensions_path = $DEST/neutron-lbaas/neutron_lbaas/extensions
-
 FWaaS (V2) Driver
 ~~~~~~~~~~~~~~~~~

@@ -69,9 +69,6 @@ the backend when required.
 error_backup_edge: scans from backup Edge appliances which are in ERROR state.
 When in non-readonly mode, the job will reset the Edge appliance configuration.

-lbaas_pending: scans the neutron DB for LBaaS objects which are pending for too
-long. Report it, and if in non-readonly mode change its status to ERROR
-
 NSX-v3
 ~~~~~~

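Each housekeeping job named above is a small class registered under the
``vmware_nsx.neutron.nsxv.housekeeper.jobs`` entry points (see the setup.cfg
hunk below). A minimal skeleton, mirroring the removed ``lbaas_pending`` job
shown in full further down in this diff, looks roughly like::

    from vmware_nsx.extensions import projectpluginmap
    from vmware_nsx.plugins.common.housekeeper import base_job


    class NoopJob(base_job.BaseJob):
        # Illustrative skeleton only; method names and the return value
        # follow the removed lbaas_pending job, not a job the plugin ships.

        def get_project_plugin(self, plugin):
            return plugin.get_plugin_by_type(projectpluginmap.NsxPlugins.NSX_V)

        def get_name(self):
            return 'noop'

        def get_description(self):
            return 'Report nothing and fix nothing'

        def run(self, context, readonly=False):
            super(NoopJob, self).run(context)
            return {'error_count': 0, 'fixed_count': 0,
                    'error_info': 'nothing to report'}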
@@ -37,7 +37,7 @@ requests==2.14.2
 requests-mock==1.2.0
 six==1.10.0
 SQLAlchemy==1.2.0
-Sphinx==1.6.5
+sphinx==1.6.5
 stestr==1.0.0
 stevedore==1.20.0
 tempest==17.1.0
@@ -36,7 +36,6 @@ mock>=2.0.0 # BSD
 neutron>=14.0.0.0rc1 # Apache-2.0
 networking-l2gw>=14.0.0 # Apache-2.0
 networking-sfc>=8.0.0.0rc1 # Apache-2.0
-neutron-lbaas>=14.0.0.0rc1 # Apache-2.0
 neutron-fwaas>=14.0.0.0rc1 # Apache-2.0
 neutron-vpnaas>=14.0.0.0rc1 # Apache-2.0
 neutron-dynamic-routing>=14.0.0.0rc1 # Apache-2.0
@@ -42,9 +42,6 @@ firewall_drivers =
     vmware_nsxtvd_edge_v2 = vmware_nsx.services.fwaas.nsx_tv.edge_fwaas_driver_v2:EdgeFwaasTVDriverV2
 neutron.service_plugins =
     vmware_nsxv_qos = vmware_nsx.services.qos.nsx_v.plugin:NsxVQosPlugin
-    vmware_nsx_lbaasv2 = vmware_nsx.services.lbaas.nsx_plugin:LoadBalancerNSXPluginV2
-    vmware_nsxtvd_lbaasv2 = vmware_nsx.services.lbaas.nsx.plugin:LoadBalancerTVPluginV2
     vmware_nsxtvd_fwaasv2 = vmware_nsx.services.fwaas.nsx_tv.plugin_v2:FwaasTVPluginV2
     vmware_nsxtvd_l2gw = vmware_nsx.services.l2gateway.nsx_tvd.plugin:L2GatewayPlugin
     vmware_nsxtvd_qos = vmware_nsx.services.qos.nsx_tvd.plugin:QoSPlugin
     vmware_nsxtvd_vpnaas = vmware_nsx.services.vpnaas.nsx_tvd.plugin:VPNPlugin
@@ -89,7 +86,6 @@ openstack.nsxclient.v2 =
 vmware_nsx.neutron.nsxv.housekeeper.jobs =
     error_dhcp_edge = vmware_nsx.plugins.nsx_v.housekeeper.error_dhcp_edge:ErrorDhcpEdgeJob
     error_backup_edge = vmware_nsx.plugins.nsx_v.housekeeper.error_backup_edge:ErrorBackupEdgeJob
-    lbaas_pending = vmware_nsx.plugins.nsx_v.housekeeper.lbaas_pending:LbaasPendingJob
 vmware_nsx.neutron.nsxv3.housekeeper.jobs =
     orphaned_dhcp_server = vmware_nsx.plugins.nsx_v3.housekeeper.orphaned_dhcp_server:OrphanedDhcpServerJob
     orphaned_logical_switch = vmware_nsx.plugins.nsx_v3.housekeeper.orphaned_logical_switch:OrphanedLogicalSwitchJob
@@ -6,7 +6,6 @@ hacking!=0.13.0,<0.14,>=0.12.0 # Apache-2.0
 coverage!=4.4,>=4.0 # Apache-2.0
 fixtures>=3.0.0 # Apache-2.0/BSD
 flake8-import-order==0.12 # LGPLv3
 sphinx!=1.6.6,!=1.6.7,>=1.6.5 # BSD
 mock>=2.0.0 # BSD

 psycopg2>=2.7 # LGPL/ZPL
tox.ini
@@ -30,7 +30,6 @@ commands = false
 commands =
     pip install -q -e "git+https://opendev.org/openstack/networking-l2gw#egg=networking_l2gw"
     pip install -q -e "git+https://opendev.org/openstack/networking-sfc#egg=networking_sfc"
-    pip install -q -e "git+https://opendev.org/openstack/neutron-lbaas#egg=neutron_lbaas"
     pip install -q -e "git+https://opendev.org/openstack/neutron-fwaas#egg=neutron_fwaas"
     pip install -q -e "git+https://opendev.org/openstack/neutron-dynamic-routing#egg=neutron_dynamic_routing"
     pip install -q -e "git+https://opendev.org/openstack/neutron-vpnaas#egg=neutron_vpnaas"
@@ -814,8 +814,7 @@ nsxv_opts = [
                help=_("If False, different tenants will not use the same "
                       "DHCP edge or router edge.")),
     cfg.ListOpt('housekeeping_jobs',
-                default=['error_dhcp_edge', 'error_backup_edge',
-                         'lbaas_pending'],
+                default=['error_dhcp_edge', 'error_backup_edge'],
                 help=_("List of the enabled housekeeping jobs")),
     cfg.ListOpt('housekeeping_readonly_jobs',
                 default=[],
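The ``housekeeping_jobs`` option only selects which of the registered jobs are
enabled; the jobs themselves come from the
``vmware_nsx.neutron.nsxv.housekeeper.jobs`` entry points listed in the
setup.cfg hunk above. A rough sketch of that kind of name-based filtering with
stevedore (the ``nsxv`` option group and the helper name are illustrative,
not the plugin's actual loader)::

    from oslo_config import cfg
    from stevedore import named


    def load_enabled_jobs():
        # Load only the entry points whose names appear in the
        # housekeeping_jobs list, without instantiating them yet.
        mgr = named.NamedExtensionManager(
            namespace='vmware_nsx.neutron.nsxv.housekeeper.jobs',
            names=cfg.CONF.nsxv.housekeeping_jobs,
            invoke_on_load=False)
        return {ext.name: ext.plugin for ext in mgr}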
@@ -46,6 +46,8 @@ def upgrade():

     if migration.schema_has_table(table_name):
         inspector = reflection.Inspector.from_engine(op.get_bind())
-        fk_constraint = inspector.get_foreign_keys(table_name)[0]
-        op.drop_constraint(fk_constraint.get('name'), table_name,
-                           type_='foreignkey')
+        fks = inspector.get_foreign_keys(table_name)
+        if fks:
+            fk_constraint = fks[0]
+            op.drop_constraint(fk_constraint.get('name'), table_name,
+                               type_='foreignkey')
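The reworked migration guards against tables that no longer carry a foreign
key, so re-running the upgrade does not raise IndexError when
``get_foreign_keys()`` returns an empty list. The same defensive pattern, as a
standalone sketch that assumes it runs inside an Alembic ``upgrade()`` (the
helper name is illustrative)::

    from alembic import op
    from sqlalchemy.engine import reflection


    def drop_first_fk_if_present(table_name):
        # Inspect the live schema; some deployments may already have lost
        # the constraint, so check before indexing into the list.
        inspector = reflection.Inspector.from_engine(op.get_bind())
        fks = inspector.get_foreign_keys(table_name)
        if fks:
            op.drop_constraint(fks[0].get('name'), table_name,
                               type_='foreignkey')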
@@ -63,7 +63,6 @@ from vmware_nsx.plugins.common import plugin as nsx_plugin_common
 from vmware_nsx.plugins.dvs import plugin as dvs
 from vmware_nsx.plugins.nsx_v import plugin as v
 from vmware_nsx.plugins.nsx_v3 import plugin as t
-from vmware_nsx.services.lbaas.nsx import lb_driver_v2
 from vmware_nsx.services.lbaas.octavia import octavia_listener
 from vmware_nsx.services.lbaas.octavia import tvd_wrapper as octavia_tvd

@@ -116,7 +115,6 @@ class NsxTVDPlugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,

         # init the extensions supported by any of the plugins
         self.init_extensions()
-        self.lbv2_driver = lb_driver_v2.EdgeLoadbalancerDriverV2()

         self._unsubscribe_callback_events()

@@ -85,7 +85,6 @@ from vmware_nsx.services.lbaas.nsx_p.implementation import listener_mgr
 from vmware_nsx.services.lbaas.nsx_p.implementation import loadbalancer_mgr
 from vmware_nsx.services.lbaas.nsx_p.implementation import member_mgr
 from vmware_nsx.services.lbaas.nsx_p.implementation import pool_mgr
-from vmware_nsx.services.lbaas.nsx_p.v2 import lb_driver_v2
 from vmware_nsx.services.lbaas.octavia import octavia_listener
 from vmware_nsx.services.qos.common import utils as qos_com_utils
 from vmware_nsx.services.qos.nsx_v3 import driver as qos_driver
@@ -220,7 +219,6 @@ class NsxPolicyPlugin(nsx_plugin_common.NsxPluginV3Base):
         self._init_profiles()
         self._prepare_exclude_list()
         self._init_dhcp_metadata()
-        self.lbv2_driver = self._init_lbv2_driver()

         # Init QoS
         qos_driver.register(qos_utils.PolicyQosNotificationsHandler())
@@ -459,13 +457,6 @@ class NsxPolicyPlugin(nsx_plugin_common.NsxPluginV3Base):
     def plugin_type():
         return projectpluginmap.NsxPlugins.NSX_P

-    def _init_lbv2_driver(self):
-        # Get LBaaSv2 driver during plugin initialization. If the platform
-        # has a version that doesn't support native loadbalancing, the driver
-        # will return a NotImplementedManager class.
-        LOG.debug("Initializing LBaaSv2.0 nsxp driver")
-        return lb_driver_v2.EdgeLoadbalancerDriverV2()
-
     @staticmethod
     def is_tvd_plugin():
         return False
@@ -1446,10 +1437,11 @@ class NsxPolicyPlugin(nsx_plugin_common.NsxPluginV3Base):
         fw_exist = self._router_has_edge_fw_rules(context, router)
         lb_exist = False
         if not (fw_exist or snat_exist):
-            lb_exist = self.service_router_has_loadbalancers(router_id)
+            lb_exist = self.service_router_has_loadbalancers(
+                context, router_id)
         return snat_exist or lb_exist or fw_exist

-    def service_router_has_loadbalancers(self, router_id):
+    def service_router_has_loadbalancers(self, context, router_id):
         tags_to_search = [{'scope': lb_const.LR_ROUTER_TYPE, 'tag': router_id}]
         router_lb_services = self.nsxpolicy.search_by_tags(
             tags_to_search,
@@ -1,108 +0,0 @@
# Copyright 2018 VMware, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import time

from neutron_lbaas.db.loadbalancer import models
from neutron_lib import constants
from oslo_log import log

from vmware_nsx.extensions import projectpluginmap
from vmware_nsx.plugins.common.housekeeper import base_job

LOG = log.getLogger(__name__)

ELEMENT_LIFETIME = 3 * 60 * 60  # Three hours lifetime


class LbaasPendingJob(base_job.BaseJob):
    lbaas_objects = {}
    lbaas_models = [models.LoadBalancer,
                    models.Listener,
                    models.L7Policy,
                    models.L7Rule,
                    models.PoolV2,
                    models.MemberV2,
                    models.HealthMonitorV2]

    def get_project_plugin(self, plugin):
        return plugin.get_plugin_by_type(projectpluginmap.NsxPlugins.NSX_V)

    def get_name(self):
        return 'lbaas_pending'

    def get_description(self):
        return 'Monitor LBaaS objects in pending states'

    def run(self, context, readonly=False):
        super(LbaasPendingJob, self).run(context)
        curr_time = time.time()
        error_count = 0
        fixed_count = 0
        error_info = ''

        for model in self.lbaas_models:
            sess = context.session
            elements = sess.query(model).filter(
                model.provisioning_status.in_(
                    [constants.PENDING_CREATE,
                     constants.PENDING_UPDATE,
                     constants.PENDING_DELETE])).all()

            for element in elements:
                if element['id'] in self.lbaas_objects:
                    obj = self.lbaas_objects[element['id']]
                    lifetime = curr_time - obj['time_added']
                    if lifetime > ELEMENT_LIFETIME:
                        # Entry has been pending for more than lifetime.
                        # Report and remove when in R/W mode
                        error_count += 1
                        error_info = base_job.housekeeper_warning(
                            error_info,
                            'LBaaS %s %s is stuck in pending state',
                            model.NAME, element['id'])

                        if not readonly:
                            element['provisioning_status'] = constants.ERROR
                            fixed_count += 1
                        del self.lbaas_objects[element['id']]
                    else:
                        # Entry is still pending but haven't reached lifetime
                        LOG.debug('Housekeeping: LBaaS object %s %s in '
                                  'PENDING state for %d seconds', model.NAME,
                                  element['id'], lifetime)
                        obj['time_seen'] = curr_time
                else:
                    # Entry wasn't seen before this iteration - add to dict
                    LOG.debug('Housekeeping: monitoring PENDING state for '
                              'LBaaS object %s %s', model.NAME, element['id'])
                    self.lbaas_objects[element.id] = {
                        'model': model,
                        'time_added': curr_time,
                        'time_seen': curr_time}

        # Look for dictionary entries which weren't seen in this iteration.
        # Such entries were either removed from DB or their state was changed.
        for obj_id in self.lbaas_objects.keys():
            if self.lbaas_objects[obj_id]['time_seen'] != curr_time:
                LOG.debug('Housekeeping: LBaaS %s %s is back to normal',
                          self.lbaas_objects[obj_id]['model'].NAME, obj_id)
                del self.lbaas_objects[obj_id]

        if error_count == 0:
            error_info = 'No LBaaS objects in pending state'
        return {'error_count': error_count,
                'fixed_count': fixed_count,
                'error_info': error_info}
@@ -5053,3 +5053,8 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,

     def _get_appservice_id(self, name):
         return self.nsx_v.vcns.get_application_id(name)
+
+    def service_router_has_loadbalancers(self, context, router_id):
+        # This api is used by Octavia to verify that a router can be deleted
+        # Currently the V plugin does not support this
+        return False
@@ -22,14 +22,11 @@ from vmware_nsx.plugins.nsx_v.vshield import edge_dynamic_routing_driver
 from vmware_nsx.plugins.nsx_v.vshield import edge_firewall_driver
 from vmware_nsx.plugins.nsx_v.vshield.tasks import tasks
 from vmware_nsx.plugins.nsx_v.vshield import vcns
-from vmware_nsx.services.lbaas.nsx_v.v2 import (
-    edge_loadbalancer_driver_v2 as lbaas_v2)

 LOG = logging.getLogger(__name__)


 class VcnsDriver(edge_appliance_driver.EdgeApplianceDriver,
-                 lbaas_v2.EdgeLoadbalancerDriverV2,
                  edge_firewall_driver.EdgeFirewallDriver,
                  edge_dynamic_routing_driver.EdgeDynamicRoutingDriver):

@@ -100,7 +100,6 @@ from vmware_nsx.services.lbaas.nsx_v3.implementation import listener_mgr
 from vmware_nsx.services.lbaas.nsx_v3.implementation import loadbalancer_mgr
 from vmware_nsx.services.lbaas.nsx_v3.implementation import member_mgr
 from vmware_nsx.services.lbaas.nsx_v3.implementation import pool_mgr
-from vmware_nsx.services.lbaas.nsx_v3.v2 import lb_driver_v2
 from vmware_nsx.services.lbaas.octavia import constants as oct_const
 from vmware_nsx.services.lbaas.octavia import octavia_listener
 from vmware_nsx.services.qos.common import utils as qos_com_utils
@@ -202,7 +201,6 @@ class NsxV3Plugin(nsx_plugin_common.NsxPluginV3Base,
         else:
             nsxlib_utils.set_inject_headers_callback(
                 v3_utils.inject_requestid_header)
-        self.lbv2_driver = self._init_lbv2_driver()

         registry.subscribe(
             self.on_subnetpool_address_scope_updated,
@@ -474,18 +472,6 @@ class NsxV3Plugin(nsx_plugin_common.NsxPluginV3Base,
         self.fwaas_callbacks = fwaas_callbacks_v2.Nsxv3FwaasCallbacksV2(
             with_rpc)

-    def _init_lbv2_driver(self):
-        # Get LBaaSv2 driver during plugin initialization. If the platform
-        # has a version that doesn't support native loadbalancing, the driver
-        # will return a NotImplementedManager class.
-        LOG.debug("Initializing LBaaSv2.0 nsxv3 driver")
-        if self.nsxlib.feature_supported(nsxlib_consts.FEATURE_LOAD_BALANCER):
-            return lb_driver_v2.EdgeLoadbalancerDriverV2()
-        else:
-            LOG.warning("Current NSX version %(ver)s doesn't support LBaaS",
-                        {'ver': self.nsxlib.get_version()})
-            return lb_driver_v2.DummyLoadbalancerDriverV2()
-
     def init_availability_zones(self):
         self._availability_zones_data = nsx_az.NsxV3AvailabilityZones(
             use_tvd_config=self._is_sub_plugin)
@@ -2103,17 +2089,20 @@ class NsxV3Plugin(nsx_plugin_common.NsxPluginV3Base,
         return self.nsxlib.router.has_service_router(nsx_router_id)

     def service_router_has_services(self, context, router_id):
-        nsx_router_id = nsx_db.get_nsx_router_id(context.session,
-                                                 router_id)
         router = self._get_router(context, router_id)
         snat_exist = router.enable_snat
-        lb_exist = nsx_db.has_nsx_lbaas_loadbalancer_binding_by_router(
-            context.session, nsx_router_id)
+        lb_exist = self.service_router_has_loadbalancers(context, router_id)
         fw_exist = self._router_has_edge_fw_rules(context, router)
-        if snat_exist or lb_exist or fw_exist:
-            return True
+        return snat_exist or lb_exist or fw_exist
+
+    def service_router_has_loadbalancers(self, context, router_id):
+        nsx_router_id = nsx_db.get_nsx_router_id(context.session,
+                                                 router_id)
+        return nsx_db.has_nsx_lbaas_loadbalancer_binding_by_router(
+            context.session, nsx_router_id)

     def create_service_router(self, context, router_id, router=None,
                               update_firewall=True):
         """Create a service router and enable standby relocation"""

@ -1,118 +0,0 @@
|
||||
# Copyright 2015 VMware, Inc.
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from oslo_log import helpers as log_helpers
|
||||
|
||||
from neutron_lib import exceptions as n_exc
|
||||
from neutron_lib.plugins import constants as plugin_const
|
||||
from neutron_lib.plugins import directory
|
||||
|
||||
from vmware_nsx.extensions import projectpluginmap
|
||||
|
||||
|
||||
class LBaaSNSXObjectManagerWrapper(object):
|
||||
"""Wrapper class to connect the LB api with the NSX-V/V3 implementations
|
||||
|
||||
This class will call the actual NSX-V LBaaS logic after translating
|
||||
the LB object into a dictionary, and will also handle success/failure cases
|
||||
"""
|
||||
_core_plugin = None
|
||||
|
||||
@log_helpers.log_method_call
|
||||
def __init__(self, object_type, implementor, translator, get_completor):
|
||||
super(LBaaSNSXObjectManagerWrapper, self).__init__()
|
||||
self.object_type = object_type
|
||||
self.implementor = implementor
|
||||
self.translator = translator
|
||||
self.get_completor = get_completor
|
||||
|
||||
def _get_plugin(self, plugin_type):
|
||||
return directory.get_plugin(plugin_type)
|
||||
|
||||
@property
|
||||
def core_plugin(self):
|
||||
if not self._core_plugin:
|
||||
self._core_plugin = (
|
||||
self._get_plugin(plugin_const.CORE))
|
||||
if self._core_plugin.is_tvd_plugin():
|
||||
# get the plugin that match this driver
|
||||
self._core_plugin = self._core_plugin.get_plugin_by_type(
|
||||
projectpluginmap.NsxPlugins.NSX_T)
|
||||
return self._core_plugin
|
||||
|
||||
def get_completor_func(self, context, obj, delete=False):
|
||||
# return a method that will be called on success/failure completion
|
||||
def completor_func(success=True):
|
||||
completor = self.get_completor()
|
||||
if completor:
|
||||
if success:
|
||||
return completor.successful_completion(
|
||||
context, obj, delete=delete)
|
||||
else:
|
||||
return completor.failed_completion(
|
||||
context, obj)
|
||||
|
||||
return completor_func
|
||||
|
||||
@log_helpers.log_method_call
|
||||
def create(self, context, obj, **args):
|
||||
obj_dict = self.translator(obj)
|
||||
completor_func = self.get_completor_func(context, obj)
|
||||
return self.implementor.create(context, obj_dict, completor_func,
|
||||
**args)
|
||||
|
||||
@log_helpers.log_method_call
|
||||
def update(self, context, old_obj, new_obj, **args):
|
||||
old_obj_dict = self.translator(old_obj)
|
||||
new_obj_dict = self.translator(new_obj)
|
||||
completor_func = self.get_completor_func(context, new_obj)
|
||||
return self.implementor.update(context, old_obj_dict, new_obj_dict,
|
||||
completor_func, **args)
|
||||
|
||||
@log_helpers.log_method_call
|
||||
def delete(self, context, obj, **args):
|
||||
obj_dict = self.translator(obj)
|
||||
completor_func = self.get_completor_func(context, obj, delete=True)
|
||||
return self.implementor.delete(context, obj_dict, completor_func,
|
||||
**args)
|
||||
|
||||
@log_helpers.log_method_call
|
||||
def refresh(self, context, obj):
|
||||
# verify that this api exists (supported only for loadbalancer)
|
||||
if not hasattr(self.implementor, 'refresh'):
|
||||
msg = (_("LBaaS object %s does not support refresh api") %
|
||||
self.object_type)
|
||||
raise n_exc.BadRequest(resource='edge', msg=msg)
|
||||
obj_dict = self.translator(obj)
|
||||
return self.implementor.refresh(context, obj_dict)
|
||||
|
||||
@log_helpers.log_method_call
|
||||
def stats(self, context, obj):
|
||||
# verify that this api exists (supported only for loadbalancer)
|
||||
if not hasattr(self.implementor, 'stats'):
|
||||
msg = (_("LBaaS object %s does not support stats api") %
|
||||
self.object_type)
|
||||
raise n_exc.BadRequest(resource='edge', msg=msg)
|
||||
obj_dict = self.translator(obj)
|
||||
return self.implementor.stats(context, obj_dict)
|
||||
|
||||
@log_helpers.log_method_call
|
||||
def get_operating_status(self, context, id, **args):
|
||||
# verify that this api exists (supported only for loadbalancer)
|
||||
if not hasattr(self.implementor, 'get_operating_status'):
|
||||
msg = (_("LBaaS object %s does not support get_operating_status "
|
||||
"api") % self.object_type)
|
||||
raise n_exc.BadRequest(resource='edge', msg=msg)
|
||||
return self.implementor.get_operating_status(context, id, **args)
|
@ -1,21 +0,0 @@
|
||||
# Copyright 2018 VMware, Inc.
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
# This file contains LBaaS mocks, to allow the vmware nsx plugins to work when
|
||||
# LBaaS code does not exist, and LBaaS is not configured in neutron
|
||||
|
||||
|
||||
class LoadBalancer(object):
|
||||
pass
|
@ -1,265 +0,0 @@
|
||||
# Copyright 2017 VMware, Inc.
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from oslo_log import helpers as log_helpers
|
||||
from oslo_log import log as logging
|
||||
|
||||
from neutron_lib import exceptions as n_exc
|
||||
|
||||
from vmware_nsx.services.lbaas import base_mgr
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class EdgeLoadbalancerDriverV2(object):
|
||||
@log_helpers.log_method_call
|
||||
def __init__(self):
|
||||
super(EdgeLoadbalancerDriverV2, self).__init__()
|
||||
|
||||
self.loadbalancer = EdgeLoadBalancerManager()
|
||||
self.listener = EdgeListenerManager()
|
||||
self.pool = EdgePoolManager()
|
||||
self.member = EdgeMemberManager()
|
||||
self.healthmonitor = EdgeHealthMonitorManager()
|
||||
self.l7policy = EdgeL7PolicyManager()
|
||||
self.l7rule = EdgeL7RuleManager()
|
||||
|
||||
|
||||
class EdgeLoadBalancerManager(base_mgr.LoadbalancerBaseManager):
|
||||
|
||||
@log_helpers.log_method_call
|
||||
def create(self, context, lb):
|
||||
# verify that the subnet belongs to the same plugin as the lb
|
||||
lb_p = self.core_plugin._get_plugin_from_project(context,
|
||||
lb.tenant_id)
|
||||
subnet_p = self.core_plugin._get_subnet_plugin_by_id(
|
||||
context, lb.vip_subnet_id)
|
||||
if lb_p.plugin_type() != subnet_p.plugin_type():
|
||||
self.lbv2_driver.load_balancer.failed_completion(context, lb)
|
||||
msg = (_('Subnet must belong to the plugin %s, as the '
|
||||
'loadbalancer') % lb_p.plugin_type())
|
||||
raise n_exc.BadRequest(resource='edge-lbaas', msg=msg)
|
||||
return lb_p.lbv2_driver.loadbalancer.create(context, lb)
|
||||
|
||||
@log_helpers.log_method_call
|
||||
def update(self, context, old_lb, new_lb):
|
||||
p = self.core_plugin._get_plugin_from_project(context,
|
||||
new_lb.tenant_id)
|
||||
return p.lbv2_driver.loadbalancer.update(context, old_lb, new_lb)
|
||||
|
||||
@log_helpers.log_method_call
|
||||
def delete(self, context, lb):
|
||||
p = self.core_plugin._get_plugin_from_project(context,
|
||||
lb.tenant_id)
|
||||
return p.lbv2_driver.loadbalancer.delete(context, lb)
|
||||
|
||||
@log_helpers.log_method_call
|
||||
def refresh(self, context, lb):
|
||||
p = self.core_plugin._get_plugin_from_project(context,
|
||||
lb.tenant_id)
|
||||
return p.lbv2_driver.loadbalancer.refresh(context, lb)
|
||||
|
||||
@log_helpers.log_method_call
|
||||
def stats(self, context, lb):
|
||||
p = self.core_plugin._get_plugin_from_project(context,
|
||||
lb.tenant_id)
|
||||
return p.lbv2_driver.loadbalancer.stats(context, lb)
|
||||
|
||||
@log_helpers.log_method_call
|
||||
def get_operating_status(self, context, id, with_members=False):
|
||||
p = self.core_plugin._get_plugin_from_project(context,
|
||||
context.project_id)
|
||||
return p.lbv2_driver.loadbalancer.get_operating_status(
|
||||
context, id, with_members=with_members)
|
||||
|
||||
|
||||
class EdgeListenerManager(base_mgr.LoadbalancerBaseManager):
|
||||
|
||||
@log_helpers.log_method_call
|
||||
def create(self, context, listener, certificate=None):
|
||||
p = self.core_plugin._get_plugin_from_project(context,
|
||||
listener.tenant_id)
|
||||
if listener.loadbalancer:
|
||||
# Verify that this is the same plugin as the loadbalancer
|
||||
lb_p = self.core_plugin._get_plugin_from_project(
|
||||
context, listener.loadbalancer.tenant_id)
|
||||
if lb_p != p:
|
||||
msg = (_('Listener must belong to the plugin %s, as the '
|
||||
'loadbalancer') % lb_p.plugin_type())
|
||||
raise n_exc.BadRequest(resource='edge-lbaas', msg=msg)
|
||||
|
||||
return p.lbv2_driver.listener.create(context, listener,
|
||||
certificate=certificate)
|
||||
|
||||
@log_helpers.log_method_call
|
||||
def update(self, context, old_listener, new_listener, certificate=None):
|
||||
p = self.core_plugin._get_plugin_from_project(context,
|
||||
new_listener.tenant_id)
|
||||
return p.lbv2_driver.listener.update(context,
|
||||
old_listener,
|
||||
new_listener,
|
||||
certificate=certificate)
|
||||
|
||||
@log_helpers.log_method_call
|
||||
def delete(self, context, listener):
|
||||
p = self.core_plugin._get_plugin_from_project(context,
|
||||
listener.tenant_id)
|
||||
return p.lbv2_driver.listener.delete(context, listener)
|
||||
|
||||
|
||||
class EdgePoolManager(base_mgr.LoadbalancerBaseManager):
|
||||
|
||||
@log_helpers.log_method_call
|
||||
def create(self, context, pool):
|
||||
p = self.core_plugin._get_plugin_from_project(context,
|
||||
pool.tenant_id)
|
||||
if pool.loadbalancer:
|
||||
# Verify that this is the same plugin as the loadbalancer
|
||||
lb_p = self.core_plugin._get_plugin_from_project(
|
||||
context, pool.loadbalancer.tenant_id)
|
||||
if lb_p != p:
|
||||
msg = (_('Pool must belong to the plugin %s, as the '
|
||||
'loadbalancer') % lb_p.plugin_type())
|
||||
raise n_exc.BadRequest(resource='edge-lbaas', msg=msg)
|
||||
return p.lbv2_driver.pool.create(context, pool)
|
||||
|
||||
@log_helpers.log_method_call
|
||||
def update(self, context, old_pool, new_pool):
|
||||
p = self.core_plugin._get_plugin_from_project(context,
|
||||
new_pool.tenant_id)
|
||||
return p.lbv2_driver.pool.update(context, old_pool, new_pool)
|
||||
|
||||
@log_helpers.log_method_call
|
||||
def delete(self, context, pool):
|
||||
p = self.core_plugin._get_plugin_from_project(context,
|
||||
pool.tenant_id)
|
||||
return p.lbv2_driver.pool.delete(context, pool)
|
||||
|
||||
|
||||
class EdgeMemberManager(base_mgr.LoadbalancerBaseManager):
|
||||
|
||||
@log_helpers.log_method_call
|
||||
def create(self, context, member):
|
||||
p = self.core_plugin._get_plugin_from_project(context,
|
||||
member.tenant_id)
|
||||
if member.pool and member.pool.loadbalancer:
|
||||
# Verify that this is the same plugin as the loadbalancer
|
||||
lb_p = self.core_plugin._get_plugin_from_project(
|
||||
context, member.pool.loadbalancer.tenant_id)
|
||||
if lb_p != p:
|
||||
msg = (_('Member must belong to the plugin %s, as the '
|
||||
'loadbalancer') % lb_p.plugin_type())
|
||||
raise n_exc.BadRequest(resource='edge-lbaas', msg=msg)
|
||||
return p.lbv2_driver.member.create(context, member)
|
||||
|
||||
@log_helpers.log_method_call
|
||||
def update(self, context, old_member, new_member):
|
||||
p = self.core_plugin._get_plugin_from_project(context,
|
||||
new_member.tenant_id)
|
||||
return p.lbv2_driver.member.update(context, old_member, new_member)
|
||||
|
||||
@log_helpers.log_method_call
|
||||
def delete(self, context, member):
|
||||
p = self.core_plugin._get_plugin_from_project(context,
|
||||
member.tenant_id)
|
||||
return p.lbv2_driver.member.delete(context, member)
|
||||
|
||||
|
||||
class EdgeHealthMonitorManager(base_mgr.LoadbalancerBaseManager):
|
||||
|
||||
@log_helpers.log_method_call
|
||||
def create(self, context, hm):
|
||||
p = self.core_plugin._get_plugin_from_project(context,
|
||||
hm.tenant_id)
|
||||
if hm.pool and hm.pool.loadbalancer:
|
||||
# Verify that this is the same plugin as the loadbalancer
|
||||
lb_p = self.core_plugin._get_plugin_from_project(
|
||||
context, hm.pool.loadbalancer.tenant_id)
|
||||
if lb_p != p:
|
||||
msg = (_('Health monitor must belong to the plugin %s, as the '
|
||||
'loadbalancer') % lb_p.plugin_type())
|
||||
raise n_exc.BadRequest(resource='edge-lbaas', msg=msg)
|
||||
return p.lbv2_driver.healthmonitor.create(context, hm)
|
||||
|
||||
@log_helpers.log_method_call
|
||||
def update(self, context, old_hm, new_hm):
|
||||
p = self.core_plugin._get_plugin_from_project(context,
|
||||
new_hm.tenant_id)
|
||||
return p.lbv2_driver.healthmonitor.update(context, old_hm, new_hm)
|
||||
|
||||
@log_helpers.log_method_call
|
||||
def delete(self, context, hm):
|
||||
p = self.core_plugin._get_plugin_from_project(context,
|
||||
hm.tenant_id)
|
||||
return p.lbv2_driver.healthmonitor.delete(context, hm)
|
||||
|
||||
|
||||
class EdgeL7PolicyManager(base_mgr.LoadbalancerBaseManager):
|
||||
|
||||
@log_helpers.log_method_call
|
||||
def create(self, context, policy):
|
||||
p = self.core_plugin._get_plugin_from_project(context,
|
||||
policy.tenant_id)
|
||||
if policy.listener and policy.listener.loadbalancer:
|
||||
# Verify that this is the same plugin as the loadbalancer
|
||||
lb_p = self.core_plugin._get_plugin_from_project(
|
||||
context, policy.listener.loadbalancer.tenant_id)
|
||||
if lb_p != p:
|
||||
msg = (_('L7 Policy must belong to the plugin %s, as the '
|
||||
'loadbalancer') % lb_p.plugin_type())
|
||||
raise n_exc.BadRequest(resource='edge-lbaas', msg=msg)
|
||||
return p.lbv2_driver.l7policy.create(context, policy)
|
||||
|
||||
@log_helpers.log_method_call
|
||||
def update(self, context, old_policy, new_policy):
|
||||
p = self.core_plugin._get_plugin_from_project(context,
|
||||
new_policy.tenant_id)
|
||||
return p.lbv2_driver.l7policy.update(context, old_policy, new_policy)
|
||||
|
||||
@log_helpers.log_method_call
|
||||
def delete(self, context, policy):
|
||||
p = self.core_plugin._get_plugin_from_project(context,
|
||||
policy.tenant_id)
|
||||
return p.lbv2_driver.l7policy.delete(context, policy)
|
||||
|
||||
|
||||
class EdgeL7RuleManager(base_mgr.LoadbalancerBaseManager):
|
||||
|
||||
@log_helpers.log_method_call
|
||||
def create(self, context, rule):
|
||||
p = self.core_plugin._get_plugin_from_project(context,
|
||||
rule.tenant_id)
|
||||
if (rule.policy and rule.policy.listener and
|
||||
rule.policy.listener.loadbalancer):
|
||||
# Verify that this is the same plugin as the loadbalancer
|
||||
lb_p = self.core_plugin._get_plugin_from_project(
|
||||
context, rule.policy.listener.loadbalancer.tenant_id)
|
||||
if lb_p != p:
|
||||
msg = (_('L7 Rule must belong to the plugin %s, as the '
|
||||
'loadbalancer') % lb_p.plugin_type())
|
||||
raise n_exc.BadRequest(resource='edge-lbaas', msg=msg)
|
||||
return p.lbv2_driver.l7rule.create(context, rule)
|
||||
|
||||
@log_helpers.log_method_call
|
||||
def update(self, context, old_rule, new_rule):
|
||||
p = self.core_plugin._get_plugin_from_project(context,
|
||||
new_rule.tenant_id)
|
||||
return p.lbv2_driver.l7rule.update(context, old_rule, new_rule)
|
||||
|
||||
@log_helpers.log_method_call
|
||||
def delete(self, context, rule):
|
||||
p = self.core_plugin._get_plugin_from_project(context,
|
||||
rule.tenant_id)
|
||||
return p.lbv2_driver.l7rule.delete(context, rule)
|
@ -1,31 +0,0 @@
|
||||
# Copyright 2017 VMware, Inc.
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from vmware_nsx.services.lbaas import nsx_plugin
|
||||
|
||||
from vmware_nsx.plugins.nsx import utils as tvd_utils
|
||||
|
||||
|
||||
@tvd_utils.filter_plugins
|
||||
class LoadBalancerTVPluginV2(nsx_plugin.LoadBalancerNSXPluginV2):
|
||||
"""NSX-TV plugin for LBaaS V2.
|
||||
|
||||
This plugin adds separation between T/V instances
|
||||
"""
|
||||
methods_to_separate = ['get_loadbalancers',
|
||||
'get_listeners',
|
||||
'get_pools',
|
||||
'get_healthmonitors',
|
||||
'get_l7policies']
|
@ -1,158 +0,0 @@
|
||||
# Copyright 2017 VMware, Inc.
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from neutron_lib.callbacks import events
|
||||
from neutron_lib.callbacks import registry
|
||||
from neutron_lib.callbacks import resources
|
||||
from neutron_lib import constants as n_consts
|
||||
from neutron_lib import exceptions as n_exc
|
||||
from oslo_log import helpers as log_helpers
|
||||
from oslo_log import log as logging
|
||||
|
||||
from vmware_nsx.services.lbaas import base_mgr
|
||||
from vmware_nsx.services.lbaas import lb_helper
|
||||
from vmware_nsx.services.lbaas import lb_translators
|
||||
from vmware_nsx.services.lbaas.nsx_p.implementation import healthmonitor_mgr
|
||||
from vmware_nsx.services.lbaas.nsx_p.implementation import l7policy_mgr
|
||||
from vmware_nsx.services.lbaas.nsx_p.implementation import l7rule_mgr
|
||||
from vmware_nsx.services.lbaas.nsx_p.implementation import listener_mgr
|
||||
from vmware_nsx.services.lbaas.nsx_p.implementation import loadbalancer_mgr
|
||||
from vmware_nsx.services.lbaas.nsx_p.implementation import member_mgr
|
||||
from vmware_nsx.services.lbaas.nsx_p.implementation import pool_mgr
|
||||
from vmware_nsx.services.lbaas.octavia import constants as oct_const
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class NotImplementedManager(object):
|
||||
"""Helper class to make any subclass of LoadBalancerBaseDriver explode if
|
||||
it is missing any of the required object managers.
|
||||
"""
|
||||
|
||||
def create(self, context, obj):
|
||||
raise NotImplementedError()
|
||||
|
||||
def update(self, context, old_obj, obj):
|
||||
raise NotImplementedError()
|
||||
|
||||
def delete(self, context, obj):
|
||||
raise NotImplementedError()
|
||||
|
||||
|
||||
class EdgeLoadbalancerDriverV2(base_mgr.LoadbalancerBaseManager):
|
||||
@log_helpers.log_method_call
|
||||
def __init__(self):
|
||||
super(EdgeLoadbalancerDriverV2, self).__init__()
|
||||
|
||||
# Init all LBaaS objects
|
||||
# Note(asarfaty): self.lbv2_driver is not yet defined at init time
|
||||
# so lambda is used to retrieve it later.
|
||||
self.loadbalancer = lb_helper.LBaaSNSXObjectManagerWrapper(
|
||||
"loadbalancer",
|
||||
loadbalancer_mgr.EdgeLoadBalancerManagerFromDict(),
|
||||
lb_translators.lb_loadbalancer_obj_to_dict,
|
||||
lambda: self.lbv2_driver.load_balancer)
|
||||
|
||||
self.listener = lb_helper.LBaaSNSXObjectManagerWrapper(
|
||||
"listener",
|
||||
listener_mgr.EdgeListenerManagerFromDict(),
|
||||
lb_translators.lb_listener_obj_to_dict,
|
||||
lambda: self.lbv2_driver.listener)
|
||||
|
||||
self.pool = lb_helper.LBaaSNSXObjectManagerWrapper(
|
||||
"pool",
|
||||
pool_mgr.EdgePoolManagerFromDict(),
|
||||
lb_translators.lb_pool_obj_to_dict,
|
||||
lambda: self.lbv2_driver.pool)
|
||||
|
||||
self.member = lb_helper.LBaaSNSXObjectManagerWrapper(
|
||||
"member",
|
||||
member_mgr.EdgeMemberManagerFromDict(),
|
||||
lb_translators.lb_member_obj_to_dict,
|
||||
lambda: self.lbv2_driver.member)
|
||||
|
||||
self.healthmonitor = lb_helper.LBaaSNSXObjectManagerWrapper(
|
||||
"healthmonitor",
|
||||
healthmonitor_mgr.EdgeHealthMonitorManagerFromDict(),
|
||||
lb_translators.lb_hm_obj_to_dict,
|
||||
lambda: self.lbv2_driver.health_monitor)
|
||||
|
||||
self.l7policy = lb_helper.LBaaSNSXObjectManagerWrapper(
|
||||
"l7policy",
|
||||
l7policy_mgr.EdgeL7PolicyManagerFromDict(),
|
||||
lb_translators.lb_l7policy_obj_to_dict,
|
||||
lambda: self.lbv2_driver.l7policy)
|
||||
|
||||
self.l7rule = lb_helper.LBaaSNSXObjectManagerWrapper(
|
||||
"l7rule",
|
||||
l7rule_mgr.EdgeL7RuleManagerFromDict(),
|
||||
lb_translators.lb_l7rule_obj_to_dict,
|
||||
lambda: self.lbv2_driver.l7rule)
|
||||
|
||||
self._subscribe_router_delete_callback()
|
||||
|
||||
def _subscribe_router_delete_callback(self):
|
||||
# Check if there is any LB attachment for the NSX router.
|
||||
# This callback is subscribed here to prevent router/GW/interface
|
||||
# deletion if it still has LB service attached to it.
|
||||
|
||||
#Note(asarfaty): Those callbacks are used by Octavia as well even
|
||||
# though they are bound only here
|
||||
registry.subscribe(self._check_lb_service_on_router,
|
||||
resources.ROUTER, events.BEFORE_DELETE)
|
||||
registry.subscribe(self._check_lb_service_on_router,
|
||||
resources.ROUTER_GATEWAY, events.BEFORE_DELETE)
|
||||
registry.subscribe(self._check_lb_service_on_router_interface,
|
||||
resources.ROUTER_INTERFACE, events.BEFORE_DELETE)
|
||||
|
||||
def _unsubscribe_router_delete_callback(self):
|
||||
registry.unsubscribe(self._check_lb_service_on_router,
|
||||
resources.ROUTER, events.BEFORE_DELETE)
|
||||
registry.unsubscribe(self._check_lb_service_on_router,
|
||||
resources.ROUTER_GATEWAY, events.BEFORE_DELETE)
|
||||
registry.unsubscribe(self._check_lb_service_on_router_interface,
|
||||
resources.ROUTER_INTERFACE, events.BEFORE_DELETE)
|
||||
|
||||
def _get_lb_ports(self, context, subnet_ids):
|
||||
dev_owner_v2 = n_consts.DEVICE_OWNER_LOADBALANCERV2
|
||||
dev_owner_oct = oct_const.DEVICE_OWNER_OCTAVIA
|
||||
filters = {'device_owner': [dev_owner_v2, dev_owner_oct],
|
||||
'fixed_ips': {'subnet_id': subnet_ids}}
|
||||
return self.loadbalancer.core_plugin.get_ports(
|
||||
context, filters=filters)
|
||||
|
||||
def _check_lb_service_on_router(self, resource, event, trigger,
|
||||
payload=None):
|
||||
"""Prevent removing a router GW or deleting a router used by LB"""
|
||||
router_id = payload.resource_id
|
||||
if self.loadbalancer.core_plugin.service_router_has_loadbalancers(
|
||||
router_id):
|
||||
msg = _('Cannot delete a %s as it still has lb service '
|
||||
'attachment') % resource
|
||||
raise n_exc.BadRequest(resource='lbaas-lb', msg=msg)
|
||||
|
||||
def _check_lb_service_on_router_interface(
|
||||
self, resource, event, trigger, payload=None):
|
||||
# Prevent removing the interface of an LB subnet from a router
|
||||
router_id = payload.resource_id
|
||||
subnet_id = payload.metadata.get('subnet_id')
|
||||
if not router_id or not subnet_id:
|
||||
return
|
||||
|
||||
# get LB ports and check if any loadbalancer is using this subnet
|
||||
if self._get_lb_ports(payload.context.elevated(), [subnet_id]):
|
||||
msg = _('Cannot delete a router interface as it used by a '
|
||||
'loadbalancer')
|
||||
raise n_exc.BadRequest(resource='lbaas-lb', msg=msg)
|
@ -1,85 +0,0 @@
|
||||
# Copyright 2018 VMware, Inc.
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from oslo_log import log as logging
|
||||
|
||||
from neutron_lbaas.db.loadbalancer import models
|
||||
from neutron_lbaas.services.loadbalancer import plugin
|
||||
|
||||
from vmware_nsx.services.lbaas import lb_const
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class LoadBalancerNSXPluginV2(plugin.LoadBalancerPluginv2):
|
||||
"""NSX Plugin for LBaaS V2.
|
||||
|
||||
This plugin overrides the statuses call to issue the DB update before
|
||||
displaying the results.
|
||||
"""
|
||||
|
||||
def nsx_update_operational_statuses(self, context, loadbalancer_id,
|
||||
with_members=False):
|
||||
"""Update LB objects operating status
|
||||
|
||||
Call the driver to get the current statuses, and update those in the DB
|
||||
"""
|
||||
# get the driver
|
||||
driver = self._get_driver_for_loadbalancer(
|
||||
context, loadbalancer_id)
|
||||
driver_obj = driver.load_balancer.lbv2_driver
|
||||
|
||||
# Get the current statuses from the driver
|
||||
lb_statuses = driver_obj.loadbalancer.get_operating_status(
|
||||
context, loadbalancer_id, with_members=with_members)
|
||||
if not lb_statuses:
|
||||
return
|
||||
|
||||
# update the new statuses in the LBaaS DB
|
||||
if lb_const.LOADBALANCERS in lb_statuses:
|
||||
for lb in lb_statuses[lb_const.LOADBALANCERS]:
|
||||
self.db.update_status(context, models.LoadBalancer, lb['id'],
|
||||
operating_status=lb['status'])
|
||||
if lb_const.LISTENERS in lb_statuses:
|
||||
for listener in lb_statuses[lb_const.LISTENERS]:
|
||||
self.db.update_status(context, models.Listener, listener['id'],
|
||||
operating_status=listener['status'])
|
||||
if lb_const.POOLS in lb_statuses:
|
||||
for pool in lb_statuses[lb_const.POOLS]:
|
||||
self.db.update_status(context, models.PoolV2, pool['id'],
|
||||
operating_status=pool['status'])
|
||||
if lb_const.MEMBERS in lb_statuses:
|
||||
for member in lb_statuses[lb_const.MEMBERS]:
|
||||
self.db.update_status(context, models.MemberV2, member['id'],
|
||||
operating_status=member['status'])
|
||||
|
||||
def statuses(self, context, loadbalancer_id):
|
||||
# Update the LB statuses before letting the plugin display them
|
||||
self.nsx_update_operational_statuses(context, loadbalancer_id,
|
||||
with_members=True)
|
||||
|
||||
# use super code to get the updated statuses
|
||||
return super(LoadBalancerNSXPluginV2, self).statuses(
|
||||
context, loadbalancer_id)
|
||||
|
||||
def get_loadbalancer(self, context, loadbalancer_id, fields=None):
|
||||
# Update the LB status before letting the plugin display it in the
|
||||
# loadbalancer display
|
||||
self.nsx_update_operational_statuses(context, loadbalancer_id)
|
||||
|
||||
return super(LoadBalancerNSXPluginV2, self).get_loadbalancer(
|
||||
context, loadbalancer_id, fields=fields)
|
||||
|
||||
# TODO(asarfaty) : do the implementation for V objects as well
|
@ -1,78 +0,0 @@
|
||||
# Copyright 2015 VMware, Inc.
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
|
||||
from oslo_log import helpers as log_helpers
|
||||
|
||||
from vmware_nsx.services.lbaas import base_mgr
|
||||
from vmware_nsx.services.lbaas import lb_helper
|
||||
from vmware_nsx.services.lbaas import lb_translators
|
||||
from vmware_nsx.services.lbaas.nsx_v.implementation import healthmon_mgr
|
||||
from vmware_nsx.services.lbaas.nsx_v.implementation import l7policy_mgr
|
||||
from vmware_nsx.services.lbaas.nsx_v.implementation import l7rule_mgr
|
||||
from vmware_nsx.services.lbaas.nsx_v.implementation import listener_mgr
|
||||
from vmware_nsx.services.lbaas.nsx_v.implementation import loadbalancer_mgr
|
||||
from vmware_nsx.services.lbaas.nsx_v.implementation import member_mgr
|
||||
from vmware_nsx.services.lbaas.nsx_v.implementation import pool_mgr
|
||||
|
||||
|
||||
class EdgeLoadbalancerDriverV2(base_mgr.LoadbalancerBaseManager):
|
||||
@log_helpers.log_method_call
|
||||
def __init__(self):
|
||||
super(EdgeLoadbalancerDriverV2, self).__init__()
|
||||
# Init all LBaaS objects
|
||||
# Note(asarfaty): self.lbv2_driver is not yet defined at init time
|
||||
# so lambda is used to retrieve it later.
|
||||
self.loadbalancer = lb_helper.LBaaSNSXObjectManagerWrapper(
|
||||
"loadbalancer",
|
||||
loadbalancer_mgr.EdgeLoadBalancerManagerFromDict(self),
|
||||
lb_translators.lb_loadbalancer_obj_to_dict,
|
||||
lambda: self.lbv2_driver.load_balancer)
|
||||
|
||||
self.listener = lb_helper.LBaaSNSXObjectManagerWrapper(
|
||||
"listener",
|
||||
listener_mgr.EdgeListenerManagerFromDict(self),
|
||||
lb_translators.lb_listener_obj_to_dict,
|
||||
lambda: self.lbv2_driver.listener)
|
||||
|
||||
self.pool = lb_helper.LBaaSNSXObjectManagerWrapper(
|
||||
"pool",
|
||||
pool_mgr.EdgePoolManagerFromDict(self),
|
||||
lb_translators.lb_pool_obj_to_dict,
|
||||
lambda: self.lbv2_driver.pool)
|
||||
|
||||
self.member = lb_helper.LBaaSNSXObjectManagerWrapper(
|
||||
"member",
|
||||
member_mgr.EdgeMemberManagerFromDict(self),
|
||||
lb_translators.lb_member_obj_to_dict,
|
||||
lambda: self.lbv2_driver.member)
|
||||
|
||||
self.healthmonitor = lb_helper.LBaaSNSXObjectManagerWrapper(
|
||||
"healthmonitor",
|
||||
healthmon_mgr.EdgeHealthMonitorManagerFromDict(self),
|
||||
lb_translators.lb_hm_obj_to_dict,
|
||||
lambda: self.lbv2_driver.health_monitor)
|
||||
|
||||
self.l7policy = lb_helper.LBaaSNSXObjectManagerWrapper(
|
||||
"l7policy",
|
||||
l7policy_mgr.EdgeL7PolicyManagerFromDict(self),
|
||||
lb_translators.lb_l7policy_obj_to_dict,
|
||||
lambda: self.lbv2_driver.l7policy)
|
||||
|
||||
self.l7rule = lb_helper.LBaaSNSXObjectManagerWrapper(
|
||||
"l7rule",
|
||||
l7rule_mgr.EdgeL7RuleManagerFromDict(self),
|
||||
lb_translators.lb_l7rule_obj_to_dict,
|
||||
lambda: self.lbv2_driver.l7rule)
|
@ -1,197 +0,0 @@
# Copyright 2017 VMware, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from neutron_lib import constants as n_consts
from neutron_lib import exceptions as n_exc
from oslo_log import helpers as log_helpers
from oslo_log import log as logging

from vmware_nsx._i18n import _
from vmware_nsx.db import db as nsx_db
from vmware_nsx.services.lbaas import base_mgr
from vmware_nsx.services.lbaas import lb_helper
from vmware_nsx.services.lbaas import lb_translators
from vmware_nsx.services.lbaas.nsx_v3.implementation import healthmonitor_mgr
from vmware_nsx.services.lbaas.nsx_v3.implementation import l7policy_mgr
from vmware_nsx.services.lbaas.nsx_v3.implementation import l7rule_mgr
from vmware_nsx.services.lbaas.nsx_v3.implementation import listener_mgr
from vmware_nsx.services.lbaas.nsx_v3.implementation import loadbalancer_mgr
from vmware_nsx.services.lbaas.nsx_v3.implementation import member_mgr
from vmware_nsx.services.lbaas.nsx_v3.implementation import pool_mgr
from vmware_nsx.services.lbaas.octavia import constants as oct_const

LOG = logging.getLogger(__name__)


class NotImplementedManager(object):
"""Helper class to make any subclass of LoadBalancerBaseDriver explode if
it is missing any of the required object managers.
"""

def create(self, context, obj):
raise NotImplementedError()

def update(self, context, old_obj, obj):
raise NotImplementedError()

def delete(self, context, obj):
raise NotImplementedError()


class EdgeLoadbalancerDriverV2(base_mgr.LoadbalancerBaseManager):
@log_helpers.log_method_call
def __init__(self):
super(EdgeLoadbalancerDriverV2, self).__init__()

# Init all LBaaS objects
# Note(asarfaty): self.lbv2_driver is not yet defined at init time
# so lambda is used to retrieve it later.
self.loadbalancer = lb_helper.LBaaSNSXObjectManagerWrapper(
"loadbalancer",
loadbalancer_mgr.EdgeLoadBalancerManagerFromDict(),
lb_translators.lb_loadbalancer_obj_to_dict,
lambda: self.lbv2_driver.load_balancer)

self.listener = lb_helper.LBaaSNSXObjectManagerWrapper(
"listener",
listener_mgr.EdgeListenerManagerFromDict(),
lb_translators.lb_listener_obj_to_dict,
lambda: self.lbv2_driver.listener)

self.pool = lb_helper.LBaaSNSXObjectManagerWrapper(
"pool",
pool_mgr.EdgePoolManagerFromDict(),
lb_translators.lb_pool_obj_to_dict,
lambda: self.lbv2_driver.pool)

self.member = lb_helper.LBaaSNSXObjectManagerWrapper(
"member",
member_mgr.EdgeMemberManagerFromDict(),
lb_translators.lb_member_obj_to_dict,
lambda: self.lbv2_driver.member)

self.healthmonitor = lb_helper.LBaaSNSXObjectManagerWrapper(
"healthmonitor",
healthmonitor_mgr.EdgeHealthMonitorManagerFromDict(),
lb_translators.lb_hm_obj_to_dict,
lambda: self.lbv2_driver.health_monitor)

self.l7policy = lb_helper.LBaaSNSXObjectManagerWrapper(
"l7policy",
l7policy_mgr.EdgeL7PolicyManagerFromDict(),
lb_translators.lb_l7policy_obj_to_dict,
lambda: self.lbv2_driver.l7policy)

self.l7rule = lb_helper.LBaaSNSXObjectManagerWrapper(
"l7rule",
l7rule_mgr.EdgeL7RuleManagerFromDict(),
lb_translators.lb_l7rule_obj_to_dict,
lambda: self.lbv2_driver.l7rule)

self._subscribe_router_delete_callback()

def _subscribe_router_delete_callback(self):
# Check if there is any LB attachment for the NSX router.
# This callback is subscribed here to prevent router/GW/interface
# deletion if it still has LB service attached to it.

#Note(asarfaty): Those callbacks are used by Octavia as well even
# though they are bound only here
registry.subscribe(self._check_lb_service_on_router,
resources.ROUTER, events.BEFORE_DELETE)
registry.subscribe(self._check_lb_service_on_router,
resources.ROUTER_GATEWAY, events.BEFORE_DELETE)
registry.subscribe(self._check_lb_service_on_router_interface,
resources.ROUTER_INTERFACE, events.BEFORE_DELETE)

def _unsubscribe_router_delete_callback(self):
registry.unsubscribe(self._check_lb_service_on_router,
resources.ROUTER, events.BEFORE_DELETE)
registry.unsubscribe(self._check_lb_service_on_router,
resources.ROUTER_GATEWAY, events.BEFORE_DELETE)
registry.unsubscribe(self._check_lb_service_on_router_interface,
resources.ROUTER_INTERFACE, events.BEFORE_DELETE)

def _get_lb_ports(self, context, subnet_ids):
dev_owner_v2 = n_consts.DEVICE_OWNER_LOADBALANCERV2
dev_owner_oct = oct_const.DEVICE_OWNER_OCTAVIA
filters = {'device_owner': [dev_owner_v2, dev_owner_oct],
'fixed_ips': {'subnet_id': subnet_ids}}
return self.loadbalancer.core_plugin.get_ports(
context, filters=filters)

def _check_lb_service_on_router(self, resource, event, trigger,
payload=None):
"""Prevent removing a router GW or deleting a router used by LB"""
router_id = payload.resource_id
context = payload.context
nsx_router_id = nsx_db.get_nsx_router_id(context.session,
router_id)
if not nsx_router_id:
# Skip non-v3 routers (could be a V router in case of TVD plugin)
return
nsxlib = self.loadbalancer.core_plugin.nsxlib
service_client = nsxlib.load_balancer.service
# Check if there is any lb service on nsx router
lb_service = service_client.get_router_lb_service(nsx_router_id)
if lb_service:
msg = _('Cannot delete a %s as it still has lb service '
'attachment') % resource
raise n_exc.BadRequest(resource='lbaas-lb', msg=msg)

# Also check if there are any loadbalancers attached to this router
# subnets
router_subnets = self.loadbalancer.core_plugin._find_router_subnets(
context.elevated(), router_id)
subnet_ids = [subnet['id'] for subnet in router_subnets]
if subnet_ids and self._get_lb_ports(context.elevated(), subnet_ids):
msg = (_('Cannot delete a %s as it used by a loadbalancer') %
resource)
raise n_exc.BadRequest(resource='lbaas-lb', msg=msg)

def _check_lb_service_on_router_interface(
self, resource, event, trigger, payload=None):
# Prevent removing the interface of an LB subnet from a router
router_id = payload.resource_id
subnet_id = payload.metadata.get('subnet_id')
if not router_id or not subnet_id:
return

nsx_router_id = nsx_db.get_nsx_router_id(payload.context.session,
router_id)
if not nsx_router_id:
# Skip non-v3 routers (could be a V router in case of TVD plugin)
return

# get LB ports and check if any loadbalancer is using this subnet
if self._get_lb_ports(payload.context.elevated(), [subnet_id]):
msg = _('Cannot delete a router interface as it used by a '
'loadbalancer')
raise n_exc.BadRequest(resource='lbaas-lb', msg=msg)


class DummyLoadbalancerDriverV2(object):
@log_helpers.log_method_call
def __init__(self):
self.loadbalancer = NotImplementedManager()
self.listener = NotImplementedManager()
self.pool = NotImplementedManager()
self.member = NotImplementedManager()
self.health_monitor = NotImplementedManager()
self.l7policy = NotImplementedManager()
self.l7rule = NotImplementedManager()
@ -18,7 +18,12 @@ import time

import eventlet

from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from neutron_lib import constants as n_consts
from neutron_lib import context as neutron_context
from neutron_lib import exceptions as n_exc
from oslo_config import cfg
from oslo_log import helpers as log_helpers
from oslo_log import log as logging
@ -27,12 +32,6 @@ from oslo_messaging.rpc import dispatcher

from vmware_nsx.services.lbaas.octavia import constants

try:
from neutron_lbaas.db.loadbalancer import models
except ImportError:
# LBaaS project not found.
from vmware_nsx.services.lbaas import lbaas_mocks as models

LOG = logging.getLogger(__name__)

@ -87,6 +86,80 @@ class NSXOctaviaListenerEndpoint(object):
self.l7policy = l7policy
self.l7rule = l7rule

self._subscribe_router_delete_callback()

def _subscribe_router_delete_callback(self):
# Check if there is any LB attachment for the NSX router.
# This callback is subscribed here to prevent router/GW/interface
# deletion if it still has LB service attached to it.

#Note(asarfaty): Those callbacks are used by Octavia as well even
# though they are bound only here
registry.subscribe(self._check_lb_service_on_router,
resources.ROUTER, events.BEFORE_DELETE)
registry.subscribe(self._check_lb_service_on_router,
resources.ROUTER_GATEWAY, events.BEFORE_DELETE)
registry.subscribe(self._check_lb_service_on_router_interface,
resources.ROUTER_INTERFACE, events.BEFORE_DELETE)

def _unsubscribe_router_delete_callback(self):
registry.unsubscribe(self._check_lb_service_on_router,
resources.ROUTER, events.BEFORE_DELETE)
registry.unsubscribe(self._check_lb_service_on_router,
resources.ROUTER_GATEWAY, events.BEFORE_DELETE)
registry.unsubscribe(self._check_lb_service_on_router_interface,
resources.ROUTER_INTERFACE, events.BEFORE_DELETE)

def _get_core_plugin(self, context, project_id=None):
core_plugin = self.loadbalancer.core_plugin
if core_plugin.is_tvd_plugin():
# get the right plugin for this project
# (if project_id is None, the default one will be returned)
core_plugin = core_plugin._get_plugin_from_project(
context, project_id)
return core_plugin

def _get_default_core_plugin(self, context):
return self._get_core_plugin(context, project_id=None)

def _get_lb_ports(self, context, subnet_ids):
dev_owner_v2 = n_consts.DEVICE_OWNER_LOADBALANCERV2
dev_owner_oct = constants.DEVICE_OWNER_OCTAVIA
filters = {'device_owner': [dev_owner_v2, dev_owner_oct],
'fixed_ips': {'subnet_id': subnet_ids}}
core_plugin = self._get_default_core_plugin(context)
return core_plugin.get_ports(context, filters=filters)

def _check_lb_service_on_router(self, resource, event, trigger,
payload=None):
"""Prevent removing a router GW or deleting a router used by LB"""
router_id = payload.resource_id
# get the default core plugin so we can get the router project
default_core_plugin = self._get_default_core_plugin(payload.context)
router = default_core_plugin.get_router(payload.context, router_id)
# get the real core plugin
core_plugin = self._get_core_plugin(
payload.context, router['project_id'])
if core_plugin.service_router_has_loadbalancers(
payload.context, router_id):
msg = _('Cannot delete a %s as it still has lb service '
'attachment') % resource
raise n_exc.BadRequest(resource='lbaas-lb', msg=msg)

def _check_lb_service_on_router_interface(
self, resource, event, trigger, payload=None):
# Prevent removing the interface of an LB subnet from a router
router_id = payload.resource_id
subnet_id = payload.metadata.get('subnet_id')
if not router_id or not subnet_id:
return

# get LB ports and check if any loadbalancer is using this subnet
if self._get_lb_ports(payload.context.elevated(), [subnet_id]):
msg = _('Cannot delete a router interface as it used by a '
'loadbalancer')
raise n_exc.BadRequest(resource='lbaas-lb', msg=msg)

def get_completor_func(self, obj_type, obj, delete=False, cascade=False):
# return a method that will be called on success/failure completion
def completor_func(success=True):
@ -509,20 +582,6 @@ class NSXOctaviaStatisticsCollector(object):
time.sleep(interval)
self.collect()

def _get_nl_loadbalancers(self, context):
"""Getting the list of neutron-lbaas loadbalancers

This is done directly from the neutron-lbaas DB to also support the
case that the plugin is currently unavailable, but entries already
exist on the DB.
"""
if not hasattr(models.LoadBalancer, '__tablename__'):
# No neutron-lbaas on this deployment
return []

nl_loadbalancers = context.session.query(models.LoadBalancer).all()
return [lb.id for lb in nl_loadbalancers]

def collect(self):
if not self.core_plugin.octavia_listener:
return
@ -530,14 +589,8 @@
endpoint = self.core_plugin.octavia_listener.endpoints[0]
context = neutron_context.get_admin_context()

# get the statistics of all the Octavia loadbalancers/listeners while
# ignoring the neutron-lbaas loadbalancers.
# Note(asarfaty): The Octavia plugin/DB is unavailable from the
# neutron context, so there is no option to query the Octavia DB for
# the relevant loadbalancers.
nl_loadbalancers = self._get_nl_loadbalancers(context)
listeners_stats = self.listener_stats_getter(
context, self.core_plugin, ignore_list=nl_loadbalancers)
context, self.core_plugin)
if not listeners_stats:
# Avoid sending empty stats
return
@ -16,6 +16,8 @@
from oslo_log import log as logging

from neutron_lib import exceptions as n_exc
from neutron_lib.plugins import constants
from neutron_lib.plugins import directory

from vmware_nsx.extensions import projectpluginmap
from vmware_nsx.plugins.nsx import utils as tvd_utils
@ -24,6 +26,7 @@ LOG = logging.getLogger(__name__)


class OctaviaTVDWrapper(object):
_core_plugin = None

def __init__(self, v_manager, t_manager):
self.managers = {}
@ -32,6 +35,16 @@ class OctaviaTVDWrapper(object):
if t_manager:
self.managers[projectpluginmap.NsxPlugins.NSX_T] = t_manager

def _get_plugin(self, plugin_type):
return directory.get_plugin(plugin_type)

@property
def core_plugin(self):
if not self._core_plugin:
self._core_plugin = (
self._get_plugin(constants.CORE))
return self._core_plugin

def _get_manager_by_project(self, context, project_id):
plugin_type = tvd_utils.get_tvd_plugin_type_for_project(
project_id, context=context)
@ -62,7 +62,6 @@ ORPHANED_VNICS = 'orphaned-vnics'
MISSING_EDGES = 'missing-edges'
METADATA = 'metadata'
MISSING_NETWORKS = 'missing-networks'
LBAAS = 'lbaas'
BGP_GW_EDGE = 'bgp-gw-edge'
ROUTING_REDIS_RULE = 'routing-redistribution-rule'
BGP_NEIGHBOUR = 'bgp-neighbour'
@ -1,80 +0,0 @@
# Copyright 2018 VMware, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import logging
import xml.etree.ElementTree as et

from neutron_lbaas.db.loadbalancer import models as nlbaas_v2
from neutron_lib.callbacks import registry

from vmware_nsx.common import locking
from vmware_nsx.plugins.nsx_v.vshield import vcns as nsxv_api
from vmware_nsx.shell.admin.plugins.common import constants
from vmware_nsx.shell.admin.plugins.common.utils import output_header
from vmware_nsx.shell.admin.plugins.nsxv.resources import utils as utils
from vmware_nsx.shell.resources import Operations

LBAAS_FW_SECTION_NAME = 'LBaaS FW Rules'
LOG = logging.getLogger(__name__)


@output_header
def sync_lbaas_dfw_rules(resource, event, trigger, **kwargs):
vcns = utils.get_nsxv_client()
with locking.LockManager.get_lock('lbaas-fw-section'):
fw_section_id = vcns.get_section_id(LBAAS_FW_SECTION_NAME)
if not fw_section_id:
section = et.Element('section')
section.attrib['name'] = LBAAS_FW_SECTION_NAME
sect = vcns.create_section('ip', et.tostring(section))[1]
fw_section_id = et.fromstring(sect).attrib['id']

if not fw_section_id:
LOG.error('No LBaaS FW Section id found')
return

neutron_db = utils.NeutronDbClient()
pools = neutron_db.context.session.query(nlbaas_v2.PoolV2).all()
pool_ids = [pool['id'] for pool in pools]

section_uri = '%s/%s/%s' % (nsxv_api.FIREWALL_PREFIX,
'layer3sections',
fw_section_id)

xml_section_data = vcns.get_section(section_uri)
if xml_section_data:
xml_section = xml_section_data[1]
else:
LOG.info('LBaaS XML section was not found!')
return

section = et.fromstring(xml_section)

for rule in section.findall('.//rule'):
if rule.find('name').text in pool_ids:
LOG.info('Rule %s found and valid', rule.find('name').text)
else:
section.remove(rule)
LOG.info('Rule %s is stale and removed',
rule.find('name').text)

vcns.update_section(section_uri,
et.tostring(section, encoding="us-ascii"),
None)


registry.subscribe(sync_lbaas_dfw_rules,
constants.LBAAS,
Operations.NSX_UPDATE.value)
@ -239,8 +239,6 @@ nsxv_resources = {
constants.BGP_NEIGHBOUR: Resource(constants.BGP_NEIGHBOUR,
[Operations.CREATE.value,
Operations.DELETE.value]),
constants.LBAAS: Resource(constants.LBAAS,
[Operations.NSX_UPDATE.value]),
}

@ -1375,12 +1375,12 @@ class NsxPTestL3NatTest(common_v3.FixExternalNetBaseTest,

# Make sure the LB callback is not called on router deletion
self.lb_mock1 = mock.patch(
"vmware_nsx.services.lbaas.nsx_p.v2.lb_driver_v2."
"EdgeLoadbalancerDriverV2._check_lb_service_on_router")
"vmware_nsx.services.lbaas.octavia.octavia_listener."
"NSXOctaviaListenerEndpoint._check_lb_service_on_router")
self.lb_mock1.start()
self.lb_mock2 = mock.patch(
"vmware_nsx.services.lbaas.nsx_p.v2.lb_driver_v2."
"EdgeLoadbalancerDriverV2._check_lb_service_on_router_interface")
"vmware_nsx.services.lbaas.octavia.octavia_listener."
"NSXOctaviaListenerEndpoint._check_lb_service_on_router_interface")
self.lb_mock2.start()

super(NsxPTestL3NatTest, self).setUp(*args, **kwargs)

@ -17,17 +17,25 @@
import mock
from neutron.services.flavors import flavors_plugin
from neutron.tests import base
from neutron_lbaas.services.loadbalancer import data_models as lb_models
from neutron_lib import context
from neutron_lib import exceptions as n_exc
from oslo_config import cfg

from vmware_nsx.db import nsxv_db
from vmware_nsx.plugins.nsx_v.vshield import vcns_driver
from vmware_nsx.services.lbaas import base_mgr
from vmware_nsx.services.lbaas.nsx_v.implementation import healthmon_mgr
from vmware_nsx.services.lbaas.nsx_v.implementation import l7policy_mgr
from vmware_nsx.services.lbaas.nsx_v.implementation import l7rule_mgr
from vmware_nsx.services.lbaas.nsx_v.implementation import listener_mgr
from vmware_nsx.services.lbaas.nsx_v.implementation import loadbalancer_mgr
from vmware_nsx.services.lbaas.nsx_v.implementation import member_mgr
from vmware_nsx.services.lbaas.nsx_v.implementation import pool_mgr
from vmware_nsx.services.lbaas.nsx_v import lbaas_common as lb_common
from vmware_nsx.services.lbaas.octavia import octavia_listener
from vmware_nsx.tests.unit.services.lbaas import lb_data_models as lb_models
from vmware_nsx.tests.unit.services.lbaas import lb_translators


# TODO(asarfaty): Use octavia api for those tests
LB_VIP = '10.0.0.10'
LB_SUBNET = 'some-subnet'
LB_EDGE_ID = 'edge-x'
@ -97,13 +105,30 @@ class BaseTestEdgeLbaasV2(base.BaseTestCase):
|
||||
def _tested_entity(self):
|
||||
return None
|
||||
|
||||
def completor(self, success=True):
|
||||
self.last_completor_succees = success
|
||||
self.last_completor_called = True
|
||||
|
||||
def setUp(self):
|
||||
super(BaseTestEdgeLbaasV2, self).setUp()
|
||||
|
||||
self.last_completor_succees = False
|
||||
self.last_completor_called = False
|
||||
self.context = context.get_admin_context()
|
||||
callbacks = mock.Mock()
|
||||
callbacks.plugin = mock.Mock()
|
||||
self.edge_driver = vcns_driver.VcnsDriver(callbacks)
|
||||
self.nsx_v = mock.Mock()
|
||||
octavia_objects = {
|
||||
'loadbalancer': loadbalancer_mgr.EdgeLoadBalancerManagerFromDict(
|
||||
self.nsx_v),
|
||||
'listener': listener_mgr.EdgeListenerManagerFromDict(self.nsx_v),
|
||||
'pool': pool_mgr.EdgePoolManagerFromDict(self.nsx_v),
|
||||
'member': member_mgr.EdgeMemberManagerFromDict(self.nsx_v),
|
||||
'healthmonitor': healthmon_mgr.EdgeHealthMonitorManagerFromDict(
|
||||
self.nsx_v),
|
||||
'l7policy': l7policy_mgr.EdgeL7PolicyManagerFromDict(self.nsx_v),
|
||||
'l7rule': l7rule_mgr.EdgeL7RuleManagerFromDict(self.nsx_v)}
|
||||
|
||||
self.edge_driver = octavia_listener.NSXOctaviaListenerEndpoint(
|
||||
**octavia_objects)
|
||||
|
||||
self.lbv2_driver = mock.Mock()
|
||||
self.core_plugin = mock.Mock()
|
||||
@ -156,6 +181,24 @@ class BaseTestEdgeLbaasV2(base.BaseTestCase):
|
||||
value='/images',
|
||||
policy=self.l7policy)
|
||||
|
||||
# Translate LBaaS objects to dictionaries
|
||||
self.lb_dict = lb_translators.lb_loadbalancer_obj_to_dict(
|
||||
self.lb)
|
||||
self.listener_dict = lb_translators.lb_listener_obj_to_dict(
|
||||
self.listener)
|
||||
self.pool_dict = lb_translators.lb_pool_obj_to_dict(
|
||||
self.pool)
|
||||
self.member_dict = lb_translators.lb_member_obj_to_dict(
|
||||
self.member)
|
||||
self.hm_dict = lb_translators.lb_hm_obj_to_dict(
|
||||
self.hm)
|
||||
self.l7policy_dict = lb_translators.lb_l7policy_obj_to_dict(
|
||||
self.l7policy)
|
||||
self.l7rule1_dict = lb_translators.lb_l7rule_obj_to_dict(
|
||||
self.l7rule1)
|
||||
self.l7rule2_dict = lb_translators.lb_l7rule_obj_to_dict(
|
||||
self.l7rule2)
|
||||
|
||||
def tearDown(self):
|
||||
self._unpatch_lb_plugin(self.lbv2_driver, self._tested_entity)
|
||||
super(BaseTestEdgeLbaasV2, self).tearDown()
|
||||
@ -209,22 +252,23 @@ class TestEdgeLbaasV2LoadbalancerOnRtr(BaseTestEdgeLbaasV2):
|
||||
mock_get_edge.return_value = LB_EDGE_ID
|
||||
mock_add_vip_fwr.return_value = LB_VIP_FWR_ID
|
||||
mock_get_lb_binding_by_edge.return_value = []
|
||||
self.edge_driver.loadbalancer.create(self.context, self.lb)
|
||||
self.edge_driver.loadbalancer.create(
|
||||
self.context, self.lb_dict, self.completor)
|
||||
|
||||
if self._deploy_on_router:
|
||||
mock_vip_sec_ip.assert_called_with(self.edge_driver.vcns,
|
||||
mock_vip_sec_ip.assert_called_with(self.edge_driver.pool.vcns,
|
||||
LB_EDGE_ID,
|
||||
LB_VIP)
|
||||
mock_get_edge.assert_called_with(mock.ANY, mock.ANY,
|
||||
LB_SUBNET, LB_TENANT_ID)
|
||||
else:
|
||||
mock_set_fw_rule.assert_called_with(
|
||||
self.edge_driver.vcns, LB_EDGE_ID, 'accept')
|
||||
self.edge_driver.pool.vcns, LB_EDGE_ID, 'accept')
|
||||
mock_get_edge.assert_called_with(mock.ANY, mock.ANY, LB_ID,
|
||||
LB_VIP, mock.ANY,
|
||||
LB_TENANT_ID, 'compact')
|
||||
|
||||
mock_add_vip_fwr.assert_called_with(self.edge_driver.vcns,
|
||||
mock_add_vip_fwr.assert_called_with(self.edge_driver.pool.vcns,
|
||||
LB_EDGE_ID,
|
||||
LB_ID,
|
||||
LB_VIP)
|
||||
@ -233,24 +277,19 @@ class TestEdgeLbaasV2LoadbalancerOnRtr(BaseTestEdgeLbaasV2):
|
||||
LB_EDGE_ID,
|
||||
LB_VIP_FWR_ID,
|
||||
LB_VIP)
|
||||
mock_successful_completion = (
|
||||
self.lbv2_driver.load_balancer.successful_completion)
|
||||
mock_successful_completion.assert_called_with(self.context,
|
||||
self.lb,
|
||||
delete=False)
|
||||
self.assertTrue(self.last_completor_called)
|
||||
self.assertTrue(self.last_completor_succees)
|
||||
mock_enable_edge_acceleration.assert_called_with(
|
||||
self.edge_driver.vcns, LB_EDGE_ID)
|
||||
self.edge_driver.pool.vcns, LB_EDGE_ID)
|
||||
|
||||
def test_update(self):
|
||||
new_lb = lb_models.LoadBalancer(LB_ID, 'yyy-yyy', 'lb-name', 'heh-huh',
|
||||
LB_SUBNET, 'port-id', LB_VIP)
|
||||
|
||||
self.edge_driver.loadbalancer.update(self.context, self.lb, new_lb)
|
||||
|
||||
mock_successful_completion = (
|
||||
self.lbv2_driver.load_balancer.successful_completion)
|
||||
mock_successful_completion.assert_called_with(self.context, new_lb,
|
||||
delete=False)
|
||||
new_lb_dict = lb_translators.lb_loadbalancer_obj_to_dict(new_lb)
|
||||
self.edge_driver.loadbalancer.update(
|
||||
self.context, self.lb_dict, new_lb_dict, self.completor)
|
||||
self.assertTrue(self.last_completor_called)
|
||||
self.assertTrue(self.last_completor_succees)
|
||||
|
||||
def test_delete_old(self):
|
||||
with mock.patch.object(nsxv_db, 'get_nsxv_lbaas_loadbalancer_binding'
|
||||
@ -271,23 +310,21 @@ class TestEdgeLbaasV2LoadbalancerOnRtr(BaseTestEdgeLbaasV2):
|
||||
mock_get_binding.return_value = LB_BINDING
|
||||
mock_get_ports.return_value = []
|
||||
mock_get_r_binding.return_value = {'router_id': 'xxxx'}
|
||||
self.edge_driver.loadbalancer.delete(self.context, self.lb)
|
||||
self.edge_driver.loadbalancer.delete(
|
||||
self.context, self.lb_dict, self.completor)
|
||||
|
||||
mock_del_fwr.assert_called_with(self.edge_driver.vcns,
|
||||
mock_del_fwr.assert_called_with(self.edge_driver.pool.vcns,
|
||||
LB_EDGE_ID,
|
||||
LB_VIP_FWR_ID)
|
||||
mock_vip_sec_ip.assert_called_with(self.edge_driver.vcns,
|
||||
mock_vip_sec_ip.assert_called_with(self.edge_driver.pool.vcns,
|
||||
LB_EDGE_ID,
|
||||
LB_VIP)
|
||||
mock_del_binding.assert_called_with(self.context.session,
|
||||
LB_ID)
|
||||
mock_set_fw_rule.assert_called_with(
|
||||
self.edge_driver.vcns, LB_EDGE_ID, 'deny')
|
||||
mock_successful_completion = (
|
||||
self.lbv2_driver.load_balancer.successful_completion)
|
||||
mock_successful_completion.assert_called_with(self.context,
|
||||
self.lb,
|
||||
delete=True)
|
||||
self.edge_driver.pool.vcns, LB_EDGE_ID, 'deny')
|
||||
self.assertTrue(self.last_completor_called)
|
||||
self.assertTrue(self.last_completor_succees)
|
||||
|
||||
def test_delete_new(self):
|
||||
with mock.patch.object(nsxv_db, 'get_nsxv_lbaas_loadbalancer_binding'
|
||||
@ -306,19 +343,17 @@ class TestEdgeLbaasV2LoadbalancerOnRtr(BaseTestEdgeLbaasV2):
|
||||
mock_get_ports.return_value = []
|
||||
router_id = 'lbaas-xxxx'
|
||||
mock_get_r_binding.return_value = {'router_id': router_id}
|
||||
self.edge_driver.loadbalancer.delete(self.context, self.lb)
|
||||
self.edge_driver.loadbalancer.delete(
|
||||
self.context, self.lb_dict, self.completor)
|
||||
|
||||
mock_del_binding.assert_called_with(self.context.session,
|
||||
LB_ID)
|
||||
mock_set_fw_rule.assert_called_with(
|
||||
self.edge_driver.vcns, LB_EDGE_ID, 'deny')
|
||||
self.edge_driver.pool.vcns, LB_EDGE_ID, 'deny')
|
||||
mock_delete_lrouter.assert_called_with(
|
||||
mock.ANY, 'lbaas-' + LB_ID, dist=False)
|
||||
mock_successful_completion = (
|
||||
self.lbv2_driver.load_balancer.successful_completion)
|
||||
mock_successful_completion.assert_called_with(self.context,
|
||||
self.lb,
|
||||
delete=True)
|
||||
self.assertTrue(self.last_completor_called)
|
||||
self.assertTrue(self.last_completor_succees)
|
||||
|
||||
def test_stats(self):
|
||||
pass
|
||||
@ -360,9 +395,11 @@ class TestEdgeLbaasV2LoadbalancerOnEdge(TestEdgeLbaasV2LoadbalancerOnRtr):
|
||||
mock_add_vip_fwr.return_value = LB_VIP_FWR_ID
|
||||
mock_get_lb_binding_by_edge.return_value = []
|
||||
self.lb.flavor_id = 'dummy'
|
||||
self.edge_driver.loadbalancer.create(self.context, self.lb)
|
||||
lb_dict = lb_translators.lb_loadbalancer_obj_to_dict(self.lb)
|
||||
self.edge_driver.loadbalancer.create(
|
||||
self.context, lb_dict, self.completor)
|
||||
|
||||
mock_add_vip_fwr.assert_called_with(self.edge_driver.vcns,
|
||||
mock_add_vip_fwr.assert_called_with(self.edge_driver.pool.vcns,
|
||||
LB_EDGE_ID,
|
||||
LB_ID,
|
||||
LB_VIP)
|
||||
@ -372,17 +409,14 @@ class TestEdgeLbaasV2LoadbalancerOnEdge(TestEdgeLbaasV2LoadbalancerOnRtr):
|
||||
LB_VIP_FWR_ID,
|
||||
LB_VIP)
|
||||
mock_set_fw_rule.assert_called_with(
|
||||
self.edge_driver.vcns, LB_EDGE_ID, 'accept')
|
||||
self.edge_driver.pool.vcns, LB_EDGE_ID, 'accept')
|
||||
mock_get_edge.assert_called_with(
|
||||
mock.ANY, mock.ANY, LB_ID, LB_VIP,
|
||||
mock.ANY, LB_TENANT_ID, flavor_name)
|
||||
mock_successful_completion = (
|
||||
self.lbv2_driver.load_balancer.successful_completion)
|
||||
mock_successful_completion.assert_called_with(self.context,
|
||||
self.lb,
|
||||
delete=False)
|
||||
self.assertTrue(self.last_completor_called)
|
||||
self.assertTrue(self.last_completor_succees)
|
||||
mock_enable_edge_acceleration.assert_called_with(
|
||||
self.edge_driver.vcns, LB_EDGE_ID)
|
||||
self.edge_driver.pool.vcns, LB_EDGE_ID)
|
||||
self.lb.flavor_id = None
|
||||
|
||||
def test_create_with_illegal_flavor(self):
|
||||
@ -400,10 +434,11 @@ class TestEdgeLbaasV2LoadbalancerOnEdge(TestEdgeLbaasV2LoadbalancerOnRtr):
|
||||
mock_add_vip_fwr.return_value = LB_VIP_FWR_ID
|
||||
mock_get_lb_binding_by_edge.return_value = []
|
||||
self.lb.flavor_id = 'dummy'
|
||||
lb_dict = lb_translators.lb_loadbalancer_obj_to_dict(self.lb)
|
||||
self.assertRaises(
|
||||
n_exc.InvalidInput,
|
||||
self.edge_driver.loadbalancer.create,
|
||||
self.context, self.lb)
|
||||
self.context, lb_dict, self.completor)
|
||||
self.lb.flavor_id = None
|
||||
|
||||
|
||||
@ -418,9 +453,9 @@ class TestEdgeLbaasV2Listener(BaseTestEdgeLbaasV2):
|
||||
def test_create(self):
|
||||
with mock.patch.object(nsxv_db, 'get_nsxv_lbaas_loadbalancer_binding'
|
||||
) as mock_get_lb_binding, \
|
||||
mock.patch.object(self.edge_driver.vcns, 'create_app_profile'
|
||||
mock.patch.object(self.edge_driver.pool.vcns, 'create_app_profile'
|
||||
) as mock_create_app_prof, \
|
||||
mock.patch.object(self.edge_driver.vcns, 'create_vip'
|
||||
mock.patch.object(self.edge_driver.pool.vcns, 'create_vip'
|
||||
) as mock_create_vip, \
|
||||
mock.patch.object(nsxv_db, 'add_nsxv_lbaas_listener_binding'
|
||||
) as mock_add_binding, \
|
||||
@ -432,7 +467,8 @@ class TestEdgeLbaasV2Listener(BaseTestEdgeLbaasV2):
|
||||
mock_create_vip.return_value = (
|
||||
{'location': 'x/' + EDGE_VIP_ID}, None)
|
||||
|
||||
self.edge_driver.listener.create(self.context, self.listener)
|
||||
self.edge_driver.listener.create(
|
||||
self.context, self.listener_dict, self.completor)
|
||||
|
||||
mock_create_app_prof.assert_called_with(LB_EDGE_ID,
|
||||
EDGE_APP_PROF_DEF)
|
||||
@ -441,11 +477,8 @@ class TestEdgeLbaasV2Listener(BaseTestEdgeLbaasV2):
|
||||
mock_add_binding.assert_called_with(
|
||||
self.context.session, LB_ID, LISTENER_ID, EDGE_APP_PROFILE_ID,
|
||||
EDGE_VIP_ID)
|
||||
mock_successful_completion = (
|
||||
self.lbv2_driver.listener.successful_completion)
|
||||
mock_successful_completion.assert_called_with(self.context,
|
||||
self.listener,
|
||||
delete=False)
|
||||
self.assertTrue(self.last_completor_called)
|
||||
self.assertTrue(self.last_completor_succees)
|
||||
|
||||
def test_update(self):
|
||||
new_listener = lb_models.Listener(LISTENER_ID, LB_TENANT_ID,
|
||||
@ -454,6 +487,8 @@ class TestEdgeLbaasV2Listener(BaseTestEdgeLbaasV2):
|
||||
loadbalancer=self.lb,
|
||||
admin_state_up=True)
|
||||
new_listener.default_pool = self.pool
|
||||
new_listener_dict = lb_translators.lb_listener_obj_to_dict(
|
||||
new_listener)
|
||||
|
||||
with mock.patch.object(nsxv_db, 'get_nsxv_lbaas_listener_binding'
|
||||
) as mock_get_listener_binding, \
|
||||
@ -461,15 +496,16 @@ class TestEdgeLbaasV2Listener(BaseTestEdgeLbaasV2):
|
||||
) as mock_get_lb_binding, \
|
||||
mock.patch.object(nsxv_db, 'get_nsxv_lbaas_pool_binding',
|
||||
return_value=None), \
|
||||
mock.patch.object(self.edge_driver.vcns, 'update_app_profile'
|
||||
mock.patch.object(self.edge_driver.pool.vcns, 'update_app_profile'
|
||||
) as mock_upd_app_prof, \
|
||||
mock.patch.object(self.edge_driver.vcns, 'update_vip'
|
||||
mock.patch.object(self.edge_driver.pool.vcns, 'update_vip'
|
||||
) as mock_upd_vip:
|
||||
mock_get_listener_binding.return_value = LISTENER_BINDING
|
||||
mock_get_lb_binding.return_value = LB_BINDING
|
||||
|
||||
self.edge_driver.listener.update(
|
||||
self.context, self.listener, new_listener)
|
||||
self.context, self.listener_dict, new_listener_dict,
|
||||
self.completor)
|
||||
|
||||
mock_upd_app_prof.assert_called_with(LB_EDGE_ID,
|
||||
EDGE_APP_PROFILE_ID,
|
||||
@ -479,38 +515,33 @@ class TestEdgeLbaasV2Listener(BaseTestEdgeLbaasV2):
|
||||
edge_vip_def['port'] = 8000
|
||||
mock_upd_vip.assert_called_with(LB_EDGE_ID, EDGE_VIP_ID,
|
||||
edge_vip_def)
|
||||
mock_successful_completion = (
|
||||
self.lbv2_driver.listener.successful_completion)
|
||||
mock_successful_completion.assert_called_with(self.context,
|
||||
new_listener,
|
||||
delete=False)
|
||||
self.assertTrue(self.last_completor_called)
|
||||
self.assertTrue(self.last_completor_succees)
|
||||
|
||||
def test_delete(self):
|
||||
with mock.patch.object(nsxv_db, 'get_nsxv_lbaas_listener_binding'
|
||||
) as mock_get_listener_binding, \
|
||||
mock.patch.object(nsxv_db, 'get_nsxv_lbaas_loadbalancer_binding'
|
||||
) as mock_get_lb_binding, \
|
||||
mock.patch.object(self.edge_driver.vcns, 'delete_vip'
|
||||
mock.patch.object(self.edge_driver.pool.vcns, 'delete_vip'
|
||||
) as mock_del_vip, \
|
||||
mock.patch.object(self.edge_driver.vcns, 'delete_app_profile'
|
||||
mock.patch.object(self.edge_driver.pool.vcns, 'delete_app_profile'
|
||||
) as mock_del_app_prof, \
|
||||
mock.patch.object(nsxv_db, 'del_nsxv_lbaas_listener_binding'
|
||||
) as mock_del_binding:
|
||||
mock_get_listener_binding.return_value = LISTENER_BINDING
|
||||
mock_get_lb_binding.return_value = LB_BINDING
|
||||
|
||||
self.edge_driver.listener.delete(self.context, self.listener)
|
||||
self.edge_driver.listener.delete(
|
||||
self.context, self.listener_dict, self.completor)
|
||||
|
||||
mock_del_vip.assert_called_with(LB_EDGE_ID, EDGE_VIP_ID)
|
||||
mock_del_app_prof.assert_called_with(LB_EDGE_ID,
|
||||
EDGE_APP_PROFILE_ID)
|
||||
mock_del_binding.assert_called_with(self.context.session,
|
||||
LB_ID, LISTENER_ID)
|
||||
mock_successful_completion = (
|
||||
self.lbv2_driver.listener.successful_completion)
|
||||
mock_successful_completion.assert_called_with(self.context,
|
||||
self.listener,
|
||||
delete=True)
|
||||
self.assertTrue(self.last_completor_called)
|
||||
self.assertTrue(self.last_completor_succees)
|
||||
|
||||
|
||||
class TestEdgeLbaasV2Pool(BaseTestEdgeLbaasV2):
|
||||
@ -526,20 +557,21 @@ class TestEdgeLbaasV2Pool(BaseTestEdgeLbaasV2):
|
||||
) as mock_get_listener_binding, \
|
||||
mock.patch.object(nsxv_db, 'get_nsxv_lbaas_loadbalancer_binding'
|
||||
) as mock_get_lb_binding, \
|
||||
mock.patch.object(self.edge_driver.vcns, 'create_pool'
|
||||
mock.patch.object(self.edge_driver.pool.vcns, 'create_pool'
|
||||
) as mock_create_pool, \
|
||||
mock.patch.object(nsxv_db, 'add_nsxv_lbaas_pool_binding'
|
||||
) as mock_add_binding, \
|
||||
mock.patch.object(self.edge_driver.vcns, 'update_vip'
|
||||
mock.patch.object(self.edge_driver.pool.vcns, 'update_vip'
|
||||
) as mock_upd_vip,\
|
||||
mock.patch.object(self.edge_driver.vcns, 'update_app_profile'
|
||||
mock.patch.object(self.edge_driver.pool.vcns, 'update_app_profile'
|
||||
) as mock_upd_app_prof:
|
||||
mock_get_listener_binding.return_value = LISTENER_BINDING
|
||||
mock_get_lb_binding.return_value = LB_BINDING
|
||||
mock_create_pool.return_value = (
|
||||
{'location': 'x/' + EDGE_POOL_ID}, None)
|
||||
|
||||
self.edge_driver.pool.create(self.context, self.pool)
|
||||
self.edge_driver.pool.create(
|
||||
self.context, self.pool_dict, self.completor)
|
||||
|
||||
mock_create_pool.assert_called_with(LB_EDGE_ID,
|
||||
EDGE_POOL_DEF.copy())
|
||||
@ -552,16 +584,14 @@ class TestEdgeLbaasV2Pool(BaseTestEdgeLbaasV2):
|
||||
mock_upd_app_prof.assert_called_with(LB_EDGE_ID,
|
||||
EDGE_APP_PROFILE_ID,
|
||||
EDGE_APP_PROF_DEF)
|
||||
mock_successful_completion = (
|
||||
self.lbv2_driver.pool.successful_completion)
|
||||
mock_successful_completion.assert_called_with(self.context,
|
||||
self.pool,
|
||||
delete=False)
|
||||
self.assertTrue(self.last_completor_called)
|
||||
self.assertTrue(self.last_completor_succees)
|
||||
|
||||
def test_update(self):
|
||||
new_pool = lb_models.Pool(POOL_ID, LB_TENANT_ID, 'pool-name', '',
|
||||
None, 'HTTP', 'LEAST_CONNECTIONS',
|
||||
listener=self.listener)
|
||||
new_pool_dict = lb_translators.lb_pool_obj_to_dict(new_pool)
|
||||
list_bind = {'app_profile_id': EDGE_APP_PROFILE_ID}
|
||||
with mock.patch.object(nsxv_db, 'get_nsxv_lbaas_loadbalancer_binding'
|
||||
) as mock_get_lb_binding, \
|
||||
@ -569,11 +599,11 @@ class TestEdgeLbaasV2Pool(BaseTestEdgeLbaasV2):
|
||||
) as mock_get_pool_binding,\
|
||||
mock.patch.object(nsxv_db, 'get_nsxv_lbaas_listener_binding',
|
||||
return_value=list_bind),\
|
||||
mock.patch.object(self.edge_driver.vcns, 'update_pool'
|
||||
mock.patch.object(self.edge_driver.pool.vcns, 'update_pool'
|
||||
) as mock_upd_pool,\
|
||||
mock.patch.object(self.edge_driver.vcns, 'get_pool'
|
||||
mock.patch.object(self.edge_driver.pool.vcns, 'get_pool'
|
||||
) as mock_get_pool,\
|
||||
mock.patch.object(self.edge_driver.vcns, 'update_app_profile'
|
||||
mock.patch.object(self.edge_driver.pool.vcns, 'update_app_profile'
|
||||
) as mock_upd_app_prof:
|
||||
mock_get_lb_binding.return_value = LB_BINDING
|
||||
mock_get_pool_binding.return_value = POOL_BINDING
|
||||
@ -581,7 +611,8 @@ class TestEdgeLbaasV2Pool(BaseTestEdgeLbaasV2):
|
||||
fake_edge['monitorId'] = 'monitor-7'
|
||||
fake_edge['member'] = ['member1', 'member2']
|
||||
mock_get_pool.return_value = (None, fake_edge)
|
||||
self.edge_driver.pool.update(self.context, self.pool, new_pool)
|
||||
self.edge_driver.pool.update(
|
||||
self.context, self.pool_dict, new_pool_dict, self.completor)
|
||||
|
||||
edge_pool_def = EDGE_POOL_DEF.copy()
|
||||
edge_pool_def['algorithm'] = 'leastconn'
|
||||
@ -592,11 +623,8 @@ class TestEdgeLbaasV2Pool(BaseTestEdgeLbaasV2):
|
||||
mock_upd_app_prof.assert_called_with(LB_EDGE_ID,
|
||||
EDGE_APP_PROFILE_ID,
|
||||
EDGE_APP_PROF_DEF)
|
||||
mock_successful_completion = (
|
||||
self.lbv2_driver.pool.successful_completion)
|
||||
mock_successful_completion.assert_called_with(self.context,
|
||||
new_pool,
|
||||
delete=False)
|
||||
self.assertTrue(self.last_completor_called)
|
||||
self.assertTrue(self.last_completor_succees)
|
||||
|
||||
def test_delete(self):
|
||||
with mock.patch.object(nsxv_db, 'get_nsxv_lbaas_loadbalancer_binding'
|
||||
@ -605,33 +633,31 @@ class TestEdgeLbaasV2Pool(BaseTestEdgeLbaasV2):
|
||||
) as mock_get_pool_binding,\
|
||||
mock.patch.object(nsxv_db, 'get_nsxv_lbaas_listener_binding'
|
||||
) as mock_get_listener_binding, \
|
||||
mock.patch.object(self.edge_driver.vcns, 'update_vip'
|
||||
mock.patch.object(self.edge_driver.pool.vcns, 'update_vip'
|
||||
) as mock_upd_vip, \
|
||||
mock.patch.object(self.edge_driver.vcns, 'delete_pool'
|
||||
mock.patch.object(self.edge_driver.pool.vcns, 'delete_pool'
|
||||
) as mock_del_pool, \
|
||||
mock.patch.object(nsxv_db, 'del_nsxv_lbaas_pool_binding'
|
||||
) as mock_del_binding,\
|
||||
mock.patch.object(lb_common, 'is_lb_on_router_edge'
|
||||
) as mock_lb_router, \
|
||||
mock.patch.object(self.edge_driver.vcns, 'update_app_profile'
|
||||
mock.patch.object(self.edge_driver.pool.vcns, 'update_app_profile'
|
||||
):
|
||||
mock_get_lb_binding.return_value = LB_BINDING
|
||||
mock_get_pool_binding.return_value = POOL_BINDING
|
||||
mock_get_listener_binding.return_value = LISTENER_BINDING
|
||||
mock_lb_router.return_value = False
|
||||
|
||||
self.edge_driver.pool.delete(self.context, self.pool)
|
||||
self.edge_driver.pool.delete(
|
||||
self.context, self.pool_dict, self.completor)
|
||||
|
||||
mock_upd_vip.assert_called_with(LB_EDGE_ID, EDGE_VIP_ID,
|
||||
EDGE_VIP_DEF)
|
||||
mock_del_pool.assert_called_with(LB_EDGE_ID, EDGE_POOL_ID)
|
||||
mock_del_binding.assert_called_with(
|
||||
self.context.session, LB_ID, POOL_ID)
|
||||
mock_successful_completion = (
|
||||
self.lbv2_driver.pool.successful_completion)
|
||||
mock_successful_completion.assert_called_with(self.context,
|
||||
self.pool,
|
||||
delete=True)
|
||||
self.assertTrue(self.last_completor_called)
|
||||
self.assertTrue(self.last_completor_succees)
|
||||
|
||||
|
||||
class TestEdgeLbaasV2Member(BaseTestEdgeLbaasV2):
|
||||
@ -649,37 +675,36 @@ class TestEdgeLbaasV2Member(BaseTestEdgeLbaasV2):
|
||||
) as mock_get_pool_binding, \
|
||||
mock.patch.object(nsxv_db, 'get_nsxv_router_binding_by_edge'
|
||||
), \
|
||||
mock.patch.object(self.edge_driver.vcns, 'get_pool'
|
||||
mock.patch.object(self.edge_driver.pool.vcns, 'get_pool'
|
||||
) as mock_get_pool, \
|
||||
mock.patch.object(self.edge_driver.vcns, 'update_pool'
|
||||
mock.patch.object(self.edge_driver.pool.vcns, 'update_pool'
|
||||
) as mock_update_pool:
|
||||
mock_get_lb_binding.return_value = LB_BINDING
|
||||
mock_get_pool_binding.return_value = POOL_BINDING
|
||||
mock_get_pool.return_value = (None, EDGE_POOL_DEF.copy())
|
||||
|
||||
self.edge_driver.member.create(self.context, self.member)
|
||||
self.edge_driver.member.create(
|
||||
self.context, self.member_dict, self.completor)
|
||||
|
||||
edge_pool_def = EDGE_POOL_DEF.copy()
|
||||
edge_pool_def['member'] = [EDGE_MEMBER_DEF]
|
||||
mock_update_pool.assert_called_with(
|
||||
LB_EDGE_ID, EDGE_POOL_ID, edge_pool_def)
|
||||
mock_successful_completion = (
|
||||
self.lbv2_driver.member.successful_completion)
|
||||
mock_successful_completion.assert_called_with(self.context,
|
||||
self.member,
|
||||
delete=False)
|
||||
self.assertTrue(self.last_completor_called)
|
||||
self.assertTrue(self.last_completor_succees)
|
||||
|
||||
def test_update(self):
|
||||
new_member = lb_models.Member(MEMBER_ID, LB_TENANT_ID, POOL_ID,
|
||||
MEMBER_ADDRESS, 8000, 1, True,
|
||||
pool=self.pool)
|
||||
new_member_dict = lb_translators.lb_member_obj_to_dict(new_member)
|
||||
with mock.patch.object(nsxv_db, 'get_nsxv_lbaas_loadbalancer_binding'
|
||||
) as mock_get_lb_binding, \
|
||||
mock.patch.object(nsxv_db, 'get_nsxv_lbaas_pool_binding'
|
||||
) as mock_get_pool_binding, \
|
||||
mock.patch.object(self.edge_driver.vcns, 'get_pool'
|
||||
mock.patch.object(self.edge_driver.pool.vcns, 'get_pool'
|
||||
) as mock_get_pool, \
|
||||
mock.patch.object(self.edge_driver.vcns, 'update_pool'
|
||||
mock.patch.object(self.edge_driver.pool.vcns, 'update_pool'
|
||||
) as mock_update_pool:
|
||||
mock_get_lb_binding.return_value = LB_BINDING
|
||||
mock_get_pool_binding.return_value = POOL_BINDING
|
||||
@ -687,8 +712,9 @@ class TestEdgeLbaasV2Member(BaseTestEdgeLbaasV2):
|
||||
edge_pool_def['member'] = [EDGE_MEMBER_DEF]
|
||||
mock_get_pool.return_value = (None, edge_pool_def)
|
||||
|
||||
self.edge_driver.member.update(self.context, self.member,
|
||||
new_member)
|
||||
self.edge_driver.member.update(
|
||||
self.context, self.member_dict,
|
||||
new_member_dict, self.completor)
|
||||
|
||||
edge_member_def = EDGE_MEMBER_DEF.copy()
|
||||
edge_member_def['port'] = 8000
|
||||
@ -697,18 +723,15 @@ class TestEdgeLbaasV2Member(BaseTestEdgeLbaasV2):
|
||||
edge_pool_def['member'] = [edge_member_def]
|
||||
mock_update_pool.assert_called_with(
|
||||
LB_EDGE_ID, EDGE_POOL_ID, edge_pool_def)
|
||||
mock_successful_completion = (
|
||||
self.lbv2_driver.member.successful_completion)
|
||||
mock_successful_completion.assert_called_with(self.context,
|
||||
new_member,
|
||||
delete=False)
|
||||
self.assertTrue(self.last_completor_called)
|
||||
self.assertTrue(self.last_completor_succees)
|
||||
|
||||
def test_delete(self):
|
||||
with mock.patch.object(nsxv_db, 'get_nsxv_lbaas_loadbalancer_binding'
|
||||
) as mock_get_lb_binding, \
|
||||
mock.patch.object(nsxv_db, 'get_nsxv_lbaas_pool_binding'
|
||||
) as mock_get_pool_binding, \
|
||||
mock.patch.object(self.edge_driver.vcns, 'get_pool'
|
||||
mock.patch.object(self.edge_driver.pool.vcns, 'get_pool'
|
||||
) as mock_get_pool, \
|
||||
mock.patch.object(self.core_plugin, 'get_ports'
|
||||
) as mock_get_ports, \
|
||||
@ -716,7 +739,7 @@ class TestEdgeLbaasV2Member(BaseTestEdgeLbaasV2):
|
||||
) as mock_lb_router, \
|
||||
mock.patch.object(lb_common, 'delete_lb_interface'
|
||||
) as mock_del_lb_iface, \
|
||||
mock.patch.object(self.edge_driver.vcns, 'update_pool'
|
||||
mock.patch.object(self.edge_driver.pool.vcns, 'update_pool'
|
||||
) as mock_update_pool:
|
||||
mock_get_lb_binding.return_value = LB_BINDING
|
||||
mock_get_pool_binding.return_value = POOL_BINDING
|
||||
@ -725,18 +748,16 @@ class TestEdgeLbaasV2Member(BaseTestEdgeLbaasV2):
|
||||
edge_pool_def['member'] = [EDGE_MEMBER_DEF]
|
||||
mock_get_pool.return_value = (None, edge_pool_def)
|
||||
mock_get_ports.return_value = []
|
||||
self.edge_driver.member.delete(self.context, self.member)
|
||||
self.edge_driver.member.delete(
|
||||
self.context, self.member_dict, self.completor)
|
||||
|
||||
edge_pool_def['member'] = []
|
||||
mock_update_pool.assert_called_with(
|
||||
LB_EDGE_ID, EDGE_POOL_ID, edge_pool_def)
|
||||
mock_del_lb_iface.assert_called_with(
|
||||
self.context, self.core_plugin, LB_ID, None)
|
||||
mock_successful_completion = (
|
||||
self.lbv2_driver.member.successful_completion)
|
||||
mock_successful_completion.assert_called_with(self.context,
|
||||
self.member,
|
||||
delete=True)
|
||||
self.assertTrue(self.last_completor_called)
|
||||
self.assertTrue(self.last_completor_succees)
|
||||
|
||||
|
||||
class TestEdgeLbaasV2HealthMonitor(BaseTestEdgeLbaasV2):
|
||||
@ -754,13 +775,13 @@ class TestEdgeLbaasV2HealthMonitor(BaseTestEdgeLbaasV2):
|
||||
) as mock_get_pool_binding, \
|
||||
mock.patch.object(nsxv_db, 'get_nsxv_lbaas_monitor_binding'
|
||||
) as mock_get_mon_binding, \
|
||||
mock.patch.object(self.edge_driver.vcns, 'create_health_monitor'
|
||||
) as mock_create_hm, \
|
||||
mock.patch.object(self.edge_driver.pool.vcns,
|
||||
'create_health_monitor') as mock_create_hm, \
|
||||
mock.patch.object(nsxv_db, 'add_nsxv_lbaas_monitor_binding'
|
||||
) as mock_add_hm_binding, \
|
||||
mock.patch.object(self.edge_driver.vcns, 'get_pool'
|
||||
mock.patch.object(self.edge_driver.pool.vcns, 'get_pool'
|
||||
) as mock_get_pool, \
|
||||
mock.patch.object(self.edge_driver.vcns, 'update_pool'
|
||||
mock.patch.object(self.edge_driver.pool.vcns, 'update_pool'
|
||||
) as mock_update_pool:
|
||||
mock_get_lb_binding.return_value = LB_BINDING
|
||||
mock_get_pool_binding.return_value = POOL_BINDING
|
||||
@ -769,7 +790,8 @@ class TestEdgeLbaasV2HealthMonitor(BaseTestEdgeLbaasV2):
|
||||
{'location': 'x/' + EDGE_HM_ID}, None)
|
||||
mock_get_pool.return_value = (None, EDGE_POOL_DEF.copy())
|
||||
|
||||
self.edge_driver.healthmonitor.create(self.context, self.hm)
|
||||
self.edge_driver.healthmonitor.create(
|
||||
self.context, self.hm_dict, self.completor)
|
||||
|
||||
mock_create_hm.assert_called_with(LB_EDGE_ID, EDGE_HM_DEF)
|
||||
mock_add_hm_binding.assert_called_with(
|
||||
@ -779,39 +801,33 @@ class TestEdgeLbaasV2HealthMonitor(BaseTestEdgeLbaasV2):
|
||||
edge_pool_def['monitorId'] = [EDGE_HM_ID]
|
||||
mock_update_pool.assert_called_with(
|
||||
LB_EDGE_ID, EDGE_POOL_ID, edge_pool_def)
|
||||
mock_successful_completion = (
|
||||
self.lbv2_driver.health_monitor.successful_completion)
|
||||
mock_successful_completion.assert_called_with(self.context,
|
||||
self.hm,
|
||||
delete=False)
|
||||
self.assertTrue(self.last_completor_called)
|
||||
self.assertTrue(self.last_completor_succees)
|
||||
|
||||
def test_update(self):
|
||||
new_hm = lb_models.HealthMonitor(HM_ID, LB_TENANT_ID, 'PING', 3, 3,
|
||||
3, pool=self.pool)
|
||||
|
||||
new_hm_dict = lb_translators.lb_hm_obj_to_dict(new_hm)
|
||||
with mock.patch.object(nsxv_db, 'get_nsxv_lbaas_loadbalancer_binding'
|
||||
) as mock_get_lb_binding, \
|
||||
mock.patch.object(nsxv_db, 'get_nsxv_lbaas_pool_binding'
|
||||
) as mock_get_pool_binding, \
|
||||
mock.patch.object(nsxv_db, 'get_nsxv_lbaas_monitor_binding'
|
||||
) as mock_get_mon_binding, \
|
||||
mock.patch.object(self.edge_driver.vcns, 'update_health_monitor'
|
||||
) as mock_upd_hm:
|
||||
mock.patch.object(self.edge_driver.pool.vcns,
|
||||
'update_health_monitor') as mock_upd_hm:
|
||||
mock_get_lb_binding.return_value = LB_BINDING
|
||||
mock_get_pool_binding.return_value = POOL_BINDING
|
||||
mock_get_mon_binding.return_value = HM_BINDING
|
||||
|
||||
self.edge_driver.healthmonitor.update(
|
||||
self.context, self.hm, new_hm)
|
||||
self.context, self.hm_dict, new_hm_dict, self.completor)
|
||||
|
||||
edge_hm_def = EDGE_HM_DEF.copy()
|
||||
edge_hm_def['maxRetries'] = 3
|
||||
mock_upd_hm.assert_called_with(LB_EDGE_ID, EDGE_HM_ID, edge_hm_def)
|
||||
mock_successful_completion = (
|
||||
self.lbv2_driver.health_monitor.successful_completion)
|
||||
mock_successful_completion.assert_called_with(self.context,
|
||||
new_hm,
|
||||
delete=False)
|
||||
self.assertTrue(self.last_completor_called)
|
||||
self.assertTrue(self.last_completor_succees)
|
||||
|
||||
def test_delete(self):
|
||||
with mock.patch.object(nsxv_db, 'get_nsxv_lbaas_loadbalancer_binding'
|
||||
@ -820,11 +836,11 @@ class TestEdgeLbaasV2HealthMonitor(BaseTestEdgeLbaasV2):
|
||||
) as mock_get_pool_binding, \
|
||||
mock.patch.object(nsxv_db, 'get_nsxv_lbaas_monitor_binding'
|
||||
) as mock_get_mon_binding, \
|
||||
mock.patch.object(self.edge_driver.vcns, 'delete_health_monitor'
|
||||
) as mock_del_hm, \
|
||||
mock.patch.object(self.edge_driver.vcns, 'get_pool'
|
||||
mock.patch.object(self.edge_driver.pool.vcns,
|
||||
'delete_health_monitor') as mock_del_hm, \
|
||||
mock.patch.object(self.edge_driver.pool.vcns, 'get_pool'
|
||||
) as mock_get_pool, \
|
||||
mock.patch.object(self.edge_driver.vcns, 'update_pool'
|
||||
mock.patch.object(self.edge_driver.pool.vcns, 'update_pool'
|
||||
) as mock_update_pool, \
|
||||
mock.patch.object(nsxv_db, 'del_nsxv_lbaas_monitor_binding'
|
||||
) as mock_del_binding:
|
||||
@ -837,7 +853,7 @@ class TestEdgeLbaasV2HealthMonitor(BaseTestEdgeLbaasV2):
|
||||
mock_get_pool.return_value = (None, edge_pool_def)
|
||||
|
||||
self.edge_driver.healthmonitor.delete(
|
||||
self.context, self.hm)
|
||||
self.context, self.hm_dict, self.completor)
|
||||
|
||||
mock_del_hm.assert_called_with(LB_EDGE_ID, EDGE_HM_ID)
|
||||
edge_pool_def['monitorId'] = []
|
||||
@ -845,11 +861,8 @@ class TestEdgeLbaasV2HealthMonitor(BaseTestEdgeLbaasV2):
|
||||
LB_EDGE_ID, EDGE_POOL_ID, edge_pool_def)
|
||||
mock_del_binding.assert_called_with(self.context.session, LB_ID,
|
||||
POOL_ID, HM_ID, LB_EDGE_ID)
|
||||
mock_successful_completion = (
|
||||
self.lbv2_driver.health_monitor.successful_completion)
|
||||
mock_successful_completion.assert_called_with(self.context,
|
||||
self.hm,
|
||||
delete=True)
|
||||
self.assertTrue(self.last_completor_called)
|
||||
self.assertTrue(self.last_completor_succees)
|
||||
|
||||
|
||||
class TestEdgeLbaasV2L7Policy(BaseTestEdgeLbaasV2):
|
||||
@ -869,11 +882,11 @@ class TestEdgeLbaasV2L7Policy(BaseTestEdgeLbaasV2):
|
||||
) as mock_get_listener_binding, \
|
||||
mock.patch.object(nsxv_db, 'add_nsxv_lbaas_l7policy_binding'
|
||||
) as mock_add_l7policy_binding,\
|
||||
mock.patch.object(self.edge_driver.vcns, 'create_app_rule'
|
||||
mock.patch.object(self.edge_driver.pool.vcns, 'create_app_rule'
|
||||
) as mock_create_rule, \
|
||||
mock.patch.object(self.edge_driver.vcns, 'get_vip'
|
||||
mock.patch.object(self.edge_driver.pool.vcns, 'get_vip'
|
||||
) as mock_get_vip, \
|
||||
mock.patch.object(self.edge_driver.vcns, 'update_vip'
|
||||
mock.patch.object(self.edge_driver.pool.vcns, 'update_vip'
|
||||
) as mock_upd_vip:
|
||||
mock_get_lb_binding.return_value = LB_BINDING
|
||||
mock_get_l7policy_binding.return_value = L7POL_BINDING
|
||||
@ -882,7 +895,8 @@ class TestEdgeLbaasV2L7Policy(BaseTestEdgeLbaasV2):
|
||||
{'location': 'x/' + EDGE_RULE_ID}, None)
|
||||
mock_get_vip.return_value = (None, EDGE_VIP_DEF.copy())
|
||||
|
||||
self.edge_driver.l7policy.create(self.context, self.l7policy)
|
||||
self.edge_driver.l7policy.create(
|
||||
self.context, self.l7policy_dict, self.completor)
|
||||
|
||||
mock_create_rule.assert_called_with(LB_EDGE_ID,
|
||||
EDGE_L7POL_DEF.copy())
|
||||
@ -893,11 +907,8 @@ class TestEdgeLbaasV2L7Policy(BaseTestEdgeLbaasV2):
|
||||
edge_vip_def['applicationRuleId'] = [EDGE_RULE_ID]
|
||||
mock_upd_vip.assert_called_with(LB_EDGE_ID, EDGE_VIP_ID,
|
||||
edge_vip_def)
|
||||
mock_successful_completion = (
|
||||
self.lbv2_driver.l7policy.successful_completion)
|
||||
mock_successful_completion.assert_called_with(self.context,
|
||||
self.l7policy,
|
||||
delete=False)
|
||||
self.assertTrue(self.last_completor_called)
|
||||
self.assertTrue(self.last_completor_succees)
|
||||
|
||||
def test_update(self):
|
||||
url = 'http://www.test.com'
|
||||
@ -909,18 +920,18 @@ class TestEdgeLbaasV2L7Policy(BaseTestEdgeLbaasV2):
|
||||
redirect_url=url,
|
||||
listener=self.listener,
|
||||
position=2)
|
||||
|
||||
new_pol_dict = lb_translators.lb_l7policy_obj_to_dict(new_pol)
|
||||
with mock.patch.object(nsxv_db, 'get_nsxv_lbaas_l7policy_binding'
|
||||
) as mock_get_l7policy_binding, \
|
||||
mock.patch.object(nsxv_db, 'get_nsxv_lbaas_loadbalancer_binding'
|
||||
) as mock_get_lb_binding, \
|
||||
mock.patch.object(nsxv_db, 'get_nsxv_lbaas_listener_binding'
|
||||
) as mock_get_listener_binding, \
|
||||
mock.patch.object(self.edge_driver.vcns, 'get_vip'
|
||||
mock.patch.object(self.edge_driver.pool.vcns, 'get_vip'
|
||||
) as mock_get_vip, \
|
||||
mock.patch.object(self.edge_driver.vcns, 'update_vip'
|
||||
mock.patch.object(self.edge_driver.pool.vcns, 'update_vip'
|
||||
) as mock_upd_vip, \
|
||||
mock.patch.object(self.edge_driver.vcns, 'update_app_rule'
|
||||
mock.patch.object(self.edge_driver.pool.vcns, 'update_app_rule'
|
||||
) as mock_update_rule:
|
||||
mock_get_lb_binding.return_value = LB_BINDING
|
||||
mock_get_l7policy_binding.return_value = L7POL_BINDING
|
||||
@ -929,19 +940,17 @@ class TestEdgeLbaasV2L7Policy(BaseTestEdgeLbaasV2):
|
||||
edge_vip_def['applicationRuleId'] = [EDGE_RULE_ID]
|
||||
mock_get_vip.return_value = (None, edge_vip_def)
|
||||
|
||||
self.edge_driver.l7policy.update(self.context, self.l7policy,
|
||||
new_pol)
|
||||
self.edge_driver.l7policy.update(
|
||||
self.context, self.l7policy_dict,
|
||||
new_pol_dict, self.completor)
|
||||
|
||||
edge_rule_def = EDGE_L7POL_DEF.copy()
|
||||
edge_rule_def['script'] = "redirect location %s if TRUE" % url
|
||||
mock_update_rule.assert_called_with(
|
||||
LB_EDGE_ID, EDGE_RULE_ID, edge_rule_def)
|
||||
mock_upd_vip.assert_called()
|
||||
mock_successful_completion = (
|
||||
self.lbv2_driver.l7policy.successful_completion)
|
||||
mock_successful_completion.assert_called_with(self.context,
|
||||
new_pol,
|
||||
delete=False)
|
||||
self.assertTrue(self.last_completor_called)
|
||||
self.assertTrue(self.last_completor_succees)
|
||||
|
||||
def test_delete(self):
|
||||
with mock.patch.object(nsxv_db, 'get_nsxv_lbaas_l7policy_binding'
|
||||
@ -954,11 +963,11 @@ class TestEdgeLbaasV2L7Policy(BaseTestEdgeLbaasV2):
|
||||
) as mock_get_pool_binding,\
|
||||
mock.patch.object(nsxv_db, 'get_nsxv_lbaas_listener_binding'
|
||||
) as mock_get_listener_binding, \
|
||||
mock.patch.object(self.edge_driver.vcns, 'delete_app_rule'
|
||||
mock.patch.object(self.edge_driver.pool.vcns, 'delete_app_rule'
|
||||
) as mock_del_app_rule, \
|
||||
mock.patch.object(self.edge_driver.vcns, 'get_vip'
|
||||
mock.patch.object(self.edge_driver.pool.vcns, 'get_vip'
|
||||
) as mock_get_vip, \
|
||||
mock.patch.object(self.edge_driver.vcns, 'update_vip'
|
||||
mock.patch.object(self.edge_driver.pool.vcns, 'update_vip'
|
||||
) as mock_upd_vip:
|
||||
mock_get_lb_binding.return_value = LB_BINDING
|
||||
mock_get_pool_binding.return_value = POOL_BINDING
|
||||
@ -968,7 +977,8 @@ class TestEdgeLbaasV2L7Policy(BaseTestEdgeLbaasV2):
|
||||
edge_vip_def['applicationRuleId'] = [EDGE_RULE_ID]
|
||||
mock_get_vip.return_value = (None, edge_vip_def)
|
||||
|
||||
self.edge_driver.l7policy.delete(self.context, self.l7policy)
|
||||
self.edge_driver.l7policy.delete(
|
||||
self.context, self.l7policy_dict, self.completor)
|
||||
|
||||
edge_vip_def2 = EDGE_VIP_DEF.copy()
|
||||
edge_vip_def2['applicationRuleId'] = []
|
||||
@ -977,11 +987,8 @@ class TestEdgeLbaasV2L7Policy(BaseTestEdgeLbaasV2):
mock_del_app_rule.assert_called_with(LB_EDGE_ID, EDGE_RULE_ID)
mock_del_l7policy_binding.assert_called_with(
self.context.session, L7POL_ID)
mock_successful_completion = (
self.lbv2_driver.l7policy.successful_completion)
mock_successful_completion.assert_called_with(self.context,
self.l7policy,
delete=True)
self.assertTrue(self.last_completor_called)
self.assertTrue(self.last_completor_succees)


class TestEdgeLbaasV2L7Rule(BaseTestEdgeLbaasV2):
@ -995,13 +1002,15 @@ class TestEdgeLbaasV2L7Rule(BaseTestEdgeLbaasV2):
def test_create(self):
with mock.patch.object(nsxv_db, 'get_nsxv_lbaas_l7policy_binding'
) as mock_get_l7policy_binding, \
mock.patch.object(self.edge_driver.vcns, 'update_app_rule'
mock.patch.object(self.edge_driver.pool.vcns, 'update_app_rule'
) as mock_update_rule:
mock_get_l7policy_binding.return_value = L7POL_BINDING

# Create the first rule
self.l7rule1.policy.rules = [self.l7rule1]
self.edge_driver.l7rule.create(self.context, self.l7rule1)
rule1_dict = lb_translators.lb_l7rule_obj_to_dict(self.l7rule1)
self.edge_driver.l7rule.create(
self.context, rule1_dict, self.completor)

edge_rule_def = EDGE_L7POL_DEF.copy()
edge_rule_def['script'] = (
@ -1011,14 +1020,14 @@ class TestEdgeLbaasV2L7Rule(BaseTestEdgeLbaasV2):
mock_update_rule.assert_called_with(
LB_EDGE_ID, EDGE_RULE_ID, edge_rule_def)

mock_successful_completion = (
self.lbv2_driver.l7rule.successful_completion)
mock_successful_completion.assert_called_with(
self.context, self.l7rule1, delete=False)
self.assertTrue(self.last_completor_called)
self.assertTrue(self.last_completor_succees)

# Create the 2nd rule
self.l7rule2.policy.rules = [self.l7rule1, self.l7rule2]
self.edge_driver.l7rule.create(self.context, self.l7rule2)
rule2_dict = lb_translators.lb_l7rule_obj_to_dict(self.l7rule2)
self.edge_driver.l7rule.create(
self.context, rule2_dict, self.completor)

edge_rule_def = EDGE_L7POL_DEF.copy()
edge_rule_def['script'] = (
@ -1029,11 +1038,8 @@ class TestEdgeLbaasV2L7Rule(BaseTestEdgeLbaasV2):
'rule_id2': L7RULE_ID2})
mock_update_rule.assert_called_with(
LB_EDGE_ID, EDGE_RULE_ID, edge_rule_def)

mock_successful_completion = (
self.lbv2_driver.l7rule.successful_completion)
mock_successful_completion.assert_called_with(
self.context, self.l7rule2, delete=False)
self.assertTrue(self.last_completor_called)
self.assertTrue(self.last_completor_succees)

def test_update(self):
new_rule = lb_models.L7Rule(L7RULE_ID1, LB_TENANT_ID,
@ -1044,16 +1050,17 @@ class TestEdgeLbaasV2L7Rule(BaseTestEdgeLbaasV2):
key='key2',
value='val1',
policy=self.l7policy)

with mock.patch.object(nsxv_db, 'get_nsxv_lbaas_l7policy_binding'
) as mock_get_l7policy_binding, \
mock.patch.object(self.edge_driver.vcns, 'update_app_rule'
mock.patch.object(self.edge_driver.pool.vcns, 'update_app_rule'
) as mock_update_rule:
mock_get_l7policy_binding.return_value = L7POL_BINDING

new_rule.policy.rules = [new_rule]
new_rule_dict = lb_translators.lb_l7rule_obj_to_dict(new_rule)
self.edge_driver.l7rule.update(
self.context, self.l7rule1, new_rule)
self.context, self.l7rule1_dict, new_rule_dict,
self.completor)

edge_rule_def = EDGE_L7POL_DEF.copy()
edge_rule_def['script'] = (
@ -1063,20 +1070,20 @@ class TestEdgeLbaasV2L7Rule(BaseTestEdgeLbaasV2):
mock_update_rule.assert_called_with(
LB_EDGE_ID, EDGE_RULE_ID, edge_rule_def)

mock_successful_completion = (
self.lbv2_driver.l7rule.successful_completion)
mock_successful_completion.assert_called_with(
self.context, new_rule, delete=False)
self.assertTrue(self.last_completor_called)
self.assertTrue(self.last_completor_succees)

def test_delete(self):
with mock.patch.object(nsxv_db, 'get_nsxv_lbaas_l7policy_binding'
) as mock_get_l7policy_binding, \
mock.patch.object(self.edge_driver.vcns, 'update_app_rule'
mock.patch.object(self.edge_driver.pool.vcns, 'update_app_rule'
) as mock_update_rule:
mock_get_l7policy_binding.return_value = L7POL_BINDING

self.l7rule1.policy.rules = []
self.edge_driver.l7rule.delete(self.context, self.l7rule1)
rule_dict = lb_translators.lb_l7rule_obj_to_dict(self.l7rule1)
self.edge_driver.l7rule.delete(
self.context, rule_dict, self.completor)

edge_rule_def = EDGE_L7POL_DEF.copy()
edge_rule_def['script'] = (
@ -1084,7 +1091,5 @@ class TestEdgeLbaasV2L7Rule(BaseTestEdgeLbaasV2):
mock_update_rule.assert_called_with(
LB_EDGE_ID, EDGE_RULE_ID, edge_rule_def)

mock_successful_completion = (
self.lbv2_driver.l7rule.successful_completion)
mock_successful_completion.assert_called_with(
self.context, self.l7rule1, delete=True)
self.assertTrue(self.last_completor_called)
self.assertTrue(self.last_completor_succees)
@ -54,8 +54,10 @@ from webob import exc

from vmware_nsx.api_client import exception as api_exc
from vmware_nsx.common import utils
from vmware_nsx.db import db as nsx_db
from vmware_nsx.plugins.nsx_v3 import plugin as nsx_plugin
from vmware_nsx.services.lbaas.nsx_v3.v2 import lb_driver_v2
from vmware_nsx.services.lbaas.nsx_v3.implementation import loadbalancer_mgr
from vmware_nsx.services.lbaas.octavia import octavia_listener
from vmware_nsx.tests import unit as vmware
from vmware_nsx.tests.unit.common_plugin import common_v3
from vmware_nsx.tests.unit.extensions import test_metadata
@ -1913,12 +1915,12 @@ class L3NatTest(test_l3_plugin.L3BaseForIntTests,
mock_nsx_version.start()
# Make sure the LB callback is not called on router deletion
self.lb_mock1 = mock.patch(
"vmware_nsx.services.lbaas.nsx_v3.v2.lb_driver_v2."
"EdgeLoadbalancerDriverV2._check_lb_service_on_router")
"vmware_nsx.services.lbaas.octavia.octavia_listener."
"NSXOctaviaListenerEndpoint._check_lb_service_on_router")
self.lb_mock1.start()
self.lb_mock2 = mock.patch(
"vmware_nsx.services.lbaas.nsx_v3.v2.lb_driver_v2."
"EdgeLoadbalancerDriverV2._check_lb_service_on_router_interface")
"vmware_nsx.services.lbaas.octavia.octavia_listener."
"NSXOctaviaListenerEndpoint._check_lb_service_on_router_interface")
self.lb_mock2.start()

super(L3NatTest, self).setUp(
@ -2313,18 +2315,24 @@ class TestL3NatTestCase(L3NatTest,
self.lb_mock1.stop()
self.lb_mock2.stop()
# Create the LB object - here the delete callback is registered
lb_driver = lb_driver_v2.EdgeLoadbalancerDriverV2()
loadbalancer = loadbalancer_mgr.EdgeLoadBalancerManagerFromDict()
oct_listener = octavia_listener.NSXOctaviaListenerEndpoint(
loadbalancer=loadbalancer)
with self.router() as router:
with mock.patch('vmware_nsxlib.v3.load_balancer.Service.'
'get_router_lb_service'),\
mock.patch('vmware_nsx.db.db.get_nsx_router_id',
return_value=1):
return_value='1'),\
mock.patch.object(
nsx_db,
'has_nsx_lbaas_loadbalancer_binding_by_router',
return_value=True):
self.assertRaises(nc_exc.CallbackFailure,
self.plugin_instance.delete_router,
context.get_admin_context(),
router['router']['id'])
# Unregister callback
lb_driver._unsubscribe_router_delete_callback()
oct_listener._unsubscribe_router_delete_callback()
self.lb_mock1.start()
self.lb_mock2.start()
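The hunks above switch the tests from patching ``self.edge_driver.vcns`` to patching ``self.edge_driver.pool.vcns``, and move the router-delete callback from ``EdgeLoadbalancerDriverV2`` to the Octavia listener endpoint. A minimal sketch of the nested ``mock.patch.object`` pattern those tests rely on; the ``FakeDriver``/``FakePool``/``FakeVcns`` classes here are illustrative stand-ins, not the real driver::

    from unittest import mock

    class FakeVcns(object):
        def update_app_rule(self, edge_id, rule_id, rule_def):
            pass

    class FakePool(object):
        def __init__(self):
            self.vcns = FakeVcns()

    class FakeDriver(object):
        def __init__(self):
            self.pool = FakePool()

    driver = FakeDriver()
    # Patch the attribute on the nested object, as the updated tests do with
    # self.edge_driver.pool.vcns instead of self.edge_driver.vcns.
    with mock.patch.object(driver.pool.vcns, 'update_app_rule') as mock_update:
        driver.pool.vcns.update_app_rule('edge-1', 'rule-1', {'script': 'x'})
        mock_update.assert_called_with('edge-1', 'rule-1', {'script': 'x'})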
vmware_nsx/tests/unit/services/lbaas/lb_constants.py (new file, 172 lines)
@ -0,0 +1,172 @@
|
||||
# Copyright 2019 VMware, Inc.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
LB_METHOD_ROUND_ROBIN = 'ROUND_ROBIN'
|
||||
LB_METHOD_LEAST_CONNECTIONS = 'LEAST_CONNECTIONS'
|
||||
LB_METHOD_SOURCE_IP = 'SOURCE_IP'
|
||||
SUPPORTED_LB_ALGORITHMS = (LB_METHOD_LEAST_CONNECTIONS, LB_METHOD_ROUND_ROBIN,
|
||||
LB_METHOD_SOURCE_IP)
|
||||
|
||||
PROTOCOL_TCP = 'TCP'
|
||||
PROTOCOL_HTTP = 'HTTP'
|
||||
PROTOCOL_HTTPS = 'HTTPS'
|
||||
PROTOCOL_TERMINATED_HTTPS = 'TERMINATED_HTTPS'
|
||||
POOL_SUPPORTED_PROTOCOLS = (PROTOCOL_TCP, PROTOCOL_HTTPS, PROTOCOL_HTTP)
|
||||
LISTENER_SUPPORTED_PROTOCOLS = (PROTOCOL_TCP, PROTOCOL_HTTPS, PROTOCOL_HTTP,
|
||||
PROTOCOL_TERMINATED_HTTPS)
|
||||
|
||||
LISTENER_POOL_COMPATIBLE_PROTOCOLS = (
|
||||
(PROTOCOL_TCP, PROTOCOL_TCP),
|
||||
(PROTOCOL_HTTP, PROTOCOL_HTTP),
|
||||
(PROTOCOL_HTTPS, PROTOCOL_HTTPS),
|
||||
(PROTOCOL_HTTP, PROTOCOL_TERMINATED_HTTPS))
|
||||
|
||||
|
||||
HEALTH_MONITOR_PING = 'PING'
|
||||
HEALTH_MONITOR_TCP = 'TCP'
|
||||
HEALTH_MONITOR_HTTP = 'HTTP'
|
||||
HEALTH_MONITOR_HTTPS = 'HTTPS'
|
||||
|
||||
SUPPORTED_HEALTH_MONITOR_TYPES = (HEALTH_MONITOR_HTTP, HEALTH_MONITOR_HTTPS,
|
||||
HEALTH_MONITOR_PING, HEALTH_MONITOR_TCP)
|
||||
|
||||
HTTP_METHOD_GET = 'GET'
|
||||
HTTP_METHOD_HEAD = 'HEAD'
|
||||
HTTP_METHOD_POST = 'POST'
|
||||
HTTP_METHOD_PUT = 'PUT'
|
||||
HTTP_METHOD_DELETE = 'DELETE'
|
||||
HTTP_METHOD_TRACE = 'TRACE'
|
||||
HTTP_METHOD_OPTIONS = 'OPTIONS'
|
||||
HTTP_METHOD_CONNECT = 'CONNECT'
|
||||
HTTP_METHOD_PATCH = 'PATCH'
|
||||
|
||||
|
||||
SUPPORTED_HTTP_METHODS = (HTTP_METHOD_GET, HTTP_METHOD_HEAD, HTTP_METHOD_POST,
|
||||
HTTP_METHOD_PUT, HTTP_METHOD_DELETE,
|
||||
HTTP_METHOD_TRACE, HTTP_METHOD_OPTIONS,
|
||||
HTTP_METHOD_CONNECT, HTTP_METHOD_PATCH)
|
||||
|
||||
# URL path regex according to RFC 3986
|
||||
# Format: path = "/" *( "/" segment )
|
||||
# segment = *pchar
|
||||
# pchar = unreserved / pct-encoded / sub-delims / ":" / "@"
|
||||
# unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
|
||||
# pct-encoded = "%" HEXDIG HEXDIG
|
||||
# sub-delims = "!" / "$" / "&" / "'" / "(" / ")"
|
||||
# / "*" / "+" / "," / ";" / "="
|
||||
SUPPORTED_URL_PATH = (
|
||||
"^(/([a-zA-Z0-9-._~!$&\'()*+,;=:@]|(%[a-fA-F0-9]{2}))*)+$")
|
||||
|
||||
SESSION_PERSISTENCE_SOURCE_IP = 'SOURCE_IP'
|
||||
SESSION_PERSISTENCE_HTTP_COOKIE = 'HTTP_COOKIE'
|
||||
SESSION_PERSISTENCE_APP_COOKIE = 'APP_COOKIE'
|
||||
SUPPORTED_SP_TYPES = (SESSION_PERSISTENCE_SOURCE_IP,
|
||||
SESSION_PERSISTENCE_HTTP_COOKIE,
|
||||
SESSION_PERSISTENCE_APP_COOKIE)
|
||||
|
||||
L7_RULE_TYPE_HOST_NAME = 'HOST_NAME'
|
||||
L7_RULE_TYPE_PATH = 'PATH'
|
||||
L7_RULE_TYPE_FILE_TYPE = 'FILE_TYPE'
|
||||
L7_RULE_TYPE_HEADER = 'HEADER'
|
||||
L7_RULE_TYPE_COOKIE = 'COOKIE'
|
||||
SUPPORTED_L7_RULE_TYPES = (L7_RULE_TYPE_HOST_NAME,
|
||||
L7_RULE_TYPE_PATH,
|
||||
L7_RULE_TYPE_FILE_TYPE,
|
||||
L7_RULE_TYPE_HEADER,
|
||||
L7_RULE_TYPE_COOKIE)
|
||||
|
||||
L7_RULE_COMPARE_TYPE_REGEX = 'REGEX'
|
||||
L7_RULE_COMPARE_TYPE_STARTS_WITH = 'STARTS_WITH'
|
||||
L7_RULE_COMPARE_TYPE_ENDS_WITH = 'ENDS_WITH'
|
||||
L7_RULE_COMPARE_TYPE_CONTAINS = 'CONTAINS'
|
||||
L7_RULE_COMPARE_TYPE_EQUAL_TO = 'EQUAL_TO'
|
||||
SUPPORTED_L7_RULE_COMPARE_TYPES = (L7_RULE_COMPARE_TYPE_REGEX,
|
||||
L7_RULE_COMPARE_TYPE_STARTS_WITH,
|
||||
L7_RULE_COMPARE_TYPE_ENDS_WITH,
|
||||
L7_RULE_COMPARE_TYPE_CONTAINS,
|
||||
L7_RULE_COMPARE_TYPE_EQUAL_TO)
|
||||
|
||||
L7_POLICY_ACTION_REJECT = 'REJECT'
|
||||
L7_POLICY_ACTION_REDIRECT_TO_POOL = 'REDIRECT_TO_POOL'
|
||||
L7_POLICY_ACTION_REDIRECT_TO_URL = 'REDIRECT_TO_URL'
|
||||
SUPPORTED_L7_POLICY_ACTIONS = (L7_POLICY_ACTION_REJECT,
|
||||
L7_POLICY_ACTION_REDIRECT_TO_POOL,
|
||||
L7_POLICY_ACTION_REDIRECT_TO_URL)
|
||||
|
||||
URL_REGEX = "http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*(),]|\
|
||||
(?:%[0-9a-fA-F][0-9a-fA-F]))+"
|
||||
|
||||
# See RFCs 2616, 2965, 6265, 7230: Should match characters valid in a
|
||||
# http header or cookie name.
|
||||
HTTP_HEADER_COOKIE_NAME_REGEX = r'\A[a-zA-Z0-9!#$%&\'*+-.^_`|~]+\Z'
|
||||
|
||||
# See RFCs 2616, 2965, 6265: Should match characters valid in a cookie value.
|
||||
HTTP_COOKIE_VALUE_REGEX = r'\A[a-zA-Z0-9!#$%&\'()*+-./:<=>?@[\]^_`{|}~]+\Z'
|
||||
|
||||
# See RFC 7230: Should match characters valid in a header value.
|
||||
HTTP_HEADER_VALUE_REGEX = (r'\A[a-zA-Z0-9'
|
||||
r'!"#$%&\'()*+,-./:;<=>?@[\]^_`{|}~\\]+\Z')
|
||||
|
||||
# Also in RFC 7230: Should match characters valid in a header value
|
||||
# when quoted with double quotes.
|
||||
HTTP_QUOTED_HEADER_VALUE_REGEX = (r'\A"[a-zA-Z0-9 \t'
|
||||
r'!"#$%&\'()*+,-./:;<=>?@[\]^_`{|}~\\]*"\Z')
|
||||
|
||||
STATS_ACTIVE_CONNECTIONS = 'active_connections'
|
||||
STATS_MAX_CONNECTIONS = 'max_connections'
|
||||
STATS_TOTAL_CONNECTIONS = 'total_connections'
|
||||
STATS_CURRENT_SESSIONS = 'current_sessions'
|
||||
STATS_MAX_SESSIONS = 'max_sessions'
|
||||
STATS_TOTAL_SESSIONS = 'total_sessions'
|
||||
STATS_IN_BYTES = 'bytes_in'
|
||||
STATS_OUT_BYTES = 'bytes_out'
|
||||
STATS_CONNECTION_ERRORS = 'connection_errors'
|
||||
STATS_RESPONSE_ERRORS = 'response_errors'
|
||||
STATS_STATUS = 'status'
|
||||
STATS_HEALTH = 'health'
|
||||
STATS_FAILED_CHECKS = 'failed_checks'
|
||||
|
||||
# Constants to extend status strings in neutron.plugins.common.constants
|
||||
ONLINE = 'ONLINE'
|
||||
OFFLINE = 'OFFLINE'
|
||||
DEGRADED = 'DEGRADED'
|
||||
DISABLED = 'DISABLED'
|
||||
NO_MONITOR = 'NO_MONITOR'
|
||||
OPERATING_STATUSES = (ONLINE, OFFLINE, DEGRADED, DISABLED, NO_MONITOR)
|
||||
|
||||
NO_CHECK = 'no check'
|
||||
|
||||
# LBaaS V2 Agent Constants
|
||||
LBAAS_AGENT_SCHEDULER_V2_EXT_ALIAS = 'lbaas_agent_schedulerv2'
|
||||
AGENT_TYPE_LOADBALANCERV2 = 'Loadbalancerv2 agent'
|
||||
LOADBALANCER_PLUGINV2 = 'n-lbaasv2-plugin'
|
||||
LOADBALANCER_AGENTV2 = 'n-lbaasv2_agent'
|
||||
|
||||
LOADBALANCER = "LOADBALANCER"
|
||||
LOADBALANCERV2 = "LOADBALANCERV2"
|
||||
|
||||
# Used to check number of connections per second allowed
|
||||
# for the LBaaS V1 vip and LBaaS V2 listeners. -1 indicates
|
||||
# no limit, the value cannot be less than -1.
|
||||
MIN_CONNECT_VALUE = -1
|
||||
|
||||
# LBaas V2 Table entities
|
||||
LISTENER_EVENT = 'listener'
|
||||
LISTENER_STATS_EVENT = 'listener_stats'
|
||||
LOADBALANCER_EVENT = 'loadbalancer'
|
||||
LOADBALANCER_STATS_EVENT = 'loadbalancer_stats'
|
||||
MEMBER_EVENT = 'member'
|
||||
OPERATING_STATUS = 'operating_status'
|
||||
POOL_EVENT = 'pool'
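The regex constants in this test-only module are plain Python patterns; for example, ``SUPPORTED_URL_PATH`` above accepts only absolute paths. A minimal sanity-check sketch, assuming the module is imported from its new location::

    import re

    from vmware_nsx.tests.unit.services.lbaas import lb_constants

    # Health-monitor url_path values must be absolute paths per RFC 3986.
    assert re.match(lb_constants.SUPPORTED_URL_PATH, '/healthz')
    assert re.match(lb_constants.SUPPORTED_URL_PATH, 'no-leading-slash') is None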
vmware_nsx/tests/unit/services/lbaas/lb_data_models.py (new file, 885 lines)
@ -0,0 +1,885 @@
|
||||
# Copyright 2019 VMware, Inc.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
"""
|
||||
This module holds the data models for the load balancer service plugin. These
|
||||
are meant simply as replacement data structures for dictionaries and
|
||||
SQLAlchemy models. Using dictionaries as data containers for many components
|
||||
causes readability issues and does not intuitively give the benefits of what
|
||||
classes and OO give. Using SQLAlchemy models as data containers for many
|
||||
components can become an issue if you do not want to give certain components
|
||||
access to the database.
|
||||
|
||||
These data models do provide methods for instantiation from SQLAlchemy models
|
||||
and also converting to dictionaries.
|
||||
"""
|
||||
|
||||
from neutron.db.models import servicetype as servicetype_db
|
||||
from neutron.db import models_v2
|
||||
from neutron_lib.db import model_base
|
||||
import six
|
||||
from sqlalchemy.ext import orderinglist
|
||||
from sqlalchemy.orm import collections
|
||||
|
||||
from vmware_nsx.tests.unit.services.lbaas import lb_db_models as models
|
||||
|
||||
L7_POLICY_ACTION_REDIRECT_TO_POOL = 'REDIRECT_TO_POOL'
|
||||
HEALTH_MONITOR_PING = 'PING'
|
||||
HEALTH_MONITOR_TCP = 'TCP'
|
||||
|
||||
|
||||
class BaseDataModel(object):
|
||||
|
||||
# NOTE(ihrachys): we could reuse the list to provide a default __init__
|
||||
# implementation. That would require handling custom default values though.
|
||||
fields = []
|
||||
|
||||
def to_dict(self, **kwargs):
|
||||
ret = {}
|
||||
for attr in self.__dict__:
|
||||
if attr.startswith('_') or not kwargs.get(attr, True):
|
||||
continue
|
||||
value = self.__dict__[attr]
|
||||
if isinstance(getattr(self, attr), list):
|
||||
ret[attr] = []
|
||||
for item in value:
|
||||
if isinstance(item, BaseDataModel):
|
||||
ret[attr].append(item.to_dict())
|
||||
else:
|
||||
ret[attr] = item
|
||||
elif isinstance(getattr(self, attr), BaseDataModel):
|
||||
ret[attr] = value.to_dict()
|
||||
elif six.PY2 and isinstance(value, six.text_type):
|
||||
ret[attr.encode('utf8')] = value.encode('utf8')
|
||||
else:
|
||||
ret[attr] = value
|
||||
return ret
|
||||
|
||||
def to_api_dict(self, **kwargs):
|
||||
return {}
|
||||
|
||||
@classmethod
|
||||
def from_dict(cls, model_dict):
|
||||
fields = {k: v for k, v in model_dict.items()
|
||||
if k in cls.fields}
|
||||
return cls(**fields)
|
||||
|
||||
@classmethod
|
||||
def from_sqlalchemy_model(cls, sa_model, calling_classes=None):
|
||||
calling_classes = calling_classes or []
|
||||
attr_mapping = vars(cls).get("attr_mapping")
|
||||
instance = cls()
|
||||
for attr_name in cls.fields:
|
||||
if attr_name.startswith('_'):
|
||||
continue
|
||||
if attr_mapping and attr_name in attr_mapping.keys():
|
||||
attr = getattr(sa_model, attr_mapping[attr_name])
|
||||
elif hasattr(sa_model, attr_name):
|
||||
attr = getattr(sa_model, attr_name)
|
||||
else:
|
||||
continue
|
||||
# Handles M:1 or 1:1 relationships
|
||||
if isinstance(attr, model_base.BASEV2):
|
||||
if hasattr(instance, attr_name):
|
||||
data_class = SA_MODEL_TO_DATA_MODEL_MAP[attr.__class__]
|
||||
# Don't recurse down object classes too far. If we have
|
||||
# seen the same object class more than twice, we are
|
||||
# probably in a loop.
|
||||
if data_class and calling_classes.count(data_class) < 2:
|
||||
setattr(instance, attr_name,
|
||||
data_class.from_sqlalchemy_model(
|
||||
attr,
|
||||
calling_classes=calling_classes + [cls]))
|
||||
# Handles 1:M or N:M relationships
|
||||
elif (isinstance(attr, collections.InstrumentedList) or
|
||||
isinstance(attr, orderinglist.OrderingList)):
|
||||
for item in attr:
|
||||
if hasattr(instance, attr_name):
|
||||
data_class = SA_MODEL_TO_DATA_MODEL_MAP[item.__class__]
|
||||
# Don't recurse down object classes too far. If we have
|
||||
# seen the same object class more than twice, we are
|
||||
# probably in a loop.
|
||||
if (data_class and
|
||||
calling_classes.count(data_class) < 2):
|
||||
attr_list = getattr(instance, attr_name) or []
|
||||
attr_list.append(data_class.from_sqlalchemy_model(
|
||||
item, calling_classes=calling_classes + [cls]))
|
||||
setattr(instance, attr_name, attr_list)
|
||||
# This isn't a relationship so it must be a "primitive"
|
||||
else:
|
||||
setattr(instance, attr_name, attr)
|
||||
return instance
|
||||
|
||||
@property
|
||||
def root_loadbalancer(self):
|
||||
"""Returns the loadbalancer this instance is attached to."""
|
||||
if isinstance(self, LoadBalancer):
|
||||
lb = self
|
||||
elif isinstance(self, Listener):
|
||||
lb = self.loadbalancer
|
||||
elif isinstance(self, L7Policy):
|
||||
lb = self.listener.loadbalancer
|
||||
elif isinstance(self, L7Rule):
|
||||
lb = self.policy.listener.loadbalancer
|
||||
elif isinstance(self, Pool):
|
||||
lb = self.loadbalancer
|
||||
elif isinstance(self, SNI):
|
||||
lb = self.listener.loadbalancer
|
||||
else:
|
||||
# Pool Member or Health Monitor
|
||||
lb = self.pool.loadbalancer
|
||||
return lb
|
||||
|
||||
|
||||
# NOTE(brandon-logan) AllocationPool, HostRoute, Subnet, IPAllocation, Port,
|
||||
# and ProviderResourceAssociation are defined here because there aren't any
|
||||
# data_models defined in core neutron or neutron services. Instead of jumping
|
||||
# through the hoops to create those I've just defined them here. If ever
|
||||
# data_models or similar are defined in those packages, those should be used
|
||||
# instead of these.
|
||||
class AllocationPool(BaseDataModel):
|
||||
|
||||
fields = ['start', 'end']
|
||||
|
||||
def __init__(self, start=None, end=None):
|
||||
self.start = start
|
||||
self.end = end
|
||||
|
||||
|
||||
class HostRoute(BaseDataModel):
|
||||
|
||||
fields = ['destination', 'nexthop']
|
||||
|
||||
def __init__(self, destination=None, nexthop=None):
|
||||
self.destination = destination
|
||||
self.nexthop = nexthop
|
||||
|
||||
|
||||
class Network(BaseDataModel):
|
||||
|
||||
fields = ['id', 'name', 'description', 'mtu']
|
||||
|
||||
def __init__(self, id=None, name=None, description=None, mtu=None):
|
||||
self.id = id
|
||||
self.name = name
|
||||
self.description = description
|
||||
self.mtu = mtu
|
||||
|
||||
|
||||
class Subnet(BaseDataModel):
|
||||
|
||||
fields = ['id', 'name', 'tenant_id', 'network_id', 'ip_version', 'cidr',
|
||||
'gateway_ip', 'enable_dhcp', 'ipv6_ra_mode', 'ipv6_address_mode',
|
||||
'shared', 'dns_nameservers', 'host_routes', 'allocation_pools',
|
||||
'subnetpool_id']
|
||||
|
||||
def __init__(self, id=None, name=None, tenant_id=None, network_id=None,
|
||||
ip_version=None, cidr=None, gateway_ip=None, enable_dhcp=None,
|
||||
ipv6_ra_mode=None, ipv6_address_mode=None, shared=None,
|
||||
dns_nameservers=None, host_routes=None, allocation_pools=None,
|
||||
subnetpool_id=None):
|
||||
self.id = id
|
||||
self.name = name
|
||||
self.tenant_id = tenant_id
|
||||
self.network_id = network_id
|
||||
self.ip_version = ip_version
|
||||
self.cidr = cidr
|
||||
self.gateway_ip = gateway_ip
|
||||
self.enable_dhcp = enable_dhcp
|
||||
self.ipv6_ra_mode = ipv6_ra_mode
|
||||
self.ipv6_address_mode = ipv6_address_mode
|
||||
self.shared = shared
|
||||
self.dns_nameservers = dns_nameservers
|
||||
self.host_routes = host_routes
|
||||
self.allocation_pools = allocation_pools
|
||||
self.subnetpool_id = subnetpool_id
|
||||
|
||||
@classmethod
|
||||
def from_dict(cls, model_dict):
|
||||
host_routes = model_dict.pop('host_routes', [])
|
||||
allocation_pools = model_dict.pop('allocation_pools', [])
|
||||
model_dict['host_routes'] = [HostRoute.from_dict(route)
|
||||
for route in host_routes]
|
||||
model_dict['allocation_pools'] = [AllocationPool.from_dict(ap)
|
||||
for ap in allocation_pools]
|
||||
return super(Subnet, cls).from_dict(model_dict)
|
||||
|
||||
|
||||
class IPAllocation(BaseDataModel):
|
||||
|
||||
fields = ['port_id', 'ip_address', 'subnet_id', 'network_id']
|
||||
|
||||
def __init__(self, port_id=None, ip_address=None, subnet_id=None,
|
||||
network_id=None):
|
||||
self.port_id = port_id
|
||||
self.ip_address = ip_address
|
||||
self.subnet_id = subnet_id
|
||||
self.network_id = network_id
|
||||
|
||||
@classmethod
|
||||
def from_dict(cls, model_dict):
|
||||
subnet = model_dict.pop('subnet', None)
|
||||
# TODO(blogan): add subnet to __init__. Can't do it yet because it
|
||||
# causes issues with converting SA models into data models.
|
||||
instance = super(IPAllocation, cls).from_dict(model_dict)
|
||||
setattr(instance, 'subnet', None)
|
||||
if subnet:
|
||||
setattr(instance, 'subnet', Subnet.from_dict(subnet))
|
||||
return instance
|
||||
|
||||
|
||||
class Port(BaseDataModel):
|
||||
|
||||
fields = ['id', 'tenant_id', 'name', 'network_id', 'mac_address',
|
||||
'admin_state_up', 'status', 'device_id', 'device_owner',
|
||||
'fixed_ips', 'network']
|
||||
|
||||
def __init__(self, id=None, tenant_id=None, name=None, network_id=None,
|
||||
mac_address=None, admin_state_up=None, status=None,
|
||||
device_id=None, device_owner=None, fixed_ips=None,
|
||||
network=None):
|
||||
self.id = id
|
||||
self.tenant_id = tenant_id
|
||||
self.name = name
|
||||
self.network_id = network_id
|
||||
self.mac_address = mac_address
|
||||
self.admin_state_up = admin_state_up
|
||||
self.status = status
|
||||
self.device_id = device_id
|
||||
self.device_owner = device_owner
|
||||
self.fixed_ips = fixed_ips or []
|
||||
self.network = network
|
||||
|
||||
@classmethod
|
||||
def from_dict(cls, model_dict):
|
||||
fixed_ips = model_dict.pop('fixed_ips', [])
|
||||
model_dict['fixed_ips'] = [IPAllocation.from_dict(fixed_ip)
|
||||
for fixed_ip in fixed_ips]
|
||||
if model_dict.get('network'):
|
||||
network_dict = model_dict.pop('network')
|
||||
model_dict['network'] = Network.from_dict(network_dict)
|
||||
return super(Port, cls).from_dict(model_dict)
|
||||
|
||||
|
||||
class ProviderResourceAssociation(BaseDataModel):
|
||||
|
||||
fields = ['provider_name', 'resource_id']
|
||||
|
||||
def __init__(self, provider_name=None, resource_id=None):
|
||||
self.provider_name = provider_name
|
||||
self.resource_id = resource_id
|
||||
|
||||
@classmethod
|
||||
def from_dict(cls, model_dict):
|
||||
device_driver = model_dict.pop('device_driver', None)
|
||||
instance = super(ProviderResourceAssociation, cls).from_dict(
|
||||
model_dict)
|
||||
setattr(instance, 'device_driver', device_driver)
|
||||
return instance
|
||||
|
||||
|
||||
class SessionPersistence(BaseDataModel):
|
||||
|
||||
fields = ['pool_id', 'type', 'cookie_name', 'pool']
|
||||
|
||||
def __init__(self, pool_id=None, type=None, cookie_name=None,
|
||||
pool=None):
|
||||
self.pool_id = pool_id
|
||||
self.type = type
|
||||
self.cookie_name = cookie_name
|
||||
self.pool = pool
|
||||
|
||||
def to_api_dict(self):
|
||||
return super(SessionPersistence, self).to_dict(pool=False,
|
||||
pool_id=False)
|
||||
|
||||
@classmethod
|
||||
def from_dict(cls, model_dict):
|
||||
pool = model_dict.pop('pool', None)
|
||||
if pool:
|
||||
model_dict['pool'] = Pool.from_dict(
|
||||
pool)
|
||||
return super(SessionPersistence, cls).from_dict(model_dict)
|
||||
|
||||
|
||||
class LoadBalancerStatistics(BaseDataModel):
|
||||
|
||||
fields = ['loadbalancer_id', 'bytes_in', 'bytes_out', 'active_connections',
|
||||
'total_connections', 'loadbalancer']
|
||||
|
||||
def __init__(self, loadbalancer_id=None, bytes_in=None, bytes_out=None,
|
||||
active_connections=None, total_connections=None,
|
||||
loadbalancer=None):
|
||||
self.loadbalancer_id = loadbalancer_id
|
||||
self.bytes_in = bytes_in
|
||||
self.bytes_out = bytes_out
|
||||
self.active_connections = active_connections
|
||||
self.total_connections = total_connections
|
||||
self.loadbalancer = loadbalancer
|
||||
|
||||
def to_api_dict(self):
|
||||
return super(LoadBalancerStatistics, self).to_dict(
|
||||
loadbalancer_id=False, loadbalancer=False)
|
||||
|
||||
|
||||
class HealthMonitor(BaseDataModel):
|
||||
|
||||
fields = ['id', 'tenant_id', 'type', 'delay', 'timeout', 'max_retries',
|
||||
'http_method', 'url_path', 'expected_codes',
|
||||
'provisioning_status', 'admin_state_up', 'pool', 'name',
|
||||
'max_retries_down']
|
||||
|
||||
def __init__(self, id=None, tenant_id=None, type=None, delay=None,
|
||||
timeout=None, max_retries=None, http_method=None,
|
||||
url_path=None, expected_codes=None, provisioning_status=None,
|
||||
admin_state_up=None, pool=None, name=None,
|
||||
max_retries_down=None):
|
||||
self.id = id
|
||||
self.tenant_id = tenant_id
|
||||
self.type = type
|
||||
self.delay = delay
|
||||
self.timeout = timeout
|
||||
self.max_retries = max_retries
|
||||
self.http_method = http_method
|
||||
self.url_path = url_path
|
||||
self.expected_codes = expected_codes
|
||||
self.provisioning_status = provisioning_status
|
||||
self.admin_state_up = admin_state_up
|
||||
self.pool = pool
|
||||
self.name = name
|
||||
self.max_retries_down = max_retries_down
|
||||
|
||||
def attached_to_loadbalancer(self):
|
||||
return bool(self.pool and self.pool.loadbalancer)
|
||||
|
||||
def to_api_dict(self):
|
||||
ret_dict = super(HealthMonitor, self).to_dict(
|
||||
provisioning_status=False, pool=False)
|
||||
ret_dict['pools'] = []
|
||||
if self.pool:
|
||||
ret_dict['pools'].append({'id': self.pool.id})
|
||||
if self.type in [HEALTH_MONITOR_TCP,
|
||||
HEALTH_MONITOR_PING]:
|
||||
ret_dict.pop('http_method')
|
||||
ret_dict.pop('url_path')
|
||||
ret_dict.pop('expected_codes')
|
||||
return ret_dict
|
||||
|
||||
@classmethod
|
||||
def from_dict(cls, model_dict):
|
||||
pool = model_dict.pop('pool', None)
|
||||
if pool:
|
||||
model_dict['pool'] = Pool.from_dict(
|
||||
pool)
|
||||
return super(HealthMonitor, cls).from_dict(model_dict)
|
||||
|
||||
|
||||
class Pool(BaseDataModel):
|
||||
|
||||
fields = ['id', 'tenant_id', 'name', 'description', 'healthmonitor_id',
|
||||
'protocol', 'lb_algorithm', 'admin_state_up', 'operating_status',
|
||||
'provisioning_status', 'members', 'healthmonitor',
|
||||
'session_persistence', 'loadbalancer_id', 'loadbalancer',
|
||||
'listener', 'listeners', 'l7_policies']
|
||||
|
||||
# Map deprecated attribute names to new ones.
|
||||
attr_mapping = {'sessionpersistence': 'session_persistence'}
|
||||
|
||||
def __init__(self, id=None, tenant_id=None, name=None, description=None,
|
||||
healthmonitor_id=None, protocol=None, lb_algorithm=None,
|
||||
admin_state_up=None, operating_status=None,
|
||||
provisioning_status=None, members=None, healthmonitor=None,
|
||||
session_persistence=None, loadbalancer_id=None,
|
||||
loadbalancer=None, listener=None, listeners=None,
|
||||
l7_policies=None):
|
||||
self.id = id
|
||||
self.tenant_id = tenant_id
|
||||
self.name = name
|
||||
self.description = description
|
||||
self.healthmonitor_id = healthmonitor_id
|
||||
self.protocol = protocol
|
||||
self.lb_algorithm = lb_algorithm
|
||||
self.admin_state_up = admin_state_up
|
||||
self.operating_status = operating_status
|
||||
self.provisioning_status = provisioning_status
|
||||
self.members = members or []
|
||||
self.healthmonitor = healthmonitor
|
||||
self.session_persistence = session_persistence
|
||||
# NOTE(eezhova): Old attribute name is kept for backwards
|
||||
# compatibility with out-of-tree drivers.
|
||||
self.sessionpersistence = self.session_persistence
|
||||
self.loadbalancer_id = loadbalancer_id
|
||||
self.loadbalancer = loadbalancer
|
||||
self.listener = listener
|
||||
self.listeners = listeners or []
|
||||
self.l7_policies = l7_policies or []
|
||||
|
||||
def attached_to_loadbalancer(self):
|
||||
return bool(self.loadbalancer)
|
||||
|
||||
def to_api_dict(self):
|
||||
ret_dict = super(Pool, self).to_dict(
|
||||
provisioning_status=False, operating_status=False,
|
||||
healthmonitor=False, session_persistence=False,
|
||||
loadbalancer_id=False, loadbalancer=False, listener_id=False)
|
||||
ret_dict['loadbalancers'] = []
|
||||
if self.loadbalancer:
|
||||
ret_dict['loadbalancers'].append({'id': self.loadbalancer.id})
|
||||
ret_dict['session_persistence'] = None
|
||||
if self.session_persistence:
|
||||
ret_dict['session_persistence'] = (
|
||||
self.session_persistence.to_api_dict())
|
||||
ret_dict['members'] = [{'id': member.id} for member in self.members]
|
||||
ret_dict['listeners'] = [{'id': listener.id}
|
||||
for listener in self.listeners]
|
||||
if self.listener:
|
||||
ret_dict['listener_id'] = self.listener.id
|
||||
else:
|
||||
ret_dict['listener_id'] = None
|
||||
ret_dict['l7_policies'] = [{'id': l7_policy.id}
|
||||
for l7_policy in self.l7_policies]
|
||||
return ret_dict
|
||||
|
||||
@classmethod
|
||||
def from_dict(cls, model_dict):
|
||||
healthmonitor = model_dict.pop('healthmonitor', None)
|
||||
session_persistence = model_dict.pop('session_persistence', None)
|
||||
model_dict.pop('sessionpersistence', None)
|
||||
loadbalancer = model_dict.pop('loadbalancer', None)
|
||||
members = model_dict.pop('members', [])
|
||||
model_dict['members'] = [Member.from_dict(member)
|
||||
for member in members]
|
||||
listeners = model_dict.pop('listeners', [])
|
||||
model_dict['listeners'] = [Listener.from_dict(listener)
|
||||
for listener in listeners]
|
||||
l7_policies = model_dict.pop('l7_policies', [])
|
||||
model_dict['l7_policies'] = [L7Policy.from_dict(policy)
|
||||
for policy in l7_policies]
|
||||
|
||||
if healthmonitor:
|
||||
model_dict['healthmonitor'] = HealthMonitor.from_dict(
|
||||
healthmonitor)
|
||||
if session_persistence:
|
||||
model_dict['session_persistence'] = SessionPersistence.from_dict(
|
||||
session_persistence)
|
||||
if loadbalancer:
|
||||
model_dict['loadbalancer'] = LoadBalancer.from_dict(loadbalancer)
|
||||
return super(Pool, cls).from_dict(model_dict)
|
||||
|
||||
|
||||
class Member(BaseDataModel):
|
||||
|
||||
fields = ['id', 'tenant_id', 'pool_id', 'address', 'protocol_port',
|
||||
'weight', 'admin_state_up', 'subnet_id', 'operating_status',
|
||||
'provisioning_status', 'pool', 'name']
|
||||
|
||||
def __init__(self, id=None, tenant_id=None, pool_id=None, address=None,
|
||||
protocol_port=None, weight=None, admin_state_up=None,
|
||||
subnet_id=None, operating_status=None,
|
||||
provisioning_status=None, pool=None, name=None):
|
||||
self.id = id
|
||||
self.tenant_id = tenant_id
|
||||
self.pool_id = pool_id
|
||||
self.address = address
|
||||
self.protocol_port = protocol_port
|
||||
self.weight = weight
|
||||
self.admin_state_up = admin_state_up
|
||||
self.subnet_id = subnet_id
|
||||
self.operating_status = operating_status
|
||||
self.provisioning_status = provisioning_status
|
||||
self.pool = pool
|
||||
self.name = name
|
||||
|
||||
def attached_to_loadbalancer(self):
|
||||
return bool(self.pool and self.pool.loadbalancer)
|
||||
|
||||
def to_api_dict(self):
|
||||
return super(Member, self).to_dict(
|
||||
provisioning_status=False, operating_status=False, pool=False)
|
||||
|
||||
@classmethod
|
||||
def from_dict(cls, model_dict):
|
||||
pool = model_dict.pop('pool', None)
|
||||
if pool:
|
||||
model_dict['pool'] = Pool.from_dict(
|
||||
pool)
|
||||
return super(Member, cls).from_dict(model_dict)
|
||||
|
||||
|
||||
class SNI(BaseDataModel):
|
||||
|
||||
fields = ['listener_id', 'tls_container_id', 'position', 'listener']
|
||||
|
||||
def __init__(self, listener_id=None, tls_container_id=None,
|
||||
position=None, listener=None):
|
||||
self.listener_id = listener_id
|
||||
self.tls_container_id = tls_container_id
|
||||
self.position = position
|
||||
self.listener = listener
|
||||
|
||||
def attached_to_loadbalancer(self):
|
||||
return bool(self.listener and self.listener.loadbalancer)
|
||||
|
||||
def to_api_dict(self):
|
||||
return super(SNI, self).to_dict(listener=False)
|
||||
|
||||
|
||||
class TLSContainer(BaseDataModel):
|
||||
|
||||
fields = ['id', 'certificate', 'private_key', 'passphrase',
|
||||
'intermediates', 'primary_cn']
|
||||
|
||||
def __init__(self, id=None, certificate=None, private_key=None,
|
||||
passphrase=None, intermediates=None, primary_cn=None):
|
||||
self.id = id
|
||||
self.certificate = certificate
|
||||
self.private_key = private_key
|
||||
self.passphrase = passphrase
|
||||
self.intermediates = intermediates
|
||||
self.primary_cn = primary_cn
|
||||
|
||||
|
||||
class L7Rule(BaseDataModel):
|
||||
|
||||
fields = ['id', 'tenant_id', 'l7policy_id', 'type', 'compare_type',
|
||||
'invert', 'key', 'value', 'provisioning_status',
|
||||
'admin_state_up', 'policy']
|
||||
|
||||
def __init__(self, id=None, tenant_id=None,
|
||||
l7policy_id=None, type=None, compare_type=None, invert=None,
|
||||
key=None, value=None, provisioning_status=None,
|
||||
admin_state_up=None, policy=None):
|
||||
self.id = id
|
||||
self.tenant_id = tenant_id
|
||||
self.l7policy_id = l7policy_id
|
||||
self.type = type
|
||||
self.compare_type = compare_type
|
||||
self.invert = invert
|
||||
self.key = key
|
||||
self.value = value
|
||||
self.provisioning_status = provisioning_status
|
||||
self.admin_state_up = admin_state_up
|
||||
self.policy = policy
|
||||
|
||||
def attached_to_loadbalancer(self):
|
||||
return bool(self.policy.listener.loadbalancer)
|
||||
|
||||
def to_api_dict(self):
|
||||
ret_dict = super(L7Rule, self).to_dict(
|
||||
provisioning_status=False,
|
||||
policy=False, l7policy_id=False)
|
||||
ret_dict['policies'] = []
|
||||
if self.policy:
|
||||
ret_dict['policies'].append({'id': self.policy.id})
|
||||
return ret_dict
|
||||
|
||||
@classmethod
|
||||
def from_dict(cls, model_dict):
|
||||
policy = model_dict.pop('policy', None)
|
||||
if policy:
|
||||
model_dict['policy'] = L7Policy.from_dict(policy)
|
||||
return super(L7Rule, cls).from_dict(model_dict)
|
||||
|
||||
|
||||
class L7Policy(BaseDataModel):
|
||||
|
||||
fields = ['id', 'tenant_id', 'name', 'description', 'listener_id',
|
||||
'action', 'redirect_pool_id', 'redirect_url', 'position',
|
||||
'admin_state_up', 'provisioning_status', 'listener', 'rules',
|
||||
'redirect_pool']
|
||||
|
||||
def __init__(self, id=None, tenant_id=None, name=None, description=None,
|
||||
listener_id=None, action=None, redirect_pool_id=None,
|
||||
redirect_url=None, position=None,
|
||||
admin_state_up=None, provisioning_status=None,
|
||||
listener=None, rules=None, redirect_pool=None):
|
||||
self.id = id
|
||||
self.tenant_id = tenant_id
|
||||
self.name = name
|
||||
self.description = description
|
||||
self.listener_id = listener_id
|
||||
self.action = action
|
||||
self.redirect_pool_id = redirect_pool_id
|
||||
self.redirect_pool = redirect_pool
|
||||
self.redirect_url = redirect_url
|
||||
self.position = position
|
||||
self.admin_state_up = admin_state_up
|
||||
self.provisioning_status = provisioning_status
|
||||
self.listener = listener
|
||||
self.rules = rules or []
|
||||
|
||||
def attached_to_loadbalancer(self):
|
||||
return bool(self.listener.loadbalancer)
|
||||
|
||||
def to_api_dict(self):
|
||||
ret_dict = super(L7Policy, self).to_dict(
|
||||
listener=False, listener_id=True,
|
||||
provisioning_status=False, redirect_pool=False)
|
||||
ret_dict['listeners'] = []
|
||||
if self.listener:
|
||||
ret_dict['listeners'].append({'id': self.listener.id})
|
||||
ret_dict['rules'] = [{'id': rule.id} for rule in self.rules]
|
||||
if ret_dict.get('action') == L7_POLICY_ACTION_REDIRECT_TO_POOL:
|
||||
del ret_dict['redirect_url']
|
||||
return ret_dict
|
||||
|
||||
@classmethod
|
||||
def from_dict(cls, model_dict):
|
||||
listener = model_dict.pop('listener', None)
|
||||
redirect_pool = model_dict.pop('redirect_pool', None)
|
||||
rules = model_dict.pop('rules', [])
|
||||
if listener:
|
||||
model_dict['listener'] = Listener.from_dict(listener)
|
||||
if redirect_pool:
|
||||
model_dict['redirect_pool'] = Pool.from_dict(redirect_pool)
|
||||
model_dict['rules'] = [L7Rule.from_dict(rule)
|
||||
for rule in rules]
|
||||
return super(L7Policy, cls).from_dict(model_dict)
|
||||
|
||||
|
||||
class Listener(BaseDataModel):
|
||||
|
||||
fields = ['id', 'tenant_id', 'name', 'description', 'default_pool_id',
|
||||
'loadbalancer_id', 'protocol', 'default_tls_container_id',
|
||||
'sni_containers', 'protocol_port', 'connection_limit',
|
||||
'admin_state_up', 'provisioning_status', 'operating_status',
|
||||
'default_pool', 'loadbalancer', 'l7_policies']
|
||||
|
||||
def __init__(self, id=None, tenant_id=None, name=None, description=None,
|
||||
default_pool_id=None, loadbalancer_id=None, protocol=None,
|
||||
default_tls_container_id=None, sni_containers=None,
|
||||
protocol_port=None, connection_limit=None,
|
||||
admin_state_up=None, provisioning_status=None,
|
||||
operating_status=None, default_pool=None, loadbalancer=None,
|
||||
l7_policies=None):
|
||||
self.id = id
|
||||
self.tenant_id = tenant_id
|
||||
self.name = name
|
||||
self.description = description
|
||||
self.default_pool_id = default_pool_id
|
||||
self.loadbalancer_id = loadbalancer_id
|
||||
self.protocol = protocol
|
||||
self.default_tls_container_id = default_tls_container_id
|
||||
self.sni_containers = sni_containers or []
|
||||
self.protocol_port = protocol_port
|
||||
self.connection_limit = connection_limit
|
||||
self.admin_state_up = admin_state_up
|
||||
self.operating_status = operating_status
|
||||
self.provisioning_status = provisioning_status
|
||||
self.default_pool = default_pool
|
||||
self.loadbalancer = loadbalancer
|
||||
self.l7_policies = l7_policies or []
|
||||
|
||||
def attached_to_loadbalancer(self):
|
||||
return bool(self.loadbalancer)
|
||||
|
||||
def to_api_dict(self):
|
||||
ret_dict = super(Listener, self).to_dict(
|
||||
loadbalancer=False, loadbalancer_id=False, default_pool=False,
|
||||
operating_status=False, provisioning_status=False,
|
||||
sni_containers=False, default_tls_container=False)
|
||||
# NOTE(blogan): Returning a list to future proof for M:N objects
|
||||
# that are not yet implemented.
|
||||
ret_dict['loadbalancers'] = []
|
||||
if self.loadbalancer:
|
||||
ret_dict['loadbalancers'].append({'id': self.loadbalancer.id})
|
||||
ret_dict['sni_container_refs'] = [container.tls_container_id
|
||||
for container in self.sni_containers]
|
||||
ret_dict['default_tls_container_ref'] = self.default_tls_container_id
|
||||
del ret_dict['l7_policies']
|
||||
ret_dict['l7policies'] = [{'id': l7_policy.id}
|
||||
for l7_policy in self.l7_policies]
|
||||
return ret_dict
|
||||
|
||||
@classmethod
|
||||
def from_dict(cls, model_dict):
|
||||
default_pool = model_dict.pop('default_pool', None)
|
||||
loadbalancer = model_dict.pop('loadbalancer', None)
|
||||
sni_containers = model_dict.pop('sni_containers', [])
|
||||
model_dict['sni_containers'] = [SNI.from_dict(sni)
|
||||
for sni in sni_containers]
|
||||
l7_policies = model_dict.pop('l7_policies', [])
|
||||
if default_pool:
|
||||
model_dict['default_pool'] = Pool.from_dict(default_pool)
|
||||
if loadbalancer:
|
||||
model_dict['loadbalancer'] = LoadBalancer.from_dict(loadbalancer)
|
||||
model_dict['l7_policies'] = [L7Policy.from_dict(policy)
|
||||
for policy in l7_policies]
|
||||
return super(Listener, cls).from_dict(model_dict)
|
||||
|
||||
|
||||
class LoadBalancer(BaseDataModel):
|
||||
|
||||
fields = ['id', 'tenant_id', 'name', 'description', 'vip_subnet_id',
|
||||
'vip_port_id', 'vip_address', 'provisioning_status',
|
||||
'operating_status', 'admin_state_up', 'vip_port', 'stats',
|
||||
'provider', 'listeners', 'pools', 'flavor_id']
|
||||
|
||||
def __init__(self, id=None, tenant_id=None, name=None, description=None,
|
||||
vip_subnet_id=None, vip_port_id=None, vip_address=None,
|
||||
provisioning_status=None, operating_status=None,
|
||||
admin_state_up=None, vip_port=None, stats=None,
|
||||
provider=None, listeners=None, pools=None, flavor_id=None):
|
||||
self.id = id
|
||||
self.tenant_id = tenant_id
|
||||
self.name = name
|
||||
self.description = description
|
||||
self.vip_subnet_id = vip_subnet_id
|
||||
self.vip_port_id = vip_port_id
|
||||
self.vip_address = vip_address
|
||||
self.operating_status = operating_status
|
||||
self.provisioning_status = provisioning_status
|
||||
self.admin_state_up = admin_state_up
|
||||
self.vip_port = vip_port
|
||||
self.stats = stats
|
||||
self.provider = provider
|
||||
self.listeners = listeners or []
|
||||
self.flavor_id = flavor_id
|
||||
self.pools = pools or []
|
||||
|
||||
def attached_to_loadbalancer(self):
|
||||
return True
|
||||
|
||||
def _construct_full_graph_api_dict(self):
|
||||
api_listeners = []
|
||||
for listener in self.listeners:
|
||||
api_listener = listener.to_api_dict()
|
||||
del api_listener['loadbalancers']
|
||||
del api_listener['default_pool_id']
|
||||
if listener.default_pool:
|
||||
api_pool = listener.default_pool.to_api_dict()
|
||||
del api_pool['listeners']
|
||||
del api_pool['listener']
|
||||
del api_pool['listener_id']
|
||||
del api_pool['healthmonitor_id']
|
||||
del api_pool['loadbalancers']
|
||||
del api_pool['l7_policies']
|
||||
del api_pool['sessionpersistence']
|
||||
if listener.default_pool.healthmonitor:
|
||||
api_hm = listener.default_pool.healthmonitor.to_api_dict()
|
||||
del api_hm['pools']
|
||||
api_pool['healthmonitor'] = api_hm
|
||||
api_pool['members'] = []
|
||||
for member in listener.default_pool.members:
|
||||
api_member = member.to_api_dict()
|
||||
del api_member['pool_id']
|
||||
api_pool['members'].append(api_member)
|
||||
api_listener['default_pool'] = api_pool
|
||||
if listener.l7_policies and len(listener.l7_policies) > 0:
|
||||
api_l7policies = []
|
||||
for l7policy in listener.l7_policies:
|
||||
api_l7policy = l7policy.to_api_dict()
|
||||
del api_l7policy['redirect_pool_id']
|
||||
del api_l7policy['listeners']
|
||||
if l7policy.rules and len(l7policy.rules) > 0:
|
||||
api_l7rules = []
|
||||
for l7rule in l7policy.rules:
|
||||
api_l7rule = l7rule.to_api_dict()
|
||||
del api_l7rule['policies']
|
||||
api_l7rules.append(api_l7rule)
|
||||
api_l7policy['rules'] = api_l7rules
|
||||
if l7policy.redirect_pool:
|
||||
api_r_pool = l7policy.redirect_pool.to_api_dict()
|
||||
if l7policy.redirect_pool.healthmonitor:
|
||||
api_r_hm = (l7policy.redirect_pool.healthmonitor.
|
||||
to_api_dict())
|
||||
del api_r_hm['pools']
|
||||
api_r_pool['healthmonitor'] = api_r_hm
|
||||
api_r_pool['members'] = []
|
||||
for r_member in l7policy.redirect_pool.members:
|
||||
api_r_member = r_member.to_api_dict()
|
||||
del api_r_member['pool_id']
|
||||
api_r_pool['members'].append(api_r_member)
|
||||
del api_r_pool['listeners']
|
||||
del api_r_pool['listener']
|
||||
del api_r_pool['listener_id']
|
||||
del api_r_pool['healthmonitor_id']
|
||||
del api_r_pool['loadbalancers']
|
||||
del api_r_pool['l7_policies']
|
||||
del api_r_pool['sessionpersistence']
|
||||
api_l7policy['redirect_pool'] = api_r_pool
|
||||
api_l7policies.append(api_l7policy)
|
||||
api_listener['l7policies'] = api_l7policies
|
||||
api_listeners.append(api_listener)
|
||||
return api_listeners
|
||||
|
||||
def to_api_dict(self, full_graph=False):
|
||||
ret_dict = super(LoadBalancer, self).to_dict(
|
||||
vip_port=False, stats=False, listeners=False)
|
||||
if full_graph:
|
||||
ret_dict['listeners'] = self._construct_full_graph_api_dict()
|
||||
del ret_dict['pools']
|
||||
else:
|
||||
ret_dict['listeners'] = [{'id': listener.id}
|
||||
for listener in self.listeners]
|
||||
ret_dict['pools'] = [{'id': pool.id} for pool in self.pools]
|
||||
|
||||
if self.provider:
|
||||
ret_dict['provider'] = self.provider.provider_name
|
||||
|
||||
if not self.flavor_id:
|
||||
del ret_dict['flavor_id']
|
||||
|
||||
return ret_dict
|
||||
|
||||
@classmethod
|
||||
def from_dict(cls, model_dict):
|
||||
listeners = model_dict.pop('listeners', [])
|
||||
pools = model_dict.pop('pools', [])
|
||||
vip_port = model_dict.pop('vip_port', None)
|
||||
provider = model_dict.pop('provider', None)
|
||||
model_dict.pop('stats', None)
|
||||
model_dict['listeners'] = [Listener.from_dict(listener)
|
||||
for listener in listeners]
|
||||
model_dict['pools'] = [Pool.from_dict(pool)
|
||||
for pool in pools]
|
||||
if vip_port:
|
||||
model_dict['vip_port'] = Port.from_dict(vip_port)
|
||||
if provider:
|
||||
model_dict['provider'] = ProviderResourceAssociation.from_dict(
|
||||
provider)
|
||||
return super(LoadBalancer, cls).from_dict(model_dict)
|
||||
|
||||
|
||||
SA_MODEL_TO_DATA_MODEL_MAP = {
|
||||
models.LoadBalancer: LoadBalancer,
|
||||
models.HealthMonitorV2: HealthMonitor,
|
||||
models.Listener: Listener,
|
||||
models.SNI: SNI,
|
||||
models.L7Rule: L7Rule,
|
||||
models.L7Policy: L7Policy,
|
||||
models.PoolV2: Pool,
|
||||
models.MemberV2: Member,
|
||||
models.LoadBalancerStatistics: LoadBalancerStatistics,
|
||||
models.SessionPersistenceV2: SessionPersistence,
|
||||
models_v2.IPAllocation: IPAllocation,
|
||||
models_v2.Port: Port,
|
||||
servicetype_db.ProviderResourceAssociation: ProviderResourceAssociation
|
||||
}
|
||||
|
||||
DATA_MODEL_TO_SA_MODEL_MAP = {
|
||||
LoadBalancer: models.LoadBalancer,
|
||||
HealthMonitor: models.HealthMonitorV2,
|
||||
Listener: models.Listener,
|
||||
SNI: models.SNI,
|
||||
L7Rule: models.L7Rule,
|
||||
L7Policy: models.L7Policy,
|
||||
Pool: models.PoolV2,
|
||||
Member: models.MemberV2,
|
||||
LoadBalancerStatistics: models.LoadBalancerStatistics,
|
||||
SessionPersistence: models.SessionPersistenceV2,
|
||||
IPAllocation: models_v2.IPAllocation,
|
||||
Port: models_v2.Port,
|
||||
ProviderResourceAssociation: servicetype_db.ProviderResourceAssociation
|
||||
}
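The two maps above are how test code hops between SQLAlchemy rows and these plain data models. A minimal sketch of the usual lookup, assuming ``sa_obj`` is an instance of one of the mapped SQLAlchemy classes::

    def to_data_model(sa_obj):
        # Pick the matching data model class and build it from the SA instance.
        data_cls = SA_MODEL_TO_DATA_MODEL_MAP[sa_obj.__class__]
        return data_cls.from_sqlalchemy_model(sa_obj)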
vmware_nsx/tests/unit/services/lbaas/lb_db_models.py (new file, 554 lines)
@ -0,0 +1,554 @@
|
||||
# Copyright 2019 VMware, Inc.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import six
|
||||
|
||||
from neutron.db.models import servicetype as st_db
|
||||
from neutron.db import models_v2
|
||||
from neutron_lib.db import constants as db_const
|
||||
from neutron_lib.db import model_base
|
||||
import sqlalchemy as sa
|
||||
from sqlalchemy.ext import orderinglist
|
||||
from sqlalchemy import orm
|
||||
|
||||
from vmware_nsx.tests.unit.services.lbaas import lb_constants as lb_const
|
||||
|
||||
|
||||
class SessionPersistenceV2(model_base.BASEV2):
|
||||
|
||||
NAME = 'session_persistence'
|
||||
|
||||
__tablename__ = "lbaas_sessionpersistences"
|
||||
|
||||
pool_id = sa.Column(sa.String(36),
|
||||
sa.ForeignKey("lbaas_pools.id"),
|
||||
primary_key=True,
|
||||
nullable=False)
|
||||
type = sa.Column(sa.Enum(*lb_const.SUPPORTED_SP_TYPES,
|
||||
name="lbaas_sesssionpersistences_typev2"),
|
||||
nullable=False)
|
||||
cookie_name = sa.Column(sa.String(1024), nullable=True)
|
||||
|
||||
|
||||
class LoadBalancerStatistics(model_base.BASEV2):
|
||||
"""Represents load balancer statistics."""
|
||||
|
||||
NAME = 'loadbalancer_stats'
|
||||
|
||||
__tablename__ = "lbaas_loadbalancer_statistics"
|
||||
|
||||
loadbalancer_id = sa.Column(sa.String(36),
|
||||
sa.ForeignKey("lbaas_loadbalancers.id"),
|
||||
primary_key=True,
|
||||
nullable=False)
|
||||
bytes_in = sa.Column(sa.BigInteger, nullable=False)
|
||||
bytes_out = sa.Column(sa.BigInteger, nullable=False)
|
||||
active_connections = sa.Column(sa.BigInteger, nullable=False)
|
||||
total_connections = sa.Column(sa.BigInteger, nullable=False)
|
||||
|
||||
@orm.validates('bytes_in', 'bytes_out',
|
||||
'active_connections', 'total_connections')
|
||||
def validate_non_negative_int(self, key, value):
|
||||
if value < 0:
|
||||
data = {'key': key, 'value': value}
|
||||
raise ValueError('The %(key)s field can not have '
|
||||
'negative value. '
|
||||
'Current value is %(value)d.' % data)
|
||||
return value
|
||||
|
||||
|
||||
class MemberV2(model_base.BASEV2, model_base.HasId, model_base.HasProject):
|
||||
"""Represents a v2 neutron load balancer member."""
|
||||
|
||||
NAME = 'member'
|
||||
|
||||
__tablename__ = "lbaas_members"
|
||||
|
||||
__table_args__ = (
|
||||
sa.schema.UniqueConstraint('pool_id', 'address', 'protocol_port',
|
||||
name='uniq_pool_address_port_v2'),
|
||||
)
|
||||
pool_id = sa.Column(sa.String(36), sa.ForeignKey("lbaas_pools.id"),
|
||||
nullable=False)
|
||||
address = sa.Column(sa.String(64), nullable=False)
|
||||
protocol_port = sa.Column(sa.Integer, nullable=False)
|
||||
weight = sa.Column(sa.Integer, nullable=True)
|
||||
admin_state_up = sa.Column(sa.Boolean(), nullable=False)
|
||||
subnet_id = sa.Column(sa.String(36), nullable=True)
|
||||
provisioning_status = sa.Column(sa.String(16), nullable=False)
|
||||
operating_status = sa.Column(sa.String(16), nullable=False)
|
||||
name = sa.Column(sa.String(db_const.NAME_FIELD_SIZE), nullable=True)
|
||||
|
||||
@property
|
||||
def root_loadbalancer(self):
|
||||
return self.pool.loadbalancer
|
||||
|
||||
@property
|
||||
def to_api_dict(self):
|
||||
def to_dict(sa_model, attributes):
|
||||
ret = {}
|
||||
for attr in attributes:
|
||||
value = getattr(sa_model, attr)
|
||||
if six.PY2 and isinstance(value, six.text_type):
|
||||
ret[attr.encode('utf8')] = value.encode('utf8')
|
||||
else:
|
||||
ret[attr] = value
|
||||
return ret
|
||||
|
||||
ret_dict = to_dict(self, [
|
||||
'id', 'tenant_id', 'pool_id', 'address', 'protocol_port', 'weight',
|
||||
'admin_state_up', 'subnet_id', 'name'])
|
||||
|
||||
return ret_dict
|
||||
|
||||
|
||||
class HealthMonitorV2(model_base.BASEV2, model_base.HasId,
|
||||
model_base.HasProject):
|
||||
"""Represents a v2 neutron load balancer healthmonitor."""
|
||||
|
||||
NAME = 'healthmonitor'
|
||||
|
||||
__tablename__ = "lbaas_healthmonitors"
|
||||
|
||||
type = sa.Column(sa.Enum(*lb_const.SUPPORTED_HEALTH_MONITOR_TYPES,
|
||||
name="healthmonitors_typev2"),
|
||||
nullable=False)
|
||||
delay = sa.Column(sa.Integer, nullable=False)
|
||||
timeout = sa.Column(sa.Integer, nullable=False)
|
||||
max_retries = sa.Column(sa.Integer, nullable=False)
|
||||
http_method = sa.Column(sa.String(16), nullable=True)
|
||||
url_path = sa.Column(sa.String(255), nullable=True)
|
||||
expected_codes = sa.Column(sa.String(64), nullable=True)
|
||||
provisioning_status = sa.Column(sa.String(16), nullable=False)
|
||||
admin_state_up = sa.Column(sa.Boolean(), nullable=False)
|
||||
name = sa.Column(sa.String(db_const.NAME_FIELD_SIZE), nullable=True)
|
||||
max_retries_down = sa.Column(sa.Integer, nullable=True)
|
||||
|
||||
@property
|
||||
def root_loadbalancer(self):
|
||||
return self.pool.loadbalancer
|
||||
|
||||
@property
|
||||
def to_api_dict(self):
|
||||
def to_dict(sa_model, attributes):
|
||||
ret = {}
|
||||
for attr in attributes:
|
||||
value = getattr(sa_model, attr)
|
||||
if six.PY2 and isinstance(value, six.text_type):
|
||||
ret[attr.encode('utf8')] = value.encode('utf8')
|
||||
else:
|
||||
ret[attr] = value
|
||||
return ret
|
||||
|
||||
ret_dict = to_dict(self, [
|
||||
'id', 'tenant_id', 'type', 'delay', 'timeout', 'max_retries',
|
||||
'http_method', 'url_path', 'expected_codes', 'admin_state_up',
|
||||
'name', 'max_retries_down'])
|
||||
|
||||
ret_dict['pools'] = []
|
||||
if self.pool:
|
||||
ret_dict['pools'].append({'id': self.pool.id})
|
||||
if self.type in [lb_const.HEALTH_MONITOR_TCP,
|
||||
lb_const.HEALTH_MONITOR_PING]:
|
||||
ret_dict.pop('http_method')
|
||||
ret_dict.pop('url_path')
|
||||
ret_dict.pop('expected_codes')
|
||||
|
||||
return ret_dict
|
||||
|
||||
|
||||
class LoadBalancer(model_base.BASEV2, model_base.HasId, model_base.HasProject):
|
||||
"""Represents a v2 neutron load balancer."""
|
||||
|
||||
NAME = 'loadbalancer'
|
||||
|
||||
__tablename__ = "lbaas_loadbalancers"
|
||||
|
||||
name = sa.Column(sa.String(255))
|
||||
description = sa.Column(sa.String(255))
|
||||
vip_subnet_id = sa.Column(sa.String(36), nullable=False)
|
||||
vip_port_id = sa.Column(sa.String(36), sa.ForeignKey(
|
||||
'ports.id', name='fk_lbaas_loadbalancers_ports_id'))
|
||||
vip_address = sa.Column(sa.String(36))
|
||||
provisioning_status = sa.Column(sa.String(16), nullable=False)
|
||||
operating_status = sa.Column(sa.String(16), nullable=False)
|
||||
admin_state_up = sa.Column(sa.Boolean(), nullable=False)
|
||||
vip_port = orm.relationship(models_v2.Port)
|
||||
stats = orm.relationship(
|
||||
LoadBalancerStatistics,
|
||||
uselist=False,
|
||||
backref=orm.backref("loadbalancer", uselist=False),
|
||||
cascade="all, delete-orphan")
|
||||
provider = orm.relationship(
|
||||
st_db.ProviderResourceAssociation,
|
||||
uselist=False,
|
||||
primaryjoin="LoadBalancer.id==ProviderResourceAssociation.resource_id",
|
||||
foreign_keys=[st_db.ProviderResourceAssociation.resource_id],
|
||||
# NOTE(ihrachys) it's not exactly clear why we would need to have the
|
||||
# backref created (and not e.g. just back_populates= link) since we
|
||||
# don't use the reverse property anywhere, but it helps with
|
||||
# accommodating to the new neutron code that automatically detects
|
||||
# obsolete foreign key state and expires affected relationships. The
|
||||
# code is located in neutron/db/api.py and assumes all relationships
|
||||
# should have backrefs.
|
||||
backref='loadbalancer',
|
||||
# this is only for old API backwards compatibility because when a load
|
||||
# balancer is deleted the pool ID should be the same as the load
|
||||
# balancer ID and should not be cleared out in this table
|
||||
viewonly=True)
|
||||
flavor_id = sa.Column(sa.String(36), sa.ForeignKey(
|
||||
'flavors.id', name='fk_lbaas_loadbalancers_flavors_id'))
|
||||
|
||||
@property
|
||||
def root_loadbalancer(self):
|
||||
return self
|
||||
|
||||
@property
|
||||
def to_api_dict(self):
|
||||
def to_dict(sa_model, attributes):
|
||||
ret = {}
|
||||
for attr in attributes:
|
||||
value = getattr(sa_model, attr)
|
||||
if six.PY2 and isinstance(value, six.text_type):
|
||||
ret[attr.encode('utf8')] = value.encode('utf8')
|
||||
else:
|
||||
ret[attr] = value
|
||||
return ret
|
||||
|
||||
ret_dict = to_dict(self, [
|
||||
'id', 'tenant_id', 'name', 'description',
|
||||
'vip_subnet_id', 'vip_port_id', 'vip_address', 'operating_status',
|
||||
'provisioning_status', 'admin_state_up', 'flavor_id'])
|
||||
ret_dict['listeners'] = [{'id': listener.id}
|
||||
for listener in self.listeners]
|
||||
ret_dict['pools'] = [{'id': pool.id} for pool in self.pools]
|
||||
|
||||
if self.provider:
|
||||
ret_dict['provider'] = self.provider.provider_name
|
||||
|
||||
if not self.flavor_id:
|
||||
del ret_dict['flavor_id']
|
||||
|
||||
return ret_dict
|
||||
|
||||
|
||||
class PoolV2(model_base.BASEV2, model_base.HasId, model_base.HasProject):
|
||||
"""Represents a v2 neutron load balancer pool."""
|
||||
|
||||
NAME = 'pool'
|
||||
|
||||
__tablename__ = "lbaas_pools"
|
||||
|
||||
name = sa.Column(sa.String(255), nullable=True)
|
||||
description = sa.Column(sa.String(255), nullable=True)
|
||||
loadbalancer_id = sa.Column(sa.String(36), sa.ForeignKey(
|
||||
"lbaas_loadbalancers.id"))
|
||||
healthmonitor_id = sa.Column(sa.String(36),
|
||||
sa.ForeignKey("lbaas_healthmonitors.id"),
|
||||
unique=True,
|
||||
nullable=True)
|
||||
protocol = sa.Column(sa.Enum(*lb_const.POOL_SUPPORTED_PROTOCOLS,
|
||||
name="pool_protocolsv2"),
|
||||
nullable=False)
|
||||
lb_algorithm = sa.Column(sa.Enum(*lb_const.SUPPORTED_LB_ALGORITHMS,
|
||||
name="lb_algorithmsv2"),
|
||||
nullable=False)
|
||||
admin_state_up = sa.Column(sa.Boolean(), nullable=False)
|
||||
provisioning_status = sa.Column(sa.String(16), nullable=False)
|
||||
operating_status = sa.Column(sa.String(16), nullable=False)
|
||||
members = orm.relationship(MemberV2,
|
||||
backref=orm.backref("pool", uselist=False),
|
||||
cascade="all, delete-orphan")
|
||||
healthmonitor = orm.relationship(
|
||||
HealthMonitorV2,
|
||||
backref=orm.backref("pool", uselist=False))
|
||||
session_persistence = orm.relationship(
|
||||
SessionPersistenceV2,
|
||||
uselist=False,
|
||||
backref=orm.backref("pool", uselist=False),
|
||||
cascade="all, delete-orphan")
|
||||
loadbalancer = orm.relationship(
|
||||
LoadBalancer, uselist=False,
|
||||
backref=orm.backref("pools", uselist=True))
|
||||
|
||||
@property
|
||||
def root_loadbalancer(self):
|
||||
return self.loadbalancer
|
||||
|
||||
# No real relationship here. But we want to fake a pool having a
# 'listener_id' sometimes for API backward compatibility purposes.
|
||||
@property
|
||||
def listener(self):
|
||||
if self.listeners:
|
||||
return self.listeners[0]
|
||||
else:
|
||||
return None
|
||||
|
||||
@property
|
||||
def to_api_dict(self):
|
||||
def to_dict(sa_model, attributes):
|
||||
ret = {}
|
||||
for attr in attributes:
|
||||
value = getattr(sa_model, attr)
|
||||
if six.PY2 and isinstance(value, six.text_type):
|
||||
ret[attr.encode('utf8')] = value.encode('utf8')
|
||||
else:
|
||||
ret[attr] = value
|
||||
return ret
|
||||
|
||||
ret_dict = to_dict(self, [
|
||||
'id', 'tenant_id', 'name', 'description',
|
||||
'healthmonitor_id', 'protocol', 'lb_algorithm', 'admin_state_up'])
|
||||
|
||||
ret_dict['loadbalancers'] = []
|
||||
if self.loadbalancer:
|
||||
ret_dict['loadbalancers'].append({'id': self.loadbalancer.id})
|
||||
ret_dict['session_persistence'] = None
|
||||
if self.session_persistence:
|
||||
ret_dict['session_persistence'] = (
|
||||
to_dict(self.session_persistence, [
|
||||
'type', 'cookie_name']))
|
||||
ret_dict['members'] = [{'id': member.id} for member in self.members]
|
||||
ret_dict['listeners'] = [{'id': listener.id}
|
||||
for listener in self.listeners]
|
||||
if self.listener:
|
||||
ret_dict['listener_id'] = self.listener.id
|
||||
else:
|
||||
ret_dict['listener_id'] = None
|
||||
ret_dict['l7_policies'] = [{'id': l7_policy.id}
|
||||
for l7_policy in self.l7_policies]
|
||||
return ret_dict
|
||||
|
||||
|
||||
class SNI(model_base.BASEV2):

    """Many-to-many association between Listener and TLS container ids
    Making the SNI certificates list, ordered using the position
    """

    NAME = 'sni'

    __tablename__ = "lbaas_sni"

    listener_id = sa.Column(sa.String(36),
                            sa.ForeignKey("lbaas_listeners.id"),
                            primary_key=True,
                            nullable=False)
    tls_container_id = sa.Column(sa.String(128),
                                 primary_key=True,
                                 nullable=False)
    position = sa.Column(sa.Integer)

    @property
    def root_loadbalancer(self):
        return self.listener.loadbalancer
|
||||
|
||||
class L7Rule(model_base.BASEV2, model_base.HasId, model_base.HasProject):
|
||||
"""Represents L7 Rule."""
|
||||
|
||||
NAME = 'l7rule'
|
||||
|
||||
__tablename__ = "lbaas_l7rules"
|
||||
|
||||
l7policy_id = sa.Column(sa.String(36),
|
||||
sa.ForeignKey("lbaas_l7policies.id"),
|
||||
nullable=False)
|
||||
type = sa.Column(sa.Enum(*lb_const.SUPPORTED_L7_RULE_TYPES,
|
||||
name="l7rule_typesv2"),
|
||||
nullable=False)
|
||||
compare_type = sa.Column(sa.Enum(*lb_const.SUPPORTED_L7_RULE_COMPARE_TYPES,
|
||||
name="l7rule_compare_typev2"),
|
||||
nullable=False)
|
||||
invert = sa.Column(sa.Boolean(), nullable=False)
|
||||
key = sa.Column(sa.String(255), nullable=True)
|
||||
value = sa.Column(sa.String(255), nullable=False)
|
||||
provisioning_status = sa.Column(sa.String(16), nullable=False)
|
||||
admin_state_up = sa.Column(sa.Boolean(), nullable=False)
|
||||
|
||||
@property
|
||||
def root_loadbalancer(self):
|
||||
return self.policy.listener.loadbalancer
|
||||
|
||||
@property
|
||||
def to_api_dict(self):
|
||||
def to_dict(sa_model, attributes):
|
||||
ret = {}
|
||||
for attr in attributes:
|
||||
value = getattr(sa_model, attr)
|
||||
if six.PY2 and isinstance(value, six.text_type):
|
||||
ret[attr.encode('utf8')] = value.encode('utf8')
|
||||
else:
|
||||
ret[attr] = value
|
||||
return ret
|
||||
|
||||
ret_dict = to_dict(self, [
|
||||
'id', 'tenant_id', 'type', 'compare_type', 'invert', 'key',
|
||||
'value', 'admin_state_up'])
|
||||
|
||||
ret_dict['policies'] = []
|
||||
if self.policy:
|
||||
ret_dict['policies'].append({'id': self.policy.id})
|
||||
return ret_dict
|
||||
|
||||
|
||||
class L7Policy(model_base.BASEV2, model_base.HasId, model_base.HasProject):
|
||||
"""Represents L7 Policy."""
|
||||
|
||||
NAME = 'l7policy'
|
||||
|
||||
__tablename__ = "lbaas_l7policies"
|
||||
|
||||
name = sa.Column(sa.String(255), nullable=True)
|
||||
description = sa.Column(sa.String(255), nullable=True)
|
||||
listener_id = sa.Column(sa.String(36),
|
||||
sa.ForeignKey("lbaas_listeners.id"),
|
||||
nullable=False)
|
||||
action = sa.Column(sa.Enum(*lb_const.SUPPORTED_L7_POLICY_ACTIONS,
|
||||
name="l7policy_action_typesv2"),
|
||||
nullable=False)
|
||||
redirect_pool_id = sa.Column(sa.String(36),
|
||||
sa.ForeignKey("lbaas_pools.id"),
|
||||
nullable=True)
|
||||
redirect_url = sa.Column(sa.String(255),
|
||||
nullable=True)
|
||||
position = sa.Column(sa.Integer, nullable=False)
|
||||
provisioning_status = sa.Column(sa.String(16), nullable=False)
|
||||
admin_state_up = sa.Column(sa.Boolean(), nullable=False)
|
||||
rules = orm.relationship(
|
||||
L7Rule,
|
||||
uselist=True,
|
||||
primaryjoin="L7Policy.id==L7Rule.l7policy_id",
|
||||
foreign_keys=[L7Rule.l7policy_id],
|
||||
cascade="all, delete-orphan",
|
||||
backref=orm.backref("policy")
|
||||
)
|
||||
redirect_pool = orm.relationship(
|
||||
PoolV2, backref=orm.backref("l7_policies", uselist=True))
|
||||
|
||||
@property
|
||||
def root_loadbalancer(self):
|
||||
return self.listener.loadbalancer
|
||||
|
||||
@property
|
||||
def to_api_dict(self):
|
||||
def to_dict(sa_model, attributes):
|
||||
ret = {}
|
||||
for attr in attributes:
|
||||
value = getattr(sa_model, attr)
|
||||
if six.PY2 and isinstance(value, six.text_type):
|
||||
ret[attr.encode('utf8')] = value.encode('utf8')
|
||||
else:
|
||||
ret[attr] = value
|
||||
return ret
|
||||
|
||||
ret_dict = to_dict(self, [
|
||||
'id', 'tenant_id', 'name', 'description', 'listener_id', 'action',
|
||||
'redirect_pool_id', 'redirect_url', 'position', 'admin_state_up'])
|
||||
|
||||
ret_dict['listeners'] = [{'id': self.listener_id}]
|
||||
ret_dict['rules'] = [{'id': rule.id} for rule in self.rules]
|
||||
if (ret_dict.get('action') ==
|
||||
lb_const.L7_POLICY_ACTION_REDIRECT_TO_POOL):
|
||||
del ret_dict['redirect_url']
|
||||
return ret_dict
|
||||
|
||||
|
||||
class Listener(model_base.BASEV2, model_base.HasId, model_base.HasProject):
|
||||
"""Represents a v2 neutron listener."""
|
||||
|
||||
NAME = 'listener'
|
||||
|
||||
__tablename__ = "lbaas_listeners"
|
||||
|
||||
__table_args__ = (
|
||||
sa.schema.UniqueConstraint('loadbalancer_id', 'protocol_port',
|
||||
name='uniq_loadbalancer_listener_port'),
|
||||
)
|
||||
|
||||
name = sa.Column(sa.String(255))
|
||||
description = sa.Column(sa.String(255))
|
||||
default_pool_id = sa.Column(sa.String(36), sa.ForeignKey("lbaas_pools.id"),
|
||||
nullable=True)
|
||||
loadbalancer_id = sa.Column(sa.String(36), sa.ForeignKey(
|
||||
"lbaas_loadbalancers.id"))
|
||||
protocol = sa.Column(sa.Enum(*lb_const.LISTENER_SUPPORTED_PROTOCOLS,
|
||||
name="listener_protocolsv2"),
|
||||
nullable=False)
|
||||
default_tls_container_id = sa.Column(sa.String(128),
|
||||
default=None, nullable=True)
|
||||
sni_containers = orm.relationship(
|
||||
SNI,
|
||||
backref=orm.backref("listener", uselist=False),
|
||||
uselist=True,
|
||||
primaryjoin="Listener.id==SNI.listener_id",
|
||||
order_by='SNI.position',
|
||||
collection_class=orderinglist.ordering_list(
|
||||
'position'),
|
||||
foreign_keys=[SNI.listener_id],
|
||||
cascade="all, delete-orphan"
|
||||
)
|
||||
protocol_port = sa.Column(sa.Integer, nullable=False)
|
||||
connection_limit = sa.Column(sa.Integer)
|
||||
admin_state_up = sa.Column(sa.Boolean(), nullable=False)
|
||||
provisioning_status = sa.Column(sa.String(16), nullable=False)
|
||||
operating_status = sa.Column(sa.String(16), nullable=False)
|
||||
default_pool = orm.relationship(
|
||||
PoolV2, backref=orm.backref("listeners"))
|
||||
loadbalancer = orm.relationship(
|
||||
LoadBalancer,
|
||||
backref=orm.backref("listeners", uselist=True))
|
||||
l7_policies = orm.relationship(
|
||||
L7Policy,
|
||||
uselist=True,
|
||||
primaryjoin="Listener.id==L7Policy.listener_id",
|
||||
order_by="L7Policy.position",
|
||||
collection_class=orderinglist.ordering_list('position', count_from=1),
|
||||
foreign_keys=[L7Policy.listener_id],
|
||||
cascade="all, delete-orphan",
|
||||
backref=orm.backref("listener"))
|
||||
|
||||
@property
|
||||
def root_loadbalancer(self):
|
||||
return self.loadbalancer
|
||||
|
||||
@property
|
||||
def to_api_dict(self):
|
||||
def to_dict(sa_model, attributes):
|
||||
ret = {}
|
||||
for attr in attributes:
|
||||
value = getattr(sa_model, attr)
|
||||
if six.PY2 and isinstance(value, six.text_type):
|
||||
ret[attr.encode('utf8')] = value.encode('utf8')
|
||||
else:
|
||||
ret[attr] = value
|
||||
return ret
|
||||
|
||||
ret_dict = to_dict(self, [
|
||||
'id', 'tenant_id', 'name', 'description', 'default_pool_id',
|
||||
'protocol', 'default_tls_container_id', 'protocol_port',
|
||||
'connection_limit', 'admin_state_up'])
|
||||
|
||||
# NOTE(blogan): Returning a list to future proof for M:N objects
|
||||
# that are not yet implemented.
|
||||
ret_dict['loadbalancers'] = []
|
||||
if self.loadbalancer:
|
||||
ret_dict['loadbalancers'].append({'id': self.loadbalancer.id})
|
||||
ret_dict['sni_container_refs'] = [container.tls_container_id
|
||||
for container in self.sni_containers]
|
||||
ret_dict['default_tls_container_ref'] = self.default_tls_container_id
|
||||
ret_dict['l7policies'] = [{'id': l7_policy.id}
|
||||
for l7_policy in self.l7_policies]
|
||||
return ret_dict
|
@ -1,4 +1,4 @@
# Copyright 2018 VMware, Inc.
# Copyright 2019 VMware, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@ -15,17 +15,26 @@

import mock
from neutron.tests import base
from neutron_lbaas.services.loadbalancer import data_models as lb_models
from neutron_lib import context
from neutron_lib import exceptions as n_exc

from vmware_nsx.db import db as nsx_db
from vmware_nsx.db import nsx_models
from vmware_nsx.services.lbaas import base_mgr
from vmware_nsx.services.lbaas.nsx_v3.implementation import healthmonitor_mgr
from vmware_nsx.services.lbaas.nsx_v3.implementation import l7policy_mgr
from vmware_nsx.services.lbaas.nsx_v3.implementation import l7rule_mgr
from vmware_nsx.services.lbaas.nsx_v3.implementation import lb_utils
from vmware_nsx.services.lbaas.nsx_v3.v2 import lb_driver_v2
from vmware_nsx.services.lbaas.nsx_v3.implementation import listener_mgr
from vmware_nsx.services.lbaas.nsx_v3.implementation import loadbalancer_mgr
from vmware_nsx.services.lbaas.nsx_v3.implementation import member_mgr
from vmware_nsx.services.lbaas.nsx_v3.implementation import pool_mgr
from vmware_nsx.services.lbaas.octavia import octavia_listener
from vmware_nsx.tests.unit.services.lbaas import lb_data_models as lb_models
from vmware_nsx.tests.unit.services.lbaas import lb_translators


# TODO(asarfaty): Use octavia models for those tests
LB_VIP = '10.0.0.10'
LB_ROUTER_ID = 'router-x'
ROUTER_ID = 'neutron-router-x'
@ -145,11 +154,29 @@ class BaseTestEdgeLbaasV2(base.BaseTestCase):
    def _tested_entity(self):
        return None

    def completor(self, success=True):
        self.last_completor_succees = success
        self.last_completor_called = True

    def setUp(self):
        super(BaseTestEdgeLbaasV2, self).setUp()

        self.last_completor_succees = False
        self.last_completor_called = False

        self.context = context.get_admin_context()
        self.edge_driver = lb_driver_v2.EdgeLoadbalancerDriverV2()
        octavia_objects = {
            'loadbalancer': loadbalancer_mgr.EdgeLoadBalancerManagerFromDict(),
            'listener': listener_mgr.EdgeListenerManagerFromDict(),
            'pool': pool_mgr.EdgePoolManagerFromDict(),
            'member': member_mgr.EdgeMemberManagerFromDict(),
            'healthmonitor':
                healthmonitor_mgr.EdgeHealthMonitorManagerFromDict(),
            'l7policy': l7policy_mgr.EdgeL7PolicyManagerFromDict(),
            'l7rule': l7rule_mgr.EdgeL7RuleManagerFromDict()}

        self.edge_driver = octavia_listener.NSXOctaviaListenerEndpoint(
            **octavia_objects)

        self.lbv2_driver = mock.Mock()
        self.core_plugin = mock.Mock()
@ -212,6 +239,30 @@ class BaseTestEdgeLbaasV2(base.BaseTestCase):
                                      value='val1',
                                      policy=self.l7policy)

        # Translate LBaaS objects to dictionaries
        self.lb_dict = lb_translators.lb_loadbalancer_obj_to_dict(
            self.lb)
        self.listener_dict = lb_translators.lb_listener_obj_to_dict(
            self.listener)
        self.https_listener_dict = lb_translators.lb_listener_obj_to_dict(
            self.https_listener)
        self.terminated_https_listener_dict = lb_translators.\
            lb_listener_obj_to_dict(self.terminated_https_listener)
        self.pool_dict = lb_translators.lb_pool_obj_to_dict(
            self.pool)
        self.pool_persistency_dict = lb_translators.lb_pool_obj_to_dict(
            self.pool_persistency)
        self.member_dict = lb_translators.lb_member_obj_to_dict(
            self.member)
        self.hm_dict = lb_translators.lb_hm_obj_to_dict(
            self.hm)
        self.hm_http_dict = lb_translators.lb_hm_obj_to_dict(
            self.hm_http)
        self.l7policy_dict = lb_translators.lb_l7policy_obj_to_dict(
            self.l7policy)
        self.l7rule_dict = lb_translators.lb_l7rule_obj_to_dict(
            self.l7rule)

    def tearDown(self):
        self._unpatch_lb_plugin(self.lbv2_driver, self._tested_entity)
        super(BaseTestEdgeLbaasV2, self).tearDown()
@ -280,13 +331,10 @@ class TestEdgeLbaasV2Loadbalancer(BaseTestEdgeLbaasV2):
|
||||
) as add_binding:
|
||||
mock_validate_lb_subnet.return_value = True
|
||||
|
||||
self.edge_driver.loadbalancer.create(self.context, self.lb)
|
||||
|
||||
mock_successful_completion = (
|
||||
self.lbv2_driver.load_balancer.successful_completion)
|
||||
mock_successful_completion.assert_called_with(self.context,
|
||||
self.lb,
|
||||
delete=False)
|
||||
self.edge_driver.loadbalancer.create(
|
||||
self.context, self.lb_dict, self.completor)
|
||||
self.assertTrue(self.last_completor_called)
|
||||
self.assertTrue(self.last_completor_succees)
|
||||
add_binding.assert_called_once_with(mock.ANY, LB_ID, LB_SERVICE_ID,
|
||||
LB_ROUTER_ID, LB_VIP)
|
||||
create_service.assert_called_once()
|
||||
@ -306,13 +354,10 @@ class TestEdgeLbaasV2Loadbalancer(BaseTestEdgeLbaasV2):
|
||||
) as add_binding:
|
||||
mock_validate_lb_subnet.return_value = True
|
||||
|
||||
self.edge_driver.loadbalancer.create(self.context, self.lb)
|
||||
|
||||
mock_successful_completion = (
|
||||
self.lbv2_driver.load_balancer.successful_completion)
|
||||
mock_successful_completion.assert_called_with(self.context,
|
||||
self.lb,
|
||||
delete=False)
|
||||
self.edge_driver.loadbalancer.create(self.context, self.lb_dict,
|
||||
self.completor)
|
||||
self.assertTrue(self.last_completor_called)
|
||||
self.assertTrue(self.last_completor_succees)
|
||||
add_binding.assert_called_once_with(mock.ANY, LB_ID, LB_SERVICE_ID,
|
||||
LB_ROUTER_ID, LB_VIP)
|
||||
create_service.assert_not_called()
|
||||
@ -333,13 +378,10 @@ class TestEdgeLbaasV2Loadbalancer(BaseTestEdgeLbaasV2):
|
||||
) as add_binding:
|
||||
mock_validate_lb_subnet.return_value = True
|
||||
|
||||
self.edge_driver.loadbalancer.create(self.context, self.lb)
|
||||
|
||||
mock_successful_completion = (
|
||||
self.lbv2_driver.load_balancer.successful_completion)
|
||||
mock_successful_completion.assert_called_with(self.context,
|
||||
self.lb,
|
||||
delete=False)
|
||||
self.edge_driver.loadbalancer.create(self.context, self.lb_dict,
|
||||
self.completor)
|
||||
self.assertTrue(self.last_completor_called)
|
||||
self.assertTrue(self.last_completor_succees)
|
||||
add_binding.assert_called_once_with(mock.ANY, LB_ID, LB_SERVICE_ID,
|
||||
lb_utils.NO_ROUTER_ID, LB_VIP)
|
||||
create_service.assert_called_once()
|
||||
@ -348,13 +390,11 @@ class TestEdgeLbaasV2Loadbalancer(BaseTestEdgeLbaasV2):
|
||||
new_lb = lb_models.LoadBalancer(LB_ID, 'yyy-yyy', 'lb1-new',
|
||||
'new-description', 'some-subnet',
|
||||
'port-id', LB_VIP)
|
||||
|
||||
self.edge_driver.loadbalancer.update(self.context, self.lb, new_lb)
|
||||
|
||||
mock_successful_completion = (
|
||||
self.lbv2_driver.load_balancer.successful_completion)
|
||||
mock_successful_completion.assert_called_with(self.context, new_lb,
|
||||
delete=False)
|
||||
new_lb_dict = lb_translators.lb_loadbalancer_obj_to_dict(new_lb)
|
||||
self.edge_driver.loadbalancer.update(self.context, self.lb_dict,
|
||||
new_lb_dict, self.completor)
|
||||
self.assertTrue(self.last_completor_called)
|
||||
self.assertTrue(self.last_completor_succees)
|
||||
|
||||
def test_delete(self):
|
||||
with mock.patch.object(nsx_db, 'get_nsx_lbaas_loadbalancer_binding'
|
||||
@ -370,17 +410,15 @@ class TestEdgeLbaasV2Loadbalancer(BaseTestEdgeLbaasV2):
|
||||
mock_get_lb_binding.return_value = LB_BINDING
|
||||
mock_get_lb_service.return_value = {'id': LB_SERVICE_ID}
|
||||
|
||||
self.edge_driver.loadbalancer.delete(self.context, self.lb)
|
||||
self.edge_driver.loadbalancer.delete(self.context, self.lb_dict,
|
||||
self.completor)
|
||||
|
||||
mock_delete_lb_service.assert_called_with(LB_SERVICE_ID)
|
||||
mock_get_neutron_from_nsx_router_id.router_id = ROUTER_ID
|
||||
mock_delete_lb_binding.assert_called_with(
|
||||
self.context.session, LB_ID)
|
||||
mock_successful_completion = (
|
||||
self.lbv2_driver.load_balancer.successful_completion)
|
||||
mock_successful_completion.assert_called_with(self.context,
|
||||
self.lb,
|
||||
delete=True)
|
||||
self.assertTrue(self.last_completor_called)
|
||||
self.assertTrue(self.last_completor_succees)
|
||||
|
||||
def test_stats(self):
|
||||
pass
|
||||
@ -445,22 +483,20 @@ class TestEdgeLbaasV2Listener(BaseTestEdgeLbaasV2):
|
||||
mock_create_app_profile.return_value = {'id': APP_PROFILE_ID}
|
||||
mock_create_virtual_server.return_value = {'id': LB_VS_ID}
|
||||
mock_get_lb_binding.return_value = LB_BINDING
|
||||
listener = self.listener
|
||||
listener = self.listener_dict
|
||||
if protocol == 'HTTPS':
|
||||
listener = self.https_listener
|
||||
listener = self.https_listener_dict
|
||||
|
||||
self.edge_driver.listener.create(self.context, listener)
|
||||
self.edge_driver.listener.create(self.context, listener,
|
||||
self.completor)
|
||||
|
||||
mock_add_virtual_server.assert_called_with(LB_SERVICE_ID,
|
||||
LB_VS_ID)
|
||||
mock_add_listener_binding.assert_called_with(
|
||||
self.context.session, LB_ID, listener.id, APP_PROFILE_ID,
|
||||
self.context.session, LB_ID, listener['id'], APP_PROFILE_ID,
|
||||
LB_VS_ID)
|
||||
mock_successful_completion = (
|
||||
self.lbv2_driver.listener.successful_completion)
|
||||
mock_successful_completion.assert_called_with(self.context,
|
||||
listener,
|
||||
delete=False)
|
||||
self.assertTrue(self.last_completor_called)
|
||||
self.assertTrue(self.last_completor_succees)
|
||||
|
||||
def test_create_http_listener(self):
|
||||
self._create_listener()
|
||||
@ -489,19 +525,18 @@ class TestEdgeLbaasV2Listener(BaseTestEdgeLbaasV2):
|
||||
mock_create_virtual_server.return_value = {'id': LB_VS_ID}
|
||||
mock_get_lb_binding.return_value = LB_BINDING
|
||||
|
||||
self.edge_driver.listener.create(self.context,
|
||||
self.terminated_https_listener)
|
||||
self.edge_driver.listener.create(
|
||||
self.context,
|
||||
self.terminated_https_listener_dict,
|
||||
self.completor)
|
||||
mock_add_virtual_server.assert_called_with(LB_SERVICE_ID,
|
||||
LB_VS_ID)
|
||||
mock_add_listener_binding.assert_called_with(
|
||||
self.context.session, LB_ID, HTTPS_LISTENER_ID, APP_PROFILE_ID,
|
||||
LB_VS_ID)
|
||||
|
||||
mock_successful_completion = (
|
||||
self.lbv2_driver.listener.successful_completion)
|
||||
mock_successful_completion.assert_called_with(
|
||||
self.context, self.terminated_https_listener,
|
||||
delete=False)
|
||||
self.assertTrue(self.last_completor_called)
|
||||
self.assertTrue(self.last_completor_succees)
|
||||
|
||||
def test_create_listener_with_default_pool(self):
|
||||
listener = lb_models.Listener(LISTENER_ID, LB_TENANT_ID,
|
||||
@ -509,6 +544,7 @@ class TestEdgeLbaasV2Listener(BaseTestEdgeLbaasV2):
|
||||
LB_ID, 'HTTP', protocol_port=80,
|
||||
loadbalancer=self.lb,
|
||||
default_pool=self.pool)
|
||||
listener_dict = lb_translators.lb_listener_obj_to_dict(listener)
|
||||
with mock.patch.object(self.core_plugin, 'get_floatingips'
|
||||
) as mock_get_floatingips, \
|
||||
mock.patch.object(self.app_client, 'create'
|
||||
@ -530,18 +566,16 @@ class TestEdgeLbaasV2Listener(BaseTestEdgeLbaasV2):
|
||||
mock_get_lb_binding.return_value = LB_BINDING
|
||||
mock_get_pool_binding.return_value = None
|
||||
|
||||
self.edge_driver.listener.create(self.context, listener)
|
||||
self.edge_driver.listener.create(self.context, listener_dict,
|
||||
self.completor)
|
||||
|
||||
mock_add_virtual_server.assert_called_with(LB_SERVICE_ID,
|
||||
LB_VS_ID)
|
||||
mock_add_listener_binding.assert_called_with(
|
||||
self.context.session, LB_ID, LISTENER_ID, APP_PROFILE_ID,
|
||||
LB_VS_ID)
|
||||
mock_successful_completion = (
|
||||
self.lbv2_driver.listener.successful_completion)
|
||||
mock_successful_completion.assert_called_with(self.context,
|
||||
listener,
|
||||
delete=False)
|
||||
self.assertTrue(self.last_completor_called)
|
||||
self.assertTrue(self.last_completor_succees)
|
||||
|
||||
def test_create_listener_with_used_default_pool(self):
|
||||
listener = lb_models.Listener(LISTENER_ID, LB_TENANT_ID,
|
||||
@ -549,6 +583,7 @@ class TestEdgeLbaasV2Listener(BaseTestEdgeLbaasV2):
|
||||
LB_ID, 'HTTP', protocol_port=80,
|
||||
loadbalancer=self.lb,
|
||||
default_pool=self.pool)
|
||||
listener_dict = lb_translators.lb_listener_obj_to_dict(listener)
|
||||
with mock.patch.object(self.core_plugin, 'get_floatingips'
|
||||
) as mock_get_floatingips, \
|
||||
mock.patch.object(nsx_db, 'get_nsx_lbaas_loadbalancer_binding'
|
||||
@ -561,7 +596,8 @@ class TestEdgeLbaasV2Listener(BaseTestEdgeLbaasV2):
|
||||
|
||||
self.assertRaises(n_exc.BadRequest,
|
||||
self.edge_driver.listener.create,
|
||||
self.context, listener)
|
||||
self.context, listener_dict,
|
||||
self.completor)
|
||||
|
||||
def test_create_listener_with_session_persistence(self):
|
||||
listener = lb_models.Listener(LISTENER_ID, LB_TENANT_ID,
|
||||
@ -570,6 +606,7 @@ class TestEdgeLbaasV2Listener(BaseTestEdgeLbaasV2):
|
||||
LB_ID, 'HTTP', protocol_port=80,
|
||||
loadbalancer=self.lb,
|
||||
default_pool=self.pool_persistency)
|
||||
listener_dict = lb_translators.lb_listener_obj_to_dict(listener)
|
||||
with mock.patch.object(self.core_plugin, 'get_floatingips'
|
||||
) as mock_get_floatingips, \
|
||||
mock.patch.object(self.app_client, 'create'
|
||||
@ -593,19 +630,16 @@ class TestEdgeLbaasV2Listener(BaseTestEdgeLbaasV2):
|
||||
mock_get_lb_binding.return_value = LB_BINDING
|
||||
mock_get_pool_binding.return_value = None
|
||||
|
||||
self.edge_driver.listener.create(self.context, listener)
|
||||
|
||||
self.edge_driver.listener.create(self.context, listener_dict,
|
||||
self.completor)
|
||||
mock_add_virtual_server.assert_called_with(LB_SERVICE_ID,
|
||||
LB_VS_ID)
|
||||
mock_add_listener_binding.assert_called_with(
|
||||
self.context.session, LB_ID, LISTENER_ID, APP_PROFILE_ID,
|
||||
LB_VS_ID)
|
||||
mock_create_pp.assert_called_once()
|
||||
mock_successful_completion = (
|
||||
self.lbv2_driver.listener.successful_completion)
|
||||
mock_successful_completion.assert_called_with(self.context,
|
||||
listener,
|
||||
delete=False)
|
||||
self.assertTrue(self.last_completor_called)
|
||||
self.assertTrue(self.last_completor_succees)
|
||||
|
||||
def test_create_listener_with_session_persistence_fail(self):
|
||||
listener = lb_models.Listener(LISTENER_ID, LB_TENANT_ID,
|
||||
@ -614,6 +648,7 @@ class TestEdgeLbaasV2Listener(BaseTestEdgeLbaasV2):
|
||||
LB_ID, 'TCP', protocol_port=80,
|
||||
loadbalancer=self.lb,
|
||||
default_pool=self.pool_persistency)
|
||||
listener_dict = lb_translators.lb_listener_obj_to_dict(listener)
|
||||
with mock.patch.object(self.core_plugin, 'get_floatingips'
|
||||
) as mock_get_floatingips, \
|
||||
mock.patch.object(nsx_db, 'get_nsx_lbaas_loadbalancer_binding'
|
||||
@ -626,13 +661,16 @@ class TestEdgeLbaasV2Listener(BaseTestEdgeLbaasV2):
|
||||
|
||||
self.assertRaises(n_exc.BadRequest,
|
||||
self.edge_driver.listener.create,
|
||||
self.context, listener)
|
||||
self.context, listener_dict,
|
||||
self.completor)
|
||||
|
||||
def test_update(self):
|
||||
new_listener = lb_models.Listener(LISTENER_ID, LB_TENANT_ID,
|
||||
'listener1-new', 'new-description',
|
||||
None, LB_ID, protocol_port=80,
|
||||
loadbalancer=self.lb)
|
||||
new_listener_dict = lb_translators.lb_listener_obj_to_dict(
|
||||
new_listener)
|
||||
with mock.patch.object(self.core_plugin, 'get_floatingips'
|
||||
) as mock_get_floatingips, \
|
||||
mock.patch.object(nsx_db, 'get_nsx_lbaas_listener_binding'
|
||||
@ -640,21 +678,21 @@ class TestEdgeLbaasV2Listener(BaseTestEdgeLbaasV2):
|
||||
mock_get_floatingips.return_value = []
|
||||
mock_get_listener_binding.return_value = LISTENER_BINDING
|
||||
|
||||
self.edge_driver.listener.update(self.context, self.listener,
|
||||
new_listener)
|
||||
|
||||
mock_successful_completion = (
|
||||
self.lbv2_driver.listener.successful_completion)
|
||||
mock_successful_completion.assert_called_with(self.context,
|
||||
new_listener,
|
||||
delete=False)
|
||||
self.edge_driver.listener.update(self.context, self.listener_dict,
|
||||
new_listener_dict,
|
||||
self.completor)
|
||||
self.assertTrue(self.last_completor_called)
|
||||
self.assertTrue(self.last_completor_succees)
|
||||
|
||||
def test_update_with_default_pool(self):
|
||||
self.assertFalse(self.last_completor_called)
|
||||
new_listener = lb_models.Listener(LISTENER_ID, LB_TENANT_ID,
|
||||
'listener1-new', 'new-description',
|
||||
self.pool, LB_ID, protocol_port=80,
|
||||
loadbalancer=self.lb,
|
||||
default_pool=self.pool)
|
||||
new_listener_dict = lb_translators.lb_listener_obj_to_dict(
|
||||
new_listener)
|
||||
with mock.patch.object(self.core_plugin, 'get_floatingips'
|
||||
) as mock_get_floatingips, \
|
||||
mock.patch.object(nsx_db, 'get_nsx_lbaas_listener_binding'
|
||||
@ -666,14 +704,10 @@ class TestEdgeLbaasV2Listener(BaseTestEdgeLbaasV2):
|
||||
mock_get_listener_binding.return_value = LISTENER_BINDING
|
||||
mock_get_pool_binding.return_value = POOL_BINDING
|
||||
|
||||
self.edge_driver.listener.update(self.context, self.listener,
|
||||
new_listener)
|
||||
|
||||
mock_successful_completion = (
|
||||
self.lbv2_driver.listener.successful_completion)
|
||||
mock_successful_completion.assert_called_with(self.context,
|
||||
new_listener,
|
||||
delete=False)
|
||||
self.edge_driver.listener.update(self.context, self.listener_dict,
|
||||
new_listener_dict, self.completor)
|
||||
self.assertTrue(self.last_completor_called)
|
||||
self.assertTrue(self.last_completor_succees)
|
||||
|
||||
def test_update_with_session_persistence(self):
|
||||
new_listener = lb_models.Listener(LISTENER_ID, LB_TENANT_ID,
|
||||
@ -683,6 +717,8 @@ class TestEdgeLbaasV2Listener(BaseTestEdgeLbaasV2):
|
||||
protocol_port=80,
|
||||
loadbalancer=self.lb,
|
||||
default_pool=self.pool_persistency)
|
||||
new_listener_dict = lb_translators.lb_listener_obj_to_dict(
|
||||
new_listener)
|
||||
with mock.patch.object(self.core_plugin, 'get_floatingips'
|
||||
) as mock_get_floatingips, \
|
||||
mock.patch.object(nsx_db, 'get_nsx_lbaas_listener_binding'
|
||||
@ -698,14 +734,11 @@ class TestEdgeLbaasV2Listener(BaseTestEdgeLbaasV2):
|
||||
mock_get_listener_binding.return_value = LISTENER_BINDING
|
||||
mock_get_pool_binding.return_value = POOL_BINDING
|
||||
|
||||
self.edge_driver.listener.update(self.context, self.listener,
|
||||
new_listener)
|
||||
self.edge_driver.listener.update(self.context, self.listener_dict,
|
||||
new_listener_dict, self.completor)
|
||||
mock_create_pp.assert_called_once()
|
||||
mock_successful_completion = (
|
||||
self.lbv2_driver.listener.successful_completion)
|
||||
mock_successful_completion.assert_called_with(self.context,
|
||||
new_listener,
|
||||
delete=False)
|
||||
self.assertTrue(self.last_completor_called)
|
||||
self.assertTrue(self.last_completor_succees)
|
||||
|
||||
def test_update_with_session_persistence_fail(self):
|
||||
old_listener = lb_models.Listener(LISTENER_ID, LB_TENANT_ID,
|
||||
@ -715,6 +748,8 @@ class TestEdgeLbaasV2Listener(BaseTestEdgeLbaasV2):
|
||||
protocol_port=80,
|
||||
loadbalancer=self.lb,
|
||||
default_pool=self.pool_persistency)
|
||||
old_listener_dict = lb_translators.lb_listener_obj_to_dict(
|
||||
old_listener)
|
||||
sess_persistence = lb_models.SessionPersistence(
|
||||
POOL_ID, 'SOURCE_IP')
|
||||
pool_persistency = lb_models.Pool(POOL_ID, LB_TENANT_ID,
|
||||
@ -731,6 +766,8 @@ class TestEdgeLbaasV2Listener(BaseTestEdgeLbaasV2):
|
||||
protocol_port=80,
|
||||
loadbalancer=self.lb,
|
||||
default_pool=pool_persistency)
|
||||
new_listener_dict = lb_translators.lb_listener_obj_to_dict(
|
||||
new_listener)
|
||||
with mock.patch.object(self.core_plugin, 'get_floatingips'
|
||||
) as mock_get_floatingips, \
|
||||
mock.patch.object(nsx_db, 'get_nsx_lbaas_listener_binding'
|
||||
@ -743,7 +780,8 @@ class TestEdgeLbaasV2Listener(BaseTestEdgeLbaasV2):
|
||||
|
||||
self.assertRaises(n_exc.BadRequest,
|
||||
self.edge_driver.listener.update,
|
||||
self.context, old_listener, new_listener)
|
||||
self.context, old_listener_dict,
|
||||
new_listener_dict, self.completor)
|
||||
|
||||
def test_delete(self):
|
||||
with mock.patch.object(nsx_db, 'get_nsx_lbaas_listener_binding'
|
||||
@ -769,7 +807,8 @@ class TestEdgeLbaasV2Listener(BaseTestEdgeLbaasV2):
|
||||
'id': LB_SERVICE_ID,
|
||||
'virtual_server_ids': [LB_VS_ID]}
|
||||
|
||||
self.edge_driver.listener.delete(self.context, self.listener)
|
||||
self.edge_driver.listener.delete(self.context, self.listener_dict,
|
||||
self.completor)
|
||||
|
||||
mock_remove_virtual_server.assert_called_with(LB_SERVICE_ID,
|
||||
LB_VS_ID)
|
||||
@ -778,12 +817,8 @@ class TestEdgeLbaasV2Listener(BaseTestEdgeLbaasV2):
|
||||
|
||||
mock_delete_listener_binding.assert_called_with(
|
||||
self.context.session, LB_ID, LISTENER_ID)
|
||||
|
||||
mock_successful_completion = (
|
||||
self.lbv2_driver.listener.successful_completion)
|
||||
mock_successful_completion.assert_called_with(self.context,
|
||||
self.listener,
|
||||
delete=True)
|
||||
self.assertTrue(self.last_completor_called)
|
||||
self.assertTrue(self.last_completor_succees)
|
||||
|
||||
|
||||
class TestEdgeLbaasV2Pool(BaseTestEdgeLbaasV2):
|
||||
@ -810,7 +845,8 @@ class TestEdgeLbaasV2Pool(BaseTestEdgeLbaasV2):
|
||||
mock_create_pool.return_value = {'id': LB_POOL_ID}
|
||||
mock_get_listener_binding.return_value = LISTENER_BINDING
|
||||
|
||||
self.edge_driver.pool.create(self.context, self.pool)
|
||||
self.edge_driver.pool.create(self.context, self.pool_dict,
|
||||
self.completor)
|
||||
|
||||
mock_add_pool_binding.assert_called_with(
|
||||
self.context.session, LB_ID, POOL_ID, LB_POOL_ID)
|
||||
@ -819,11 +855,8 @@ class TestEdgeLbaasV2Pool(BaseTestEdgeLbaasV2):
|
||||
LB_VS_ID, pool_id=LB_POOL_ID, persistence_profile_id=None)
|
||||
mock_update_pool_binding.assert_called_with(
|
||||
self.context.session, LB_ID, POOL_ID, LB_VS_ID)
|
||||
mock_successful_completion = (
|
||||
self.lbv2_driver.pool.successful_completion)
|
||||
mock_successful_completion.assert_called_with(self.context,
|
||||
self.pool,
|
||||
delete=False)
|
||||
self.assertTrue(self.last_completor_called)
|
||||
self.assertTrue(self.last_completor_succees)
|
||||
|
||||
def _test_create_with_persistency(self, vs_data, verify_func):
|
||||
with mock.patch.object(self.pool_client, 'create'
|
||||
@ -848,16 +881,15 @@ class TestEdgeLbaasV2Pool(BaseTestEdgeLbaasV2):
|
||||
mock_create_pp.return_value = {'id': LB_PP_ID}
|
||||
mock_get_listener_binding.return_value = LISTENER_BINDING
|
||||
|
||||
self.edge_driver.pool.create(self.context, self.pool_persistency)
|
||||
self.edge_driver.pool.create(
|
||||
self.context, self.pool_persistency_dict, self.completor)
|
||||
|
||||
mock_add_pool_binding.assert_called_with(
|
||||
self.context.session, LB_ID, POOL_ID, LB_POOL_ID)
|
||||
verify_func(mock_create_pp, mock_update_pp,
|
||||
mock_update_pool_binding, mock_vs_update)
|
||||
mock_successful_completion = (
|
||||
self.lbv2_driver.pool.successful_completion)
|
||||
mock_successful_completion.assert_called_with(
|
||||
self.context, self.pool_persistency, delete=False)
|
||||
self.assertTrue(self.last_completor_called)
|
||||
self.assertTrue(self.last_completor_succees)
|
||||
|
||||
def test_create_with_persistency(self):
|
||||
|
||||
@ -908,8 +940,8 @@ class TestEdgeLbaasV2Pool(BaseTestEdgeLbaasV2):
|
||||
|
||||
vs_data = {'id': LB_VS_ID,
|
||||
'persistence_profile_id': LB_PP_ID}
|
||||
self.pool_persistency.listener = None
|
||||
self.pool_persistency.listeners = []
|
||||
self.pool_persistency_dict['listener'] = None
|
||||
self.pool_persistency_dict['listeners'] = []
|
||||
self._test_create_with_persistency(vs_data, verify_func)
|
||||
|
||||
def test_create_multiple_listeners(self):
|
||||
@ -920,25 +952,25 @@ class TestEdgeLbaasV2Pool(BaseTestEdgeLbaasV2):
|
||||
listeners=[self.listener,
|
||||
self.https_listener],
|
||||
loadbalancer=self.lb)
|
||||
pool_dict = lb_translators.lb_pool_obj_to_dict(pool)
|
||||
self.assertRaises(n_exc.BadRequest,
|
||||
self.edge_driver.pool.create,
|
||||
self.context, pool)
|
||||
self.context, pool_dict, self.completor)
|
||||
|
||||
def test_update(self):
|
||||
new_pool = lb_models.Pool(POOL_ID, LB_TENANT_ID, 'pool-name', '',
|
||||
None, 'HTTP', 'LEAST_CONNECTIONS',
|
||||
listener=self.listener)
|
||||
new_pool_dict = lb_translators.lb_pool_obj_to_dict(new_pool)
|
||||
with mock.patch.object(nsx_db, 'get_nsx_lbaas_pool_binding'
|
||||
) as mock_get_pool_binding:
|
||||
mock_get_pool_binding.return_value = POOL_BINDING
|
||||
|
||||
self.edge_driver.pool.update(self.context, self.pool, new_pool)
|
||||
|
||||
mock_successful_completion = (
|
||||
self.lbv2_driver.pool.successful_completion)
|
||||
mock_successful_completion.assert_called_with(self.context,
|
||||
new_pool,
|
||||
delete=False)
|
||||
self.edge_driver.pool.update(self.context, self.pool_dict,
|
||||
new_pool_dict,
|
||||
self.completor)
|
||||
self.assertTrue(self.last_completor_called)
|
||||
self.assertTrue(self.last_completor_succees)
|
||||
|
||||
def test_update_multiple_listeners(self):
|
||||
"""Verify update action will fail if multiple listeners are set"""
|
||||
@ -948,15 +980,19 @@ class TestEdgeLbaasV2Pool(BaseTestEdgeLbaasV2):
|
||||
listeners=[self.listener,
|
||||
self.https_listener],
|
||||
loadbalancer=self.lb)
|
||||
new_pool_dict = lb_translators.lb_pool_obj_to_dict(new_pool)
|
||||
with mock.patch.object(nsx_db, 'get_nsx_lbaas_pool_binding'
|
||||
) as mock_get_pool_binding:
|
||||
mock_get_pool_binding.return_value = POOL_BINDING
|
||||
self.assertRaises(n_exc.BadRequest,
|
||||
self.edge_driver.pool.update,
|
||||
self.context, self.pool, new_pool)
|
||||
self.context, self.pool_dict, new_pool_dict,
|
||||
self.completor)
|
||||
|
||||
def _test_update_with_persistency(self, vs_data, old_pool, new_pool,
|
||||
verify_func):
|
||||
old_pool_dict = lb_translators.lb_pool_obj_to_dict(old_pool)
|
||||
new_pool_dict = lb_translators.lb_pool_obj_to_dict(new_pool)
|
||||
with mock.patch.object(nsx_db, 'get_nsx_lbaas_pool_binding'
|
||||
) as mock_get_pool_binding, \
|
||||
mock.patch.object(self.pp_client, 'create'
|
||||
@ -974,14 +1010,13 @@ class TestEdgeLbaasV2Pool(BaseTestEdgeLbaasV2):
|
||||
mock_get_pool_binding.return_value = POOL_BINDING
|
||||
mock_create_pp.return_value = {'id': LB_PP_ID}
|
||||
|
||||
self.edge_driver.pool.update(self.context, old_pool, new_pool)
|
||||
self.edge_driver.pool.update(self.context, old_pool_dict,
|
||||
new_pool_dict, self.completor)
|
||||
|
||||
verify_func(mock_create_pp, mock_update_pp,
|
||||
mock_delete_pp, mock_vs_update)
|
||||
mock_successful_completion = (
|
||||
self.lbv2_driver.pool.successful_completion)
|
||||
mock_successful_completion.assert_called_with(
|
||||
self.context, new_pool, delete=False)
|
||||
self.assertTrue(self.last_completor_called)
|
||||
self.assertTrue(self.last_completor_succees)
|
||||
|
||||
def test_update_with_persistency(self):
|
||||
|
||||
@ -1033,19 +1068,16 @@ class TestEdgeLbaasV2Pool(BaseTestEdgeLbaasV2):
|
||||
mock_get_neutron_from_nsx_router_id.router_id = ROUTER_ID
|
||||
mock_get_lb_binding.return_value = None
|
||||
|
||||
self.edge_driver.pool.delete(self.context, self.pool)
|
||||
self.edge_driver.pool.delete(self.context, self.pool_dict,
|
||||
self.completor)
|
||||
|
||||
mock_update_virtual_server.assert_called_with(
|
||||
LB_VS_ID, persistence_profile_id=None, pool_id=None)
|
||||
mock_delete_pool.assert_called_with(LB_POOL_ID)
|
||||
mock_delete_pool_binding.assert_called_with(
|
||||
self.context.session, LB_ID, POOL_ID)
|
||||
|
||||
mock_successful_completion = (
|
||||
self.lbv2_driver.pool.successful_completion)
|
||||
mock_successful_completion.assert_called_with(self.context,
|
||||
self.pool,
|
||||
delete=True)
|
||||
self.assertTrue(self.last_completor_called)
|
||||
self.assertTrue(self.last_completor_succees)
|
||||
|
||||
def test_delete_with_persistency(self):
|
||||
with mock.patch.object(nsx_db, 'get_nsx_lbaas_pool_binding'
|
||||
@ -1067,7 +1099,8 @@ class TestEdgeLbaasV2Pool(BaseTestEdgeLbaasV2):
|
||||
mock_vs_get.return_value = {'id': LB_VS_ID,
|
||||
'persistence_profile_id': LB_PP_ID}
|
||||
|
||||
self.edge_driver.pool.delete(self.context, self.pool_persistency)
|
||||
self.edge_driver.pool.delete(
|
||||
self.context, self.pool_persistency_dict, self.completor)
|
||||
|
||||
mock_delete_pp.assert_called_once_with(LB_PP_ID)
|
||||
mock_update_virtual_server.assert_called_once_with(
|
||||
@ -1075,11 +1108,8 @@ class TestEdgeLbaasV2Pool(BaseTestEdgeLbaasV2):
|
||||
mock_delete_pool.assert_called_with(LB_POOL_ID)
|
||||
mock_delete_pool_binding.assert_called_with(
|
||||
self.context.session, LB_ID, POOL_ID)
|
||||
|
||||
mock_successful_completion = (
|
||||
self.lbv2_driver.pool.successful_completion)
|
||||
mock_successful_completion.assert_called_with(
|
||||
self.context, self.pool_persistency, delete=True)
|
||||
self.assertTrue(self.last_completor_called)
|
||||
self.assertTrue(self.last_completor_succees)
|
||||
|
||||
def _verify_create(self, res_type, cookie_name, cookie_mode,
|
||||
mock_create_pp, mock_update_pp):
|
||||
@ -1140,10 +1170,9 @@ class TestEdgeLbaasV2Pool(BaseTestEdgeLbaasV2):
|
||||
|
||||
mock_create_pp.return_value = {'id': LB_PP_ID}
|
||||
self.pool.session_persistence = session_persistence
|
||||
pool_dict = self.edge_driver.pool.translator(self.pool)
|
||||
list_dict = self.edge_driver.listener.translator(self.listener)
|
||||
pool_dict = lb_translators.lb_pool_obj_to_dict(self.pool)
|
||||
pp_id, post_func = lb_utils.setup_session_persistence(
|
||||
self.nsxlib, pool_dict, [], list_dict, vs_data)
|
||||
self.nsxlib, pool_dict, [], self.listener_dict, vs_data)
|
||||
|
||||
if session_persistence:
|
||||
self.assertEqual(LB_PP_ID, pp_id)
|
||||
@ -1250,14 +1279,12 @@ class TestEdgeLbaasV2Member(BaseTestEdgeLbaasV2):
|
||||
mock_get_lb_service.return_value = {'id': LB_SERVICE_ID}
|
||||
mock_get_pool.return_value = LB_POOL
|
||||
|
||||
self.edge_driver.member.create(self.context, self.member)
|
||||
self.edge_driver.member.create(
|
||||
self.context, self.member_dict, self.completor)
|
||||
mock_update_pool_with_members.assert_called_with(LB_POOL_ID,
|
||||
[LB_MEMBER])
|
||||
mock_successful_completion = (
|
||||
self.lbv2_driver.member.successful_completion)
|
||||
mock_successful_completion.assert_called_with(self.context,
|
||||
self.member,
|
||||
delete=False)
|
||||
self.assertTrue(self.last_completor_called)
|
||||
self.assertTrue(self.last_completor_succees)
|
||||
|
||||
def test_create_external_vip(self):
|
||||
with mock.patch.object(lb_utils, 'validate_lb_member_subnet'
|
||||
@ -1294,14 +1321,12 @@ class TestEdgeLbaasV2Member(BaseTestEdgeLbaasV2):
|
||||
mock_get_lb_service.return_value = {'id': LB_SERVICE_ID}
|
||||
mock_get_pool.return_value = LB_POOL
|
||||
|
||||
self.edge_driver.member.create(self.context, self.member)
|
||||
self.edge_driver.member.create(
|
||||
self.context, self.member_dict, self.completor)
|
||||
mock_update_pool_with_members.assert_called_with(LB_POOL_ID,
|
||||
[LB_MEMBER])
|
||||
mock_successful_completion = (
|
||||
self.lbv2_driver.member.successful_completion)
|
||||
mock_successful_completion.assert_called_with(self.context,
|
||||
self.member,
|
||||
delete=False)
|
||||
self.assertTrue(self.last_completor_called)
|
||||
self.assertTrue(self.last_completor_succees)
|
||||
mock_update_lb_binding.assert_called_once_with(
|
||||
mock.ANY, LB_ID, LB_ROUTER_ID)
|
||||
|
||||
@ -1319,12 +1344,14 @@ class TestEdgeLbaasV2Member(BaseTestEdgeLbaasV2):
|
||||
self.assertRaises(n_exc.BadRequest,
|
||||
self.edge_driver.member.create,
|
||||
self.context,
|
||||
self.member)
|
||||
self.member_dict,
|
||||
self.completor)
|
||||
|
||||
def test_update(self):
|
||||
new_member = lb_models.Member(MEMBER_ID, LB_TENANT_ID, POOL_ID,
|
||||
MEMBER_ADDRESS, 80, 2, pool=self.pool,
|
||||
name='member-nnn-nnn')
|
||||
new_member_dict = lb_translators.lb_member_obj_to_dict(new_member)
|
||||
with mock.patch.object(nsx_db, 'get_nsx_lbaas_pool_binding'
|
||||
) as mock_get_pool_binding, \
|
||||
mock.patch.object(self.pool_client, 'get'
|
||||
@ -1335,14 +1362,10 @@ class TestEdgeLbaasV2Member(BaseTestEdgeLbaasV2):
|
||||
mock_get_pool.return_value = LB_POOL_WITH_MEMBER
|
||||
mock_get_network_from_subnet.return_value = LB_NETWORK
|
||||
|
||||
self.edge_driver.member.update(self.context, self.member,
|
||||
new_member)
|
||||
|
||||
mock_successful_completion = (
|
||||
self.lbv2_driver.member.successful_completion)
|
||||
mock_successful_completion.assert_called_with(self.context,
|
||||
new_member,
|
||||
delete=False)
|
||||
self.edge_driver.member.update(self.context, self.member_dict,
|
||||
new_member_dict, self.completor)
|
||||
self.assertTrue(self.last_completor_called)
|
||||
self.assertTrue(self.last_completor_succees)
|
||||
|
||||
def test_delete(self):
|
||||
with mock.patch.object(nsx_db, 'get_nsx_lbaas_pool_binding'
|
||||
@ -1360,15 +1383,12 @@ class TestEdgeLbaasV2Member(BaseTestEdgeLbaasV2):
|
||||
mock_get_network_from_subnet.return_value = LB_NETWORK
|
||||
mock_get_neutron_from_nsx_router_id.router_id = ROUTER_ID
|
||||
|
||||
self.edge_driver.member.delete(self.context, self.member)
|
||||
self.edge_driver.member.delete(self.context, self.member_dict,
|
||||
self.completor)
|
||||
|
||||
mock_update_pool_with_members.assert_called_with(LB_POOL_ID, [])
|
||||
|
||||
mock_successful_completion = (
|
||||
self.lbv2_driver.member.successful_completion)
|
||||
mock_successful_completion.assert_called_with(self.context,
|
||||
self.member,
|
||||
delete=True)
|
||||
self.assertTrue(self.last_completor_called)
|
||||
self.assertTrue(self.last_completor_succees)
|
||||
|
||||
|
||||
class TestEdgeLbaasV2HealthMonitor(BaseTestEdgeLbaasV2):
|
||||
@ -1391,19 +1411,16 @@ class TestEdgeLbaasV2HealthMonitor(BaseTestEdgeLbaasV2):
|
||||
mock_create_monitor.return_value = {'id': LB_MONITOR_ID}
|
||||
mock_get_pool_binding.return_value = POOL_BINDING
|
||||
|
||||
self.edge_driver.healthmonitor.create(self.context, self.hm)
|
||||
self.edge_driver.healthmonitor.create(
|
||||
self.context, self.hm_dict, self.completor)
|
||||
|
||||
mock_add_monitor_to_pool.assert_called_with(LB_POOL_ID,
|
||||
LB_MONITOR_ID)
|
||||
mock_add_monitor_binding.assert_called_with(
|
||||
self.context.session, LB_ID, POOL_ID, HM_ID, LB_MONITOR_ID,
|
||||
LB_POOL_ID)
|
||||
|
||||
mock_successful_completion = (
|
||||
self.lbv2_driver.health_monitor.successful_completion)
|
||||
mock_successful_completion.assert_called_with(self.context,
|
||||
self.hm,
|
||||
delete=False)
|
||||
self.assertTrue(self.last_completor_called)
|
||||
self.assertTrue(self.last_completor_succees)
|
||||
|
||||
def test_create_http(self):
|
||||
with mock.patch.object(self.monitor_client, 'create'
|
||||
@ -1418,7 +1435,8 @@ class TestEdgeLbaasV2HealthMonitor(BaseTestEdgeLbaasV2):
|
||||
mock_get_pool_binding.return_value = POOL_BINDING
|
||||
|
||||
# Verify HTTP-specific monitor parameters are added
|
||||
self.edge_driver.healthmonitor.create(self.context, self.hm_http)
|
||||
self.edge_driver.healthmonitor.create(
|
||||
self.context, self.hm_http_dict, self.completor)
|
||||
self.assertEqual(1, len(mock_create_monitor.mock_calls))
|
||||
kw_args = mock_create_monitor.mock_calls[0][2]
|
||||
self.assertEqual(self.hm_http.http_method,
|
||||
@ -1430,12 +1448,8 @@ class TestEdgeLbaasV2HealthMonitor(BaseTestEdgeLbaasV2):
|
||||
mock_add_monitor_binding.assert_called_with(
|
||||
self.context.session, LB_ID, POOL_ID, HM_ID, LB_MONITOR_ID,
|
||||
LB_POOL_ID)
|
||||
|
||||
mock_successful_completion = (
|
||||
self.lbv2_driver.health_monitor.successful_completion)
|
||||
mock_successful_completion.assert_called_with(self.context,
|
||||
self.hm_http,
|
||||
delete=False)
|
||||
self.assertTrue(self.last_completor_called)
|
||||
self.assertTrue(self.last_completor_succees)
|
||||
|
||||
def test_update(self):
|
||||
with mock.patch.object(self.monitor_client, 'update'
|
||||
@ -1446,17 +1460,15 @@ class TestEdgeLbaasV2HealthMonitor(BaseTestEdgeLbaasV2):
|
||||
new_hm = lb_models.HealthMonitor(
|
||||
HM_ID, LB_TENANT_ID, 'PING', 5, 5,
|
||||
5, pool=self.pool, name='new_name')
|
||||
new_hm_dict = lb_translators.lb_hm_obj_to_dict(new_hm)
|
||||
self.edge_driver.healthmonitor.update(
|
||||
self.context, self.hm, new_hm)
|
||||
self.context, self.hm_dict, new_hm_dict, self.completor)
|
||||
mock_update_monitor.assert_called_with(
|
||||
LB_MONITOR_ID, display_name=mock.ANY,
|
||||
fall_count=5, interval=5, timeout=5,
|
||||
resource_type='LbIcmpMonitor')
|
||||
|
||||
mock_successful_completion = (
|
||||
self.lbv2_driver.health_monitor.successful_completion)
|
||||
mock_successful_completion.assert_called_with(self.context, new_hm,
|
||||
delete=False)
|
||||
self.assertTrue(self.last_completor_called)
|
||||
self.assertTrue(self.last_completor_succees)
|
||||
|
||||
def test_delete(self):
|
||||
with mock.patch.object(nsx_db, 'get_nsx_lbaas_monitor_binding'
|
||||
@ -1471,7 +1483,8 @@ class TestEdgeLbaasV2HealthMonitor(BaseTestEdgeLbaasV2):
|
||||
) as mock_delete_monitor_binding:
|
||||
mock_get_monitor_binding.return_value = HM_BINDING
|
||||
|
||||
self.edge_driver.healthmonitor.delete(self.context, self.hm)
|
||||
self.edge_driver.healthmonitor.delete(
|
||||
self.context, self.hm_dict, self.completor)
|
||||
|
||||
mock_remove_monitor_from_pool.assert_called_with(LB_POOL_ID,
|
||||
LB_MONITOR_ID)
|
||||
@ -1479,12 +1492,8 @@ class TestEdgeLbaasV2HealthMonitor(BaseTestEdgeLbaasV2):
|
||||
mock_delete_monitor.assert_called_with(LB_MONITOR_ID)
|
||||
mock_delete_monitor_binding.assert_called_with(
|
||||
self.context.session, LB_ID, POOL_ID, HM_ID)
|
||||
|
||||
mock_successful_completion = (
|
||||
self.lbv2_driver.health_monitor.successful_completion)
|
||||
mock_successful_completion.assert_called_with(self.context,
|
||||
self.hm,
|
||||
delete=True)
|
||||
self.assertTrue(self.last_completor_called)
|
||||
self.assertTrue(self.last_completor_succees)
|
||||
|
||||
|
||||
class TestEdgeLbaasV2L7Policy(BaseTestEdgeLbaasV2):
|
||||
@ -1513,17 +1522,15 @@ class TestEdgeLbaasV2L7Policy(BaseTestEdgeLbaasV2):
|
||||
mock_create_rule.return_value = {'id': LB_RULE_ID}
|
||||
mock_get_virtual_server.return_value = {'id': LB_VS_ID}
|
||||
|
||||
self.edge_driver.l7policy.create(self.context, self.l7policy)
|
||||
self.edge_driver.l7policy.create(
|
||||
self.context, self.l7policy_dict, self.completor)
|
||||
|
||||
mock_update_virtual_server.assert_called_with(
|
||||
LB_VS_ID, rule_ids=[LB_RULE_ID])
|
||||
mock_add_l7policy_binding.assert_called_with(
|
||||
self.context.session, L7POLICY_ID, LB_RULE_ID, LB_VS_ID)
|
||||
mock_successful_completion = (
|
||||
self.lbv2_driver.l7policy.successful_completion)
|
||||
mock_successful_completion.assert_called_with(self.context,
|
||||
self.l7policy,
|
||||
delete=False)
|
||||
self.assertTrue(self.last_completor_called)
|
||||
self.assertTrue(self.last_completor_succees)
|
||||
|
||||
def test_update(self):
|
||||
new_l7policy = lb_models.L7Policy(L7POLICY_ID, LB_TENANT_ID,
|
||||
@ -1532,6 +1539,7 @@ class TestEdgeLbaasV2L7Policy(BaseTestEdgeLbaasV2):
|
||||
action='REJECT',
|
||||
listener=self.listener,
|
||||
position=2)
|
||||
new_policy_dict = lb_translators.lb_l7policy_obj_to_dict(new_l7policy)
|
||||
vs_with_rules = {
|
||||
'id': LB_VS_ID,
|
||||
'rule_ids': [LB_RULE_ID, 'abc', 'xyz']
|
||||
@ -1558,18 +1566,15 @@ class TestEdgeLbaasV2L7Policy(BaseTestEdgeLbaasV2):
|
||||
mock_get_pool_binding.return_value = POOL_BINDING
|
||||
mock_get_virtual_server.return_value = vs_with_rules
|
||||
|
||||
self.edge_driver.l7policy.update(self.context, self.l7policy,
|
||||
new_l7policy)
|
||||
self.edge_driver.l7policy.update(self.context, self.l7policy_dict,
|
||||
new_policy_dict, self.completor)
|
||||
|
||||
mock_update_rule.assert_called_with(LB_RULE_ID,
|
||||
**rule_body)
|
||||
mock_update_virtual_server.assert_called_with(
|
||||
LB_VS_ID, rule_ids=['abc', LB_RULE_ID, 'xyz'])
|
||||
mock_successful_completion = (
|
||||
self.lbv2_driver.l7policy.successful_completion)
|
||||
mock_successful_completion.assert_called_with(self.context,
|
||||
new_l7policy,
|
||||
delete=False)
|
||||
self.assertTrue(self.last_completor_called)
|
||||
self.assertTrue(self.last_completor_succees)
|
||||
|
||||
def test_delete(self):
|
||||
with mock.patch.object(nsx_db, 'get_nsx_lbaas_l7policy_binding'
|
||||
@ -1585,17 +1590,16 @@ class TestEdgeLbaasV2L7Policy(BaseTestEdgeLbaasV2):
|
||||
mock_get_l7policy_binding.return_value = L7POLICY_BINDING
|
||||
mock_get_neutron_from_nsx_router_id.return_value = LB_ROUTER_ID
|
||||
|
||||
self.edge_driver.l7policy.delete(self.context, self.l7policy)
|
||||
self.edge_driver.l7policy.delete(
|
||||
self.context, self.l7policy_dict, self.completor)
|
||||
|
||||
mock_vs_remove_rule.assert_called_with(LB_VS_ID, LB_RULE_ID)
|
||||
mock_delete_rule.assert_called_with(LB_RULE_ID)
|
||||
mock_get_neutron_from_nsx_router_id.router_id = ROUTER_ID
|
||||
mock_delete_l7policy_binding.assert_called_with(
|
||||
self.context.session, L7POLICY_ID)
|
||||
mock_successful_completion = (
|
||||
self.lbv2_driver.l7policy.successful_completion)
|
||||
mock_successful_completion.assert_called_with(
|
||||
self.context, self.l7policy, delete=True)
|
||||
self.assertTrue(self.last_completor_called)
|
||||
self.assertTrue(self.last_completor_succees)
|
||||
|
||||
|
||||
class TestEdgeLbaasV2L7Rule(BaseTestEdgeLbaasV2):
|
||||
@ -1630,16 +1634,13 @@ class TestEdgeLbaasV2L7Rule(BaseTestEdgeLbaasV2):
|
||||
mock_get_l7policy_binding.return_value = L7POLICY_BINDING
|
||||
mock_get_pool_binding.return_value = POOL_BINDING
|
||||
|
||||
self.edge_driver.l7rule.create(self.context, self.l7rule)
|
||||
self.edge_driver.l7rule.create(
|
||||
self.context, self.l7rule_dict, self.completor)
|
||||
|
||||
mock_update_rule.assert_called_with(LB_RULE_ID,
|
||||
**create_rule_body)
|
||||
|
||||
mock_successful_completion = (
|
||||
self.lbv2_driver.l7rule.successful_completion)
|
||||
mock_successful_completion.assert_called_with(self.context,
|
||||
self.l7rule,
|
||||
delete=False)
|
||||
self.assertTrue(self.last_completor_called)
|
||||
self.assertTrue(self.last_completor_succees)
|
||||
|
||||
def test_update(self):
|
||||
new_l7rule = lb_models.L7Rule(L7RULE_ID, LB_TENANT_ID,
|
||||
@ -1650,6 +1651,7 @@ class TestEdgeLbaasV2L7Rule(BaseTestEdgeLbaasV2):
|
||||
key='cookie1',
|
||||
value='xxxxx',
|
||||
policy=self.l7policy)
|
||||
new_rule_dict = lb_translators.lb_l7rule_obj_to_dict(new_l7rule)
|
||||
self.l7policy.rules = [new_l7rule]
|
||||
update_rule_body = {
|
||||
'match_conditions': [{
|
||||
@ -1673,17 +1675,13 @@ class TestEdgeLbaasV2L7Rule(BaseTestEdgeLbaasV2):
|
||||
mock_get_l7policy_binding.return_value = L7POLICY_BINDING
|
||||
mock_get_pool_binding.return_value = POOL_BINDING
|
||||
|
||||
self.edge_driver.l7rule.update(self.context, self.l7rule,
|
||||
new_l7rule)
|
||||
self.edge_driver.l7rule.update(self.context, self.l7rule_dict,
|
||||
new_rule_dict, self.completor)
|
||||
|
||||
mock_update_rule.assert_called_with(LB_RULE_ID,
|
||||
**update_rule_body)
|
||||
|
||||
mock_successful_completion = (
|
||||
self.lbv2_driver.l7rule.successful_completion)
|
||||
mock_successful_completion.assert_called_with(self.context,
|
||||
new_l7rule,
|
||||
delete=False)
|
||||
self.assertTrue(self.last_completor_called)
|
||||
self.assertTrue(self.last_completor_succees)
|
||||
|
||||
def test_delete(self):
|
||||
self.l7policy.rules = [self.l7rule]
|
||||
@ -1707,14 +1705,11 @@ class TestEdgeLbaasV2L7Rule(BaseTestEdgeLbaasV2):
|
||||
mock_get_l7policy_binding.return_value = L7POLICY_BINDING
|
||||
mock_get_pool_binding.return_value = POOL_BINDING
|
||||
|
||||
self.edge_driver.l7rule.delete(self.context, self.l7rule)
|
||||
self.edge_driver.l7rule.delete(
|
||||
self.context, self.l7rule_dict, self.completor)
|
||||
|
||||
mock_update_rule.assert_called_with(LB_RULE_ID,
|
||||
**delete_rule_body)
|
||||
mock_get_neutron_from_nsx_router_id.router_id = ROUTER_ID
|
||||
|
||||
mock_successful_completion = (
|
||||
self.lbv2_driver.l7rule.successful_completion)
|
||||
mock_successful_completion.assert_called_with(self.context,
|
||||
self.l7rule,
|
||||
delete=True)
|
||||
self.assertTrue(self.last_completor_called)
|
||||
self.assertTrue(self.last_completor_succees)
|
||||
|