Browse Source

Get Me A LB

Allows a request to be passed to the LBaaS API that contains the full graph
structure of a load balancer.  This means that an entire load balancer graph,
or a partial one, can be created in a single POST to the API.

This adds a new method to the driver interface that, when implemented,
will be called whenever creation of a full or partial graph is attempted.  If
the driver does not implement this method, the request is rejected.

Co-Authored-By: Trevor Vardeman <trevor.vardeman@rackspace.com>

Closes-Bug: #1463202
Depends-On: Id3a5ddb8efded8c6ad72a7118424ec01c777318d
Change-Id: Ic16ca4cb3cb407d9e91eea74315a39bf86c654f3
changes/01/257201/36
Trevor Vardeman 6 years ago
parent
commit
eafbfd23b2
  1. 92
      neutron_lbaas/db/loadbalancer/loadbalancer_dbv2.py
  2. 16
      neutron_lbaas/drivers/driver_base.py
  3. 43
      neutron_lbaas/drivers/driver_mixins.py
  4. 4
      neutron_lbaas/drivers/logging_noop/driver.py
  5. 212
      neutron_lbaas/drivers/octavia/driver.py
  6. 129
      neutron_lbaas/extensions/lb_graph.py
  7. 4
      neutron_lbaas/extensions/loadbalancerv2.py
  8. 83
      neutron_lbaas/services/loadbalancer/data_models.py
  9. 147
      neutron_lbaas/services/loadbalancer/plugin.py
  10. 12
      neutron_lbaas/tests/base.py
  11. 420
      neutron_lbaas/tests/unit/db/loadbalancer/test_db_loadbalancerv2.py
  12. 149
      neutron_lbaas/tests/unit/drivers/octavia/test_octavia_driver.py
  13. 8
      neutron_lbaas/tests/unit/services/loadbalancer/test_loadbalancer_plugin.py
  14. 6
      neutron_lbaas/tests/unit/test_agent_scheduler.py

92
neutron_lbaas/db/loadbalancer/loadbalancer_dbv2.py

@ -21,12 +21,13 @@ from neutron.callbacks import resources
from neutron.db import common_db_mixin as base_db
from neutron import manager
from neutron.plugins.common import constants
from neutron_lib import constants as n_constants
from neutron_lib import constants as n_const
from neutron_lib import exceptions as n_exc
from oslo_db import exception
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import uuidutils
from sqlalchemy import exc as sqlalchemy_exc
from sqlalchemy import orm
from sqlalchemy.orm import exc
@ -92,17 +93,17 @@ class LoadBalancerPluginDbv2(base_db.CommonDbMixin,
# resolve subnet and create port
subnet = self._core_plugin.get_subnet(context, lb_db.vip_subnet_id)
fixed_ip = {'subnet_id': subnet['id']}
if ip_address and ip_address != n_constants.ATTR_NOT_SPECIFIED:
if ip_address and ip_address != n_const.ATTR_NOT_SPECIFIED:
fixed_ip['ip_address'] = ip_address
port_data = {
'tenant_id': lb_db.tenant_id,
'name': 'loadbalancer-' + lb_db.id,
'network_id': subnet['network_id'],
'mac_address': n_constants.ATTR_NOT_SPECIFIED,
'mac_address': n_const.ATTR_NOT_SPECIFIED,
'admin_state_up': False,
'device_id': lb_db.id,
'device_owner': n_constants.DEVICE_OWNER_LOADBALANCERV2,
'device_owner': n_const.DEVICE_OWNER_LOADBALANCERV2,
'fixed_ips': [fixed_ip]
}
@ -203,6 +204,66 @@ class LoadBalancerPluginDbv2(base_db.CommonDbMixin,
model_db.operating_status != operating_status):
model_db.operating_status = operating_status
def create_loadbalancer_graph(self, context, loadbalancer,
                              allocate_vip=True):
    """Create a load balancer and its entire child graph in one transaction.

    :param context: neutron request context (supplies the DB session).
    :param loadbalancer: dict describing the load balancer; may embed
        'listeners', each of which may embed 'default_pool' (with
        'healthmonitor' and 'members') and 'l7policies' (with 'rules'
        and 'redirect_pool').
    :param allocate_vip: forwarded to create_loadbalancer; when True a
        VIP port is allocated for the new load balancer.
    :returns: the fully populated LoadBalancer data model.
    """
    l7policies_ids = []
    with context.session.begin(subtransactions=True):
        listeners = loadbalancer.pop('listeners', [])
        lb_db = self.create_loadbalancer(context, loadbalancer,
                                         allocate_vip=allocate_vip)
        for listener in listeners:
            listener['loadbalancer_id'] = lb_db.id
            default_pool = listener.pop('default_pool', None)
            # Create the default pool (and its health monitor/members)
            # first so the listener can reference default_pool_id.
            if (default_pool and
                    default_pool != n_const.ATTR_NOT_SPECIFIED):
                default_pool['loadbalancer_id'] = lb_db.id
                hm = default_pool.pop('healthmonitor', None)
                if hm and hm != n_const.ATTR_NOT_SPECIFIED:
                    hm_db = self.create_healthmonitor(context, hm)
                    default_pool['healthmonitor_id'] = hm_db.id
                members = default_pool.pop('members', [])
                pool_db = self.create_pool(context, default_pool)
                listener['default_pool_id'] = pool_db.id
                for member in members:
                    member['pool_id'] = pool_db.id
                    self.create_pool_member(context, member, pool_db.id)
            l7policies = listener.pop('l7policies', None)
            listener_db = self.create_listener(context, listener)
            if (l7policies and l7policies !=
                    n_const.ATTR_NOT_SPECIFIED):
                for l7policy in l7policies:
                    l7policy['listener_id'] = listener_db.id
                    redirect_pool = l7policy.pop('redirect_pool', None)
                    l7rules = l7policy.pop('rules', [])
                    # A redirect pool, like the default pool, must exist
                    # before the policy that points at it is created.
                    if (redirect_pool and redirect_pool !=
                            n_const.ATTR_NOT_SPECIFIED):
                        redirect_pool['loadbalancer_id'] = lb_db.id
                        rhm = redirect_pool.pop('healthmonitor', None)
                        rmembers = redirect_pool.pop('members', [])
                        if rhm and rhm != n_const.ATTR_NOT_SPECIFIED:
                            rhm_db = self.create_healthmonitor(context,
                                                               rhm)
                            redirect_pool['healthmonitor_id'] = rhm_db.id
                        rpool_db = self.create_pool(context, redirect_pool)
                        l7policy['redirect_pool_id'] = rpool_db.id
                        for rmember in rmembers:
                            rmember['pool_id'] = rpool_db.id
                            self.create_pool_member(context, rmember,
                                                    rpool_db.id)
                    l7policy_db = self.create_l7policy(context, l7policy)
                    l7policies_ids.append(l7policy_db.id)
                    if (l7rules and l7rules !=
                            n_const.ATTR_NOT_SPECIFIED):
                        for l7rule in l7rules:
                            self.create_l7policy_rule(
                                context, l7rule, l7policy_db.id)
    # SQL Alchemy cache issue where l7rules won't show up as intended.
    for l7policy_id in l7policies_ids:
        l7policy_db = self._get_resource(context, models.L7Policy,
                                         l7policy_id)
        context.session.expire(l7policy_db)
    return self.get_loadbalancer(context, lb_db.id)
def create_loadbalancer(self, context, loadbalancer, allocate_vip=True):
with context.session.begin(subtransactions=True):
self._load_id(context, loadbalancer)
@ -215,6 +276,7 @@ class LoadBalancerPluginDbv2(base_db.CommonDbMixin,
lb_db.stats = self._create_loadbalancer_stats(
context, lb_db.id)
context.session.add(lb_db)
context.session.flush()
# create port outside of lb create transaction since it can sometimes
# cause lock wait timeouts
@ -225,7 +287,11 @@ class LoadBalancerPluginDbv2(base_db.CommonDbMixin,
vip_address)
except Exception:
with excutils.save_and_reraise_exception():
context.session.delete(lb_db)
try:
context.session.delete(lb_db)
except sqlalchemy_exc.InvalidRequestError:
# Revert already completed.
pass
context.session.flush()
return data_models.LoadBalancer.from_sqlalchemy_model(lb_db)
@ -247,7 +313,7 @@ class LoadBalancerPluginDbv2(base_db.CommonDbMixin,
port_db = self._core_plugin._get_port(context, port_id)
except n_exc.PortNotFound:
return
if port_db['device_owner'] == n_constants.DEVICE_OWNER_LOADBALANCERV2:
if port_db['device_owner'] == n_const.DEVICE_OWNER_LOADBALANCERV2:
filters = {'vip_port_id': [port_id]}
if len(self.get_loadbalancers(context, filters=filters)) > 0:
reason = _('has device owner %s') % port_db['device_owner']
@ -384,14 +450,17 @@ class LoadBalancerPluginDbv2(base_db.CommonDbMixin,
# Check for unspecified loadbalancer_id and listener_id and
# set to None
for id in ['loadbalancer_id', 'default_pool_id']:
if listener.get(id) == n_constants.ATTR_NOT_SPECIFIED:
if listener.get(id) == n_const.ATTR_NOT_SPECIFIED:
listener[id] = None
self._validate_listener_data(context, listener)
sni_container_ids = []
if 'sni_container_ids' in listener:
sni_container_ids = listener.pop('sni_container_ids')
listener_db_entry = models.Listener(**listener)
try:
listener_db_entry = models.Listener(**listener)
except Exception as exc:
raise exc
for container_id in sni_container_ids:
sni = models.SNI(listener_id=listener_db_entry.id,
tls_container_id=container_id)
@ -485,7 +554,7 @@ class LoadBalancerPluginDbv2(base_db.CommonDbMixin,
pool['provisioning_status'] = constants.PENDING_CREATE
pool['operating_status'] = lb_const.OFFLINE
session_info = pool.pop('session_persistence')
session_info = pool.pop('session_persistence', None)
pool_db = models.PoolV2(**pool)
if session_info:
@ -667,8 +736,11 @@ class LoadBalancerPluginDbv2(base_db.CommonDbMixin,
loadbalancer.stats)
def create_l7policy(self, context, l7policy):
if l7policy['redirect_pool_id'] == n_constants.ATTR_NOT_SPECIFIED:
if (l7policy.get('redirect_pool_id') and
l7policy['redirect_pool_id'] == n_const.ATTR_NOT_SPECIFIED):
l7policy['redirect_pool_id'] = None
if not l7policy.get('position'):
l7policy['position'] = 2147483647
self._validate_l7policy_data(context, l7policy)
with context.session.begin(subtransactions=True):

16
neutron_lbaas/drivers/driver_base.py

@ -87,6 +87,18 @@ class BaseLoadBalancerManager(driver_mixins.BaseRefreshMixin,
driver_mixins.BaseManagerMixin):
model_class = models.LoadBalancer
@property
def allows_create_graph(self):
    """
    Can this driver create a load balancer graph in one call.

    Return True if this driver has the capability to create a load balancer
    and any of its children in one driver call. If this returns True and
    the user requests the creation of a load balancer graph, then the
    create_graph method will be called to create the load balancer.
    """
    # Base implementation opts out; capable drivers override this.
    return False
@property
def allocates_vip(self):
"""Does this driver need to allocate its own virtual IPs"""
@ -163,10 +175,12 @@ def driver_op(func):
@wraps(func)
def func_wrapper(*args, **kwargs):
d = (func.__name__ == 'delete')
lb_create = ((func.__name__ == 'create') and
isinstance(args[0], BaseLoadBalancerManager))
try:
r = func(*args, **kwargs)
args[0].successful_completion(
args[1], args[2], delete=d)
args[1], args[2], delete=d, lb_create=lb_create)
return r
except Exception:
with excutils.save_and_reraise_exception():

43
neutron_lbaas/drivers/driver_mixins.py

@ -47,6 +47,46 @@ class BaseManagerMixin(object):
def delete(self, context, obj):
pass
def _successful_completion_lb_graph(self, context, obj):
    """Mark every object in a newly created load balancer graph complete.

    Walks the children hanging off the load balancer *obj* (listeners,
    pools, members, health monitors, l7 policies and rules) and invokes
    successful_completion on each one, children before parents, finishing
    with the load balancer itself.
    """
    listeners = obj.listeners
    # Detach listeners so the final successful_completion(obj) call below
    # treats the load balancer itself as a plain (non-graph) object.
    obj.listeners = []
    for listener in listeners:
        # need to maintain the link from the child to the load balancer
        listener.loadbalancer = obj
        pool = listener.default_pool
        l7_policies = listener.l7_policies
        if pool:
            pool.listener = listener
            hm = pool.healthmonitor
            if hm:
                hm.pool = pool
                self.successful_completion(context, hm)
            for member in pool.members:
                member.pool = pool
                self.successful_completion(context, member)
            self.successful_completion(context, pool)
        if l7_policies:
            for l7policy in l7_policies:
                l7policy.listener = listener
                l7rules = l7policy.rules
                for l7rule in l7rules:
                    l7rule.l7policy = l7policy
                    self.successful_completion(context, l7rule)
                redirect_pool = l7policy.redirect_pool
                if redirect_pool:
                    redirect_pool.listener = listener
                    rhm = redirect_pool.healthmonitor
                    if rhm:
                        rhm.pool = redirect_pool
                        self.successful_completion(context, rhm)
                    for rmember in redirect_pool.members:
                        rmember.pool = redirect_pool
                        self.successful_completion(context, rmember)
                    self.successful_completion(context, redirect_pool)
                self.successful_completion(context, l7policy)
        self.successful_completion(context, listener)
    self.successful_completion(context, obj)
def successful_completion(self, context, obj, delete=False,
lb_create=False):
"""
@ -64,6 +104,9 @@ class BaseManagerMixin(object):
"""
LOG.debug("Starting successful_completion method after a successful "
"driver action.")
if lb_create and obj.listeners:
self._successful_completion_lb_graph(context, obj)
return
obj_sa_cls = data_models.DATA_MODEL_TO_SA_MODEL_MAP[obj.__class__]
if delete:
# Check if driver is responsible for vip allocation. If the driver

4
neutron_lbaas/drivers/logging_noop/driver.py

@ -59,6 +59,10 @@ class LoggingNoopCommonManager(object):
class LoggingNoopLoadBalancerManager(LoggingNoopCommonManager,
driver_base.BaseLoadBalancerManager):
@property
def allows_create_graph(self):
    # The noop driver only logs calls, so it can trivially accept an
    # entire load balancer graph in a single create call.
    return True
@property
def allocates_vip(self):
LOG.debug('allocates_vip queried')

212
neutron_lbaas/drivers/octavia/driver.py

@ -192,6 +192,10 @@ class LoadBalancerManager(driver_base.BaseLoadBalancerManager):
s += '/%s' % id
return s
@property
def allows_create_graph(self):
    # Octavia accepts a fully populated load balancer in one POST, so
    # single-call graph creation is supported by this driver.
    return True
@property
def allocates_vip(self):
return cfg.CONF.octavia.allocates_vip
@ -200,32 +204,42 @@ class LoadBalancerManager(driver_base.BaseLoadBalancerManager):
def deletes_cascade(self):
return True
def _construct_args(self, db_lb, create=True, graph=False):
args = {'name': db_lb.name,
'description': db_lb.description,
'enabled': db_lb.admin_state_up}
if not create:
return args
create_args = {'project_id': db_lb.tenant_id, 'id': db_lb.id,
'vip': {'subnet_id': db_lb.vip_subnet_id,
'ip_address': db_lb.vip_address,
'port_id': db_lb.vip_port_id}}
args.update(create_args)
if not graph:
return args
if db_lb.listeners:
args['listeners'] = []
for db_listener in db_lb.listeners:
listener_args = self.driver.listener._construct_args(db_listener,
graph=True)
args['listeners'].append(listener_args)
return args
def create_and_allocate_vip(self, context, lb):
self.create(context, lb)
@async_op
def create(self, context, lb):
args = {
'id': lb.id,
'name': lb.name,
'description': lb.description,
'enabled': lb.admin_state_up,
'project_id': lb.tenant_id,
'vip': {
'subnet_id': lb.vip_subnet_id,
'ip_address': lb.vip_address,
'port_id': lb.vip_port_id,
}
}
graph = (lb.listeners and len(lb.listeners) > 0)
args = self._construct_args(lb, graph=graph)
self.driver.req.post(self._url(lb), args)
@async_op
def update(self, context, old_lb, lb):
args = {
'name': lb.name,
'description': lb.description,
'enabled': lb.admin_state_up,
}
args = self._construct_args(lb, create=False)
self.driver.req.put(self._url(lb, lb.id), args)
@async_op
@ -256,8 +270,7 @@ class ListenerManager(driver_base.BaseListenerManager):
s += '/%s' % id
return s
@classmethod
def _write(cls, write_func, url, listener, create=True):
def _construct_args(self, listener, create=True, graph=False):
sni_container_ids = [sni.tls_container_id
for sni in listener.sni_containers]
args = {
@ -271,19 +284,40 @@ class ListenerManager(driver_base.BaseListenerManager):
'default_pool_id': listener.default_pool_id,
'sni_containers': sni_container_ids
}
if create:
args['project_id'] = listener.tenant_id
args['id'] = listener.id
write_func(url, args)
if not create:
return args
args['project_id'] = listener.tenant_id
args['id'] = listener.id
if not graph:
return args
del args['default_pool_id']
if listener.default_pool:
pool = listener.default_pool
args['default_pool'] = self.driver.pool._construct_args(pool,
graph=True)
if listener.l7_policies:
args['l7policies'] = []
l7_policies = listener.l7_policies
for l7_policy in l7_policies:
l7_policy_args = self.driver.l7policy._construct_args(
l7_policy, graph=True)
args['l7policies'].append(l7_policy_args)
return args
@async_op
def create(self, context, listener):
self._write(self.driver.req.post, self._url(listener), listener)
args = self._construct_args(listener)
self.driver.req.post(self._url(listener), args)
@async_op
def update(self, context, old_listener, listener):
self._write(self.driver.req.put, self._url(listener, id=listener.id),
listener, create=False)
args = self._construct_args(listener, create=False)
self.driver.req.put(self._url(listener, id=listener.id), args)
@async_op
def delete(self, context, listener):
@ -300,8 +334,7 @@ class PoolManager(driver_base.BasePoolManager):
s += '/%s' % id
return s
@classmethod
def _write(cls, write_func, url, pool, create=True):
def _construct_args(self, pool, create=True, graph=False):
args = {
'name': pool.name,
'description': pool.description,
@ -316,21 +349,38 @@ class PoolManager(driver_base.BasePoolManager):
}
else:
args['session_persistence'] = None
if create:
args['project_id'] = pool.tenant_id
args['id'] = pool.id
if pool.listeners:
args['listener_id'] = pool.listeners[0].id
write_func(url, args)
if not create:
return args
args['project_id'] = pool.tenant_id
args['id'] = pool.id
if pool.listeners:
args['listener_id'] = pool.listeners[0].id
if not graph:
return args
if pool.members:
args['members'] = []
for member in pool.members:
member_args = self.driver.member._construct_args(member)
args['members'].append(member_args)
if pool.healthmonitor:
hm_args = self.driver.health_monitor._construct_args(
pool.healthmonitor)
args['health_monitor'] = hm_args
return args
@async_op
def create(self, context, pool):
self._write(self.driver.req.post, self._url(pool), pool)
args = self._construct_args(pool)
self.driver.req.post(self._url(pool), args)
@async_op
def update(self, context, old_pool, pool):
self._write(self.driver.req.put, self._url(pool, id=pool.id), pool,
create=False)
args = self._construct_args(pool, create=False)
self.driver.req.put(self._url(pool, id=pool.id), args)
@async_op
def delete(self, context, pool):
@ -348,26 +398,33 @@ class MemberManager(driver_base.BaseMemberManager):
s += '/%s' % id
return s
@async_op
def create(self, context, member):
def _construct_args(self, member, create=True):
args = {
'id': member.id,
'enabled': member.admin_state_up,
'ip_address': member.address,
'protocol_port': member.protocol_port,
'weight': member.weight,
'weight': member.weight
}
if not create:
return args
create_args = {
'id': member.id,
'ip_address': member.address,
'subnet_id': member.subnet_id,
'project_id': member.tenant_id
}
args.update(create_args)
return args
@async_op
def create(self, context, member):
args = self._construct_args(member)
self.driver.req.post(self._url(member), args)
@async_op
def update(self, context, old_member, member):
args = {
'enabled': member.admin_state_up,
'protocol_port': member.protocol_port,
'weight': member.weight,
}
args = self._construct_args(member, create=False)
self.driver.req.put(self._url(member, member.id), args)
@async_op
@ -384,8 +441,7 @@ class HealthMonitorManager(driver_base.BaseHealthMonitorManager):
hm.pool.id)
return s
@classmethod
def _write(cls, write_func, url, hm, create=True):
def _construct_args(self, hm, create=True):
args = {
'type': hm.type,
'delay': hm.delay,
@ -399,15 +455,17 @@ class HealthMonitorManager(driver_base.BaseHealthMonitorManager):
}
if create:
args['project_id'] = hm.tenant_id
write_func(url, args)
return args
@async_op
def create(self, context, hm):
self._write(self.driver.req.post, self._url(hm), hm)
args = self._construct_args(hm)
self.driver.req.post(self._url(hm), args)
@async_op
def update(self, context, old_hm, hm):
self._write(self.driver.req.put, self._url(hm), hm, create=False)
args = self._construct_args(hm, create=False)
self.driver.req.put(self._url(hm), args)
@async_op
def delete(self, context, hm):
@ -425,36 +483,55 @@ class L7PolicyManager(driver_base.BaseL7PolicyManager):
s += '/%s' % id
return s
@classmethod
def _write(cls, write_func, url, l7p, create=True):
def _construct_args(self, l7p, create=True, graph=False):
    """Build the Octavia API request body for an L7 policy.

    Fields irrelevant to the policy's action are stripped so the backend
    does not reject the request. With graph=True the redirect pool and
    the policy's rules are embedded inline instead of referenced by id.
    """
    args = {
        'name': l7p.name,
        'description': l7p.description,
        'action': l7p.action,
        'redirect_pool_id': l7p.redirect_pool_id,
        'redirect_url': l7p.redirect_url,
        'position': l7p.position,
        'enabled': l7p.admin_state_up
    }
    # Keep only the redirect target that matches the action type.
    if args['action'] == constants.L7_POLICY_ACTION_REJECT:
        del args['redirect_url']
        del args['redirect_pool_id']
    elif args['action'] == constants.L7_POLICY_ACTION_REDIRECT_TO_POOL:
        args['redirect_pool_id'] = l7p.redirect_pool_id
        del args['redirect_url']
    elif args['action'] == constants.L7_POLICY_ACTION_REDIRECT_TO_URL:
        if args.get('redirect_pool_id'):
            del args['redirect_pool_id']
    if not create:
        return args
    args['id'] = l7p.id
    if not graph:
        if l7p.listener_id:
            args['listener_id'] = l7p.listener_id
        return args
    # Graph mode: embed the redirect pool definition instead of its id,
    # since the pool does not exist yet at request time.
    if (l7p.redirect_pool and l7p.action ==
            constants.L7_POLICY_ACTION_REDIRECT_TO_POOL):
        del args['redirect_pool_id']
        pool_args = self.driver.pool._construct_args(l7p.redirect_pool,
                                                     graph=True)
        args['redirect_pool'] = pool_args
    if l7p.rules:
        args['l7rules'] = []
        for rule in l7p.rules:
            rule_args = self.driver.l7rule._construct_args(rule)
            args['l7rules'].append(rule_args)
    return args
@async_op
def create(self, context, l7p):
self._write(self.driver.req.post, self._url(l7p), l7p)
args = self._construct_args(l7p)
self.driver.req.post(self._url(l7p), args)
@async_op
def update(self, context, old_l7p, l7p):
self._write(self.driver.req.put, self._url(l7p, id=l7p.id),
l7p, create=False)
args = self._construct_args(l7p, create=False)
self.driver.req.put(self._url(l7p, id=l7p.id), args)
@async_op
def delete(self, context, l7p):
@ -474,7 +551,7 @@ class L7RuleManager(driver_base.BaseL7RuleManager):
return s
@classmethod
def _write(cls, write_func, url, l7r, create=True):
def _construct_args(cls, l7r, create=True):
args = {
'type': l7r.type,
'compare_type': l7r.compare_type,
@ -484,16 +561,17 @@ class L7RuleManager(driver_base.BaseL7RuleManager):
}
if create:
args['id'] = l7r.id
write_func(url, args)
return args
@async_op
def create(self, context, l7r):
self._write(self.driver.req.post, self._url(l7r), l7r)
args = self._construct_args(l7r)
self.driver.req.post(self._url(l7r), args)
@async_op
def update(self, context, old_l7r, l7r):
self._write(self.driver.req.put, self._url(l7r, id=l7r.id),
l7r, create=False)
args = self._construct_args(l7r, create=False)
self.driver.req.put(self._url(l7r, id=l7r.id), args)
@async_op
def delete(self, context, l7r):

129
neutron_lbaas/extensions/lb_graph.py

@ -0,0 +1,129 @@
# Copyright 2014 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from neutron.api import extensions
from neutron.api.v2 import attributes as attr
from neutron.api.v2 import resource_helper
from neutron.common import exceptions as nexception
from neutron.plugins.common import constants
from neutron_lbaas._i18n import _
from neutron_lbaas.extensions import loadbalancerv2
LOG = logging.getLogger(__name__)
class ProviderCannotCreateLoadBalancerGraph(nexception.BadRequest):
    """Raised when the configured provider driver does not implement
    single-call load balancer graph creation.
    """
    message = _("The provider does not have the ability to create a load "
                "balancer graph.")
# NOTE(blogan): this dictionary is to be used only for importing from the
# plugin to validate against. It is only put here for consistency with
# all other extensions and an easy place to look what changes this extension
# allows.
RESOURCE_ATTRIBUTE_MAP = {
'graphs': {
'loadbalancer': {'allow_post': True, 'allow_put': False,
'is_visible': True},
'tenant_id': {'allow_post': True, 'allow_put': False,
'is_visible': True}
}
}
EXISTING_ATTR_GRAPH_ATTR_MAP = {
'loadbalancers': {
'listeners': {
'allow_post': True, 'allow_put': False,
'is_visible': True, 'default': []
}
},
'listeners': {
'default_pool': {
'allow_post': True, 'allow_put': False, 'is_visible': True,
'default': attr.ATTR_NOT_SPECIFIED
},
'l7policies': {
'allow_post': True, 'allow_put': False,
'is_visible': True, 'default': []
}
},
'pools': {
'healthmonitor': {
'allow_post': True, 'allow_put': False, 'is_visible': True,
'default': attr.ATTR_NOT_SPECIFIED
},
'members': {
'allow_post': True, 'allow_put': False,
'is_visible': True, 'default': []
}
},
'l7policies': {
'rules': {
'allow_post': True, 'allow_put': False,
'is_visible': True, 'default': []
},
'redirect_pool': {
'allow_post': True, 'allow_put': False, 'is_visible': True,
'default': attr.ATTR_NOT_SPECIFIED
},
'listener_id': {
'allow_post': False, 'allow_put': False, 'is_visible': True
}
}
}
class Lb_graph(extensions.ExtensionDescriptor):
    """Neutron API extension descriptor for the lb-graph extension.

    Exposes the 'graphs' resource, allowing a load balancer and its full
    child graph to be created in a single API request.
    """

    @classmethod
    def get_name(cls):
        return "Load Balancer Graph"

    @classmethod
    def get_alias(cls):
        return "lb-graph"

    @classmethod
    def get_description(cls):
        return "Extension for allowing the creation of load balancers with a" \
               " full graph in one API request."

    @classmethod
    def get_namespace(cls):
        return "http://wiki.openstack.org/neutron/LBaaS/API_2.0"

    @classmethod
    def get_updated(cls):
        return "2016-02-09T10:00:00-00:00"

    def get_required_extensions(self):
        # Graph creation builds on the core lbaasv2 resources.
        return ["lbaasv2"]

    @classmethod
    def get_resources(cls):
        # Map the 'graphs' collection onto the LBaaS v2 service plugin.
        plural_mappings = resource_helper.build_plural_mappings(
            {}, RESOURCE_ATTRIBUTE_MAP)
        resources = resource_helper.build_resource_info(
            plural_mappings,
            RESOURCE_ATTRIBUTE_MAP,
            constants.LOADBALANCERV2,
            register_quota=True)
        return resources

    @classmethod
    def get_plugin_interface(cls):
        return loadbalancerv2.LoadBalancerPluginBaseV2

4
neutron_lbaas/extensions/loadbalancerv2.py

@ -669,3 +669,7 @@ class LoadBalancerPluginBaseV2(service_base.ServicePluginBase):
@abc.abstractmethod
def delete_l7policy_rule(self, context, id, l7policy_id):
pass
@abc.abstractmethod
def create_graph(self, context, graph):
    """Create a load balancer and all of its children in one request.

    :param context: neutron request context.
    :param graph: request body containing the 'loadbalancer' graph
        to create.
    """
    pass

83
neutron_lbaas/services/loadbalancer/data_models.py

@ -606,6 +606,8 @@ class L7Policy(BaseDataModel):
if self.listener:
ret_dict['listeners'].append({'id': self.listener.id})
ret_dict['rules'] = [{'id': rule.id} for rule in self.rules]
if ret_dict.get('action') == l_const.L7_POLICY_ACTION_REDIRECT_TO_POOL:
del ret_dict['redirect_url']
return ret_dict
@classmethod
@ -662,7 +664,7 @@ class Listener(BaseDataModel):
ret_dict = super(Listener, self).to_dict(
loadbalancer=False, loadbalancer_id=False, default_pool=False,
operating_status=False, provisioning_status=False,
sni_containers=False)
sni_containers=False, default_tls_container=False)
# NOTE(blogan): Returning a list to future proof for M:N objects
# that are not yet implemented.
ret_dict['loadbalancers'] = []
@ -671,7 +673,8 @@ class Listener(BaseDataModel):
ret_dict['sni_container_refs'] = [container.tls_container_id
for container in self.sni_containers]
ret_dict['default_tls_container_ref'] = self.default_tls_container_id
ret_dict['l7_policies'] = [{'id': l7_policy.id}
del ret_dict['l7_policies']
ret_dict['l7policies'] = [{'id': l7_policy.id}
for l7_policy in self.l7_policies]
return ret_dict
@ -724,12 +727,80 @@ class LoadBalancer(BaseDataModel):
def attached_to_loadbalancer(self):
return True
def to_api_dict(self):
def _construct_full_graph_api_dict(self):
    """Return the listener list for this load balancer's API response
    when the whole graph was created in a single request.

    Child objects (pools, members, health monitors, l7 policies and
    rules) are embedded inline, and the cross-reference id fields that
    normally link separately created resources are stripped out.
    """
    api_listeners = []
    for listener in self.listeners:
        api_listener = listener.to_api_dict()
        del api_listener['loadbalancers']
        del api_listener['default_pool_id']
        if listener.default_pool:
            api_pool = listener.default_pool.to_api_dict()
            # The pool is embedded, so drop all of its back-references.
            del api_pool['listeners']
            del api_pool['listener']
            del api_pool['listener_id']
            del api_pool['healthmonitor_id']
            del api_pool['loadbalancers']
            del api_pool['l7_policies']
            del api_pool['sessionpersistence']
            if listener.default_pool.healthmonitor:
                api_hm = listener.default_pool.healthmonitor.to_api_dict()
                del api_hm['pools']
                api_pool['healthmonitor'] = api_hm
            api_pool['members'] = []
            for member in listener.default_pool.members:
                api_member = member.to_api_dict()
                del api_member['pool_id']
                api_pool['members'].append(api_member)
            api_listener['default_pool'] = api_pool
        if listener.l7_policies and len(listener.l7_policies) > 0:
            api_l7policies = []
            for l7policy in listener.l7_policies:
                api_l7policy = l7policy.to_api_dict()
                del api_l7policy['redirect_pool_id']
                del api_l7policy['listeners']
                if l7policy.rules and len(l7policy.rules) > 0:
                    api_l7rules = []
                    for l7rule in l7policy.rules:
                        api_l7rule = l7rule.to_api_dict()
                        del api_l7rule['policies']
                        api_l7rules.append(api_l7rule)
                    api_l7policy['rules'] = api_l7rules
                if l7policy.redirect_pool:
                    api_r_pool = l7policy.redirect_pool.to_api_dict()
                    if l7policy.redirect_pool.healthmonitor:
                        api_r_hm = (l7policy.redirect_pool.healthmonitor.
                                    to_api_dict())
                        del api_r_hm['pools']
                        api_r_pool['healthmonitor'] = api_r_hm
                    api_r_pool['members'] = []
                    for r_member in l7policy.redirect_pool.members:
                        api_r_member = r_member.to_api_dict()
                        del api_r_member['pool_id']
                        api_r_pool['members'].append(api_r_member)
                    # Same back-reference stripping as the default pool.
                    del api_r_pool['listeners']
                    del api_r_pool['listener']
                    del api_r_pool['listener_id']
                    del api_r_pool['healthmonitor_id']
                    del api_r_pool['loadbalancers']
                    del api_r_pool['l7_policies']
                    del api_r_pool['sessionpersistence']
                    api_l7policy['redirect_pool'] = api_r_pool
                api_l7policies.append(api_l7policy)
            api_listener['l7policies'] = api_l7policies
        api_listeners.append(api_listener)
    return api_listeners
def to_api_dict(self, full_graph=False):
ret_dict = super(LoadBalancer, self).to_dict(
vip_port=False, stats=False, listeners=False)
ret_dict['listeners'] = [{'id': listener.id}
for listener in self.listeners]
ret_dict['pools'] = [{'id': pool.id} for pool in self.pools]
if full_graph:
ret_dict['listeners'] = self._construct_full_graph_api_dict()
del ret_dict['pools']
else:
ret_dict['listeners'] = [{'id': listener.id}
for listener in self.listeners]
ret_dict['pools'] = [{'id': pool.id} for pool in self.pools]
if self.provider:
ret_dict['provider'] = self.provider.provider_name

147
neutron_lbaas/services/loadbalancer/plugin.py

@ -12,10 +12,10 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
import copy
from neutron.api.v2 import attributes as attrs
from neutron.api.v2 import base as napi_base
from neutron import context as ncontext
from neutron.db import servicetype_db as st_db
from neutron.extensions import flavors
@ -29,6 +29,7 @@ from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import encodeutils
from oslo_utils import excutils
import six
from neutron_lbaas._i18n import _LI, _LE
from neutron_lbaas import agent_scheduler as agent_scheduler_v2
@ -37,6 +38,8 @@ from neutron_lbaas.common.tls_utils import cert_parser
from neutron_lbaas.db.loadbalancer import loadbalancer_db as ldb
from neutron_lbaas.db.loadbalancer import loadbalancer_dbv2 as ldbv2
from neutron_lbaas.db.loadbalancer import models
from neutron_lbaas.extensions import l7
from neutron_lbaas.extensions import lb_graph as lb_graph_ext
from neutron_lbaas.extensions import lbaas_agentschedulerv2
from neutron_lbaas.extensions import loadbalancer as lb_ext
from neutron_lbaas.extensions import loadbalancerv2
@ -386,7 +389,8 @@ class LoadBalancerPluginv2(loadbalancerv2.LoadBalancerPluginBaseV2):
"shared_pools",
"l7",
"lbaas_agent_schedulerv2",
"service-type"]
"service-type",
"lb-graph"]
path_prefix = loadbalancerv2.LOADBALANCERV2_PREFIX
agent_notifiers = (
@ -475,15 +479,15 @@ class LoadBalancerPluginv2(loadbalancerv2.LoadBalancerPluginBaseV2):
return self.default_provider
def _call_driver_operation(self, context, driver_method, db_entity,
old_db_entity=None):
old_db_entity=None, **kwargs):
manager_method = "%s.%s" % (driver_method.__self__.__class__.__name__,
driver_method.__name__)
LOG.info(_LI("Calling driver operation %s") % manager_method)
try:
if old_db_entity:
driver_method(context, old_db_entity, db_entity)
driver_method(context, old_db_entity, db_entity, **kwargs)
else:
driver_method(context, db_entity)
driver_method(context, db_entity, **kwargs)
# catching and reraising agent issues
except (lbaas_agentschedulerv2.NoEligibleLbaasAgent,
lbaas_agentschedulerv2.NoActiveLbaasAgent) as no_agent:
@ -562,6 +566,113 @@ class LoadBalancerPluginv2(loadbalancerv2.LoadBalancerPluginBaseV2):
loadbalancer['provider'] = provider
def _get_tweaked_resource_attribute_map(self):
    """Return a deep copy of the core RESOURCE_ATTRIBUTE_MAP adjusted
    for load balancer graph creation.

    Parent-linking id attributes (listener.loadbalancer_id,
    pool.listener_id, healthmonitor.pool_id) are removed because the
    parents do not exist yet during a graph request, and the lb-graph
    extension's additional attributes are merged in.
    """
    # Seed the deepcopy memo so the ATTR_NOT_SPECIFIED sentinel is NOT
    # copied: identity comparisons against it elsewhere must keep working.
    memo = {id(attrs.ATTR_NOT_SPECIFIED): attrs.ATTR_NOT_SPECIFIED}
    ram = copy.deepcopy(attrs.RESOURCE_ATTRIBUTE_MAP, memo=memo)
    del ram['listeners']['loadbalancer_id']
    del ram['pools']['listener_id']
    del ram['healthmonitors']['pool_id']
    for resource in ram:
        if resource in lb_graph_ext.EXISTING_ATTR_GRAPH_ATTR_MAP:
            ram[resource].update(
                lb_graph_ext.EXISTING_ATTR_GRAPH_ATTR_MAP[resource])
    return ram
def _prepare_loadbalancer_graph(self, context, loadbalancer):
    """Prepares the entire user requested body of a load balancer graph

    To minimize code duplication, this method reuses the neutron API
    controller's method to do all the validation, conversion, and
    defaulting of each resource.  This reuses the RESOURCE_ATTRIBUTE_MAP
    and SUB_RESOURCE_ATTRIBUTE_MAP from the extension to enable this.

    :param context: neutron request context
    :param loadbalancer: the raw loadbalancer graph body from the request
    :returns: the fully validated/defaulted loadbalancer graph dict
    """
    # NOTE(blogan): it is assumed the loadbalancer attributes have already
    # passed through the prepare_request_body method by nature of the
    # normal neutron wsgi workflow.  So we start with listeners since
    # that probably has not passed through the neutron wsgi workflow.
    ram = self._get_tweaked_resource_attribute_map()
    # NOTE(blogan): members are not populated in the attributes.RAM so
    # our only option is to use the original extension definition of
    # member to validate.  If members ever need something added to it
    # then it too will need to be added here.
    prepped_lb = napi_base.Controller.prepare_request_body(
        context, {'loadbalancer': loadbalancer}, True, 'loadbalancer',
        ram['loadbalancers']
    )
    # Merge into a fresh dict so the module-level
    # SUB_RESOURCE_ATTRIBUTE_MAP is not mutated for other callers.
    sub_ram = dict(loadbalancerv2.SUB_RESOURCE_ATTRIBUTE_MAP)
    sub_ram.update(l7.SUB_RESOURCE_ATTRIBUTE_MAP)
    prepped_listeners = []
    for listener in loadbalancer.get('listeners', []):
        prepped_listener = napi_base.Controller.prepare_request_body(
            context, {'listener': listener}, True, 'listener',
            ram['listeners'])
        l7policies = listener.get('l7policies')
        if l7policies and l7policies != attrs.ATTR_NOT_SPECIFIED:
            prepped_policies = []
            for policy in l7policies:
                prepped_policy = napi_base.Controller.prepare_request_body(
                    context, {'l7policy': policy}, True, 'l7policy',
                    ram['l7policies'])
                l7rules = policy.get('rules')
                redirect_pool = policy.get('redirect_pool')
                if l7rules and l7rules != attrs.ATTR_NOT_SPECIFIED:
                    prepped_policy['l7_rules'] = [
                        napi_base.Controller.prepare_request_body(
                            context, {'l7rule': rule}, True, 'l7rule',
                            sub_ram['rules']['parameters'])
                        for rule in l7rules]
                if (redirect_pool and
                        redirect_pool != attrs.ATTR_NOT_SPECIFIED):
                    # Fix: attach the *prepared* pool, not the raw
                    # request body, so defaults/validation are kept.
                    prepped_policy['redirect_pool'] = (
                        self._prepare_pool_graph(
                            context, redirect_pool, ram, sub_ram))
                prepped_policies.append(prepped_policy)
            prepped_listener['l7_policies'] = prepped_policies
        pool = listener.get('default_pool')
        if pool and pool != attrs.ATTR_NOT_SPECIFIED:
            prepped_listener['default_pool'] = self._prepare_pool_graph(
                context, pool, ram, sub_ram)
        prepped_listeners.append(prepped_listener)
    prepped_lb['listeners'] = prepped_listeners
    # Fix: return the prepared graph; previously the raw request body
    # was returned, discarding all validation and defaulting done above.
    return prepped_lb

def _prepare_pool_graph(self, context, pool, ram, sub_ram):
    """Validate and default a pool subtree (pool, members, healthmonitor).

    :param pool: raw pool body from the graph request
    :param ram: tweaked RESOURCE_ATTRIBUTE_MAP for graph requests
    :param sub_ram: merged SUB_RESOURCE_ATTRIBUTE_MAP for subresources
    :returns: the prepared pool dict with nested members/healthmonitor
    """
    prepped_pool = napi_base.Controller.prepare_request_body(
        context, {'pool': pool}, True, 'pool', ram['pools'])
    prepped_pool['members'] = [
        napi_base.Controller.prepare_request_body(
            context, {'member': member}, True, 'member',
            sub_ram['members']['parameters'])
        for member in pool.get('members', [])]
    hm = pool.get('healthmonitor')
    if hm and hm != attrs.ATTR_NOT_SPECIFIED:
        prepped_pool['healthmonitor'] = (
            napi_base.Controller.prepare_request_body(
                context, {'healthmonitor': hm}, True, 'healthmonitor',
                ram['healthmonitors']))
    return prepped_pool
def create_loadbalancer(self, context, loadbalancer):
loadbalancer = loadbalancer.get('loadbalancer')
if loadbalancer['flavor_id'] != attrs.ATTR_NOT_SPECIFIED:
@ -583,6 +694,30 @@ class LoadBalancerPluginv2(loadbalancerv2.LoadBalancerPluginBaseV2):
self._call_driver_operation(context, create_method, lb_db)
return self.db.get_loadbalancer(context, lb_db.id).to_api_dict()
def create_graph(self, context, graph):
    """Create a full (or partial) load balancer graph in one request.

    Validates the graph body, resolves the provider, refuses providers
    that do not support graph creation, persists the graph, and hands
    the result to the driver.
    """
    lb_body = graph.get('graph', {}).get('loadbalancer')
    lb_body = self._prepare_loadbalancer_graph(context, lb_body)
    if lb_body['flavor_id'] == attrs.ATTR_NOT_SPECIFIED:
        del lb_body['flavor_id']
    else:
        self._insert_provider_name_from_flavor(context, lb_body)
    provider = self._get_provider_name(lb_body)
    driver = self.drivers[provider]
    lb_manager = driver.load_balancer
    # Graph creation is opt-in per driver; reject unsupported providers.
    if not lb_manager.allows_create_graph:
        raise lb_graph_ext.ProviderCannotCreateLoadBalancerGraph
    db_lb = self.db.create_loadbalancer_graph(
        context, lb_body,
        allocate_vip=not lb_manager.allocates_vip)
    self.service_type_manager.add_resource_association(
        context, constants.LOADBALANCERV2, provider, db_lb.id)
    # Drivers that allocate their own VIP get the combined entry point.
    if lb_manager.allocates_vip:
        create_method = lb_manager.create_and_allocate_vip
    else:
        create_method = lb_manager.create
    self._call_driver_operation(context, create_method, db_lb)
    refreshed_lb = self.db.get_loadbalancer(context, db_lb.id)
    return {'loadbalancer': refreshed_lb.to_api_dict(full_graph=True)}
def update_loadbalancer(self, context, id, loadbalancer):
loadbalancer = loadbalancer.get('loadbalancer')
old_lb = self.db.get_loadbalancer(context, id)

12
neutron_lbaas/tests/base.py

@ -15,6 +15,7 @@
#
import mock
import webob
from neutron.db import servicetype_db as st_db
from neutron.tests import base as n_base
@ -206,6 +207,17 @@ class NeutronDbPluginV2TestCase(
expected_res.reverse()
self.assertEqual(expected_res, [n['id'] for n in item_res])
def _delete(self, collection, id,
            expected_code=webob.exc.HTTPNoContent.code,
            neutron_context=None, subresource=None, sub_id=None):
    """Issue a DELETE for *id* in *collection* and assert the status code.

    Supports subresource deletion via *subresource*/*sub_id* and an
    optional explicit *neutron_context* for the request.
    """
    request = self.new_delete_request(
        collection, id, subresource=subresource, sub_id=sub_id)
    if neutron_context:
        # Run this request under a caller-supplied auth context.
        request.environ['neutron.context'] = neutron_context
    api = self._api_for_resource(collection)
    response = request.get_response(api)
    self.assertEqual(response.status_int, expected_code)
class ExtensionTestCase(ext_base.ExtensionTestCase):
    # Thin local alias over the shared extension test base; adds no
    # behavior, exists so lbaas tests import a package-local base class.
    pass

420
neutron_lbaas/tests/unit/db/loadbalancer/test_db_loadbalancerv2.py

@ -40,6 +40,7 @@ from neutron_lbaas.db.loadbalancer import models
from neutron_lbaas.drivers.logging_noop import driver as noop_driver
import neutron_lbaas.extensions
from neutron_lbaas.extensions import l7
from neutron_lbaas.extensions import lb_graph
from neutron_lbaas.extensions import loadbalancerv2
from neutron_lbaas.extensions import sharedpools
from neutron_lbaas.services.loadbalancer import constants as lb_const
@ -64,12 +65,14 @@ _subnet_id = "0c798ed8-33ba-11e2-8b28-000c291c4d14"
class LbaasTestMixin(object):
# Collect every top-level resource name exposed by the lbaasv2, l7, and
# lb-graph extensions so request URLs can be prefixed correctly.
resource_keys = list(loadbalancerv2.RESOURCE_ATTRIBUTE_MAP.keys())
resource_keys.extend(l7.RESOURCE_ATTRIBUTE_MAP.keys())
resource_keys.extend(lb_graph.RESOURCE_ATTRIBUTE_MAP.keys())
# Every lbaas resource lives under the common LBaaS v2 URL prefix.
resource_prefix_map = dict(
    (k, loadbalancerv2.LOADBALANCERV2_PREFIX)
    for k in resource_keys)
def _get_loadbalancer_optional_args(self):
return 'description', 'vip_address', 'admin_state_up', 'name'
return ('description', 'vip_address', 'admin_state_up', 'name',
'listeners')
def _create_loadbalancer(self, fmt, subnet_id,
expected_res_status=None, **kwargs):
@ -87,6 +90,22 @@ class LbaasTestMixin(object):
return lb_res
def _create_graph(self, fmt, subnet_id, expected_res_status=None,
                  **kwargs):
    """POST a loadbalancer graph and optionally assert the status code.

    Only optional loadbalancer attributes present in *kwargs* with a
    non-None value are copied into the request body.
    """
    lb_body = {'vip_subnet_id': subnet_id, 'tenant_id': self._tenant_id}
    for attr in self._get_loadbalancer_optional_args():
        if kwargs.get(attr) is not None:
            lb_body[attr] = kwargs[attr]
    request_body = {'graph': {'loadbalancer': lb_body,
                              'tenant_id': self._tenant_id}}
    request = self.new_create_request('graphs', request_body, fmt)
    response = request.get_response(self.ext_api)
    if expected_res_status:
        self.assertEqual(expected_res_status, response.status_int)
    return response
def _get_listener_optional_args(self):
return ('name', 'description', 'default_pool_id', 'loadbalancer_id',
'connection_limit', 'admin_state_up',
@ -249,15 +268,70 @@ class LbaasTestMixin(object):
tmp_subnet['subnet']['id'],
**kwargs)
if res.status_int >= webob.exc.HTTPClientError.code:
raise webob.exc.HTTPClientError(
exc = webob.exc.HTTPClientError(
explanation=_("Unexpected error code: %s") %
res.status_int
)
exc.code = res.status_int
exc.status_code = res.status_int
raise exc
lb = self.deserialize(fmt or self.fmt, res)
yield lb
if not no_delete:
self._delete('loadbalancers', lb['loadbalancer']['id'])
@contextlib.contextmanager
def graph(self, fmt=None, subnet=None, no_delete=False, **kwargs):
    """Yield a created loadbalancer graph, tearing it down on exit.

    On a non-2xx create response, raises HTTPClientError carrying the
    actual status code.  Unless *no_delete* is set, every child resource
    (health monitors, members, pools, policies' redirect pools,
    listeners) is deleted bottom-up before the loadbalancer itself.
    """
    if not fmt:
        fmt = self.fmt
    with test_db_base_plugin_v2.optional_ctx(
            subnet, self.subnet) as tmp_subnet:
        res = self._create_graph(fmt, tmp_subnet['subnet']['id'],
                                 **kwargs)
        if res.status_int >= webob.exc.HTTPClientError.code:
            exc = webob.exc.HTTPClientError(
                explanation=_("Unexpected error code: %s") %
                res.status_int
            )
            # Preserve the real response status so callers can assert
            # on exc.code / exc.status_code.
            exc.code = res.status_int
            exc.status_code = res.status_int
            raise exc
        graph = self.deserialize(fmt or self.fmt, res)
        yield graph
        if not no_delete:
            # delete loadbalancer children if this was a loadbalancer
            # graph create call
            lb = graph['graph']['loadbalancer']
            for listener in lb.get('listeners', []):
                pool = listener.get('default_pool')
                if pool:
                    # Children first: monitor, then members, then pool.
                    hm = pool.get('healthmonitor')
                    if hm:
                        self._delete('healthmonitors', hm['id'])
                    members = pool.get('members', [])
                    for member in members:
                        self._delete('pools', pool['id'],
                                     subresource='members',
                                     sub_id=member['id'])
                    self._delete('pools', pool['id'])
                policies = listener.get('l7policies', [])
                for policy in policies:
                    # Redirect pools are cleaned up the same way as the
                    # default pool.
                    r_pool = policy.get('redirect_pool')
                    if r_pool:
                        r_hm = r_pool.get('healthmonitor')
                        if r_hm:
                            self._delete('healthmonitors', r_hm['id'])
                        r_members = r_pool.get('members', [])
                        for r_member in r_members:
                            self._delete('pools', r_pool['id'],
                                         subresource='members',
                                         sub_id=r_member['id'])
                        self._delete('pools', r_pool['id'])
                self._delete('listeners', listener['id'])
            self._delete('loadbalancers', lb['id'])
@contextlib.contextmanager
def listener(self, fmt=None, protocol='HTTP', loadbalancer_id=None,
protocol_port=80, default_pool_id=None, no_delete=False,
@ -348,14 +422,8 @@ class LbaasTestMixin(object):
member = self.deserialize(fmt or self.fmt, res)
yield member
if not no_delete:
del_req = self.new_delete_request(
'pools',
fmt=fmt,
id=pool_id,
subresource='members',
sub_id=member['member']['id'])
del_res = del_req.get_response(self.ext_api)
self.assertEqual(webob.exc.HTTPNoContent.code, del_res.status_int)
self._delete('pools', id=pool_id, subresource='members',
sub_id=member['member']['id'])
@contextlib.contextmanager
def healthmonitor(self, fmt=None, pool_id='pool1id', type='TCP', delay=1,
@ -461,6 +529,8 @@ class ExtendedPluginAwareExtensionManager(object):
extensions_list.append(sharedpools)
if 'l7' in self.extension_aliases:
extensions_list.append(l7)
if 'lb-graph' in self.extension_aliases:
extensions_list.append(lb_graph)
for extension in extensions_list:
if 'RESOURCE_ATTRIBUTE_MAP' in extension.__dict__:
loadbalancerv2.RESOURCE_ATTRIBUTE_MAP.update(
@ -959,6 +1029,336 @@ class LoadBalancerDelegateVIPCreation(LbaasPluginDbTestCase):
self.assertIsNotNone(port)
class TestLoadBalancerGraphCreation(LbaasPluginDbTestCase):
def _assert_graphs_equal(self, expected_graph, observed_graph):
    """Assert an observed loadbalancer graph matches the expected one.

    Server-generated 'id' fields at every level are asserted present and
    then stripped from a copy of the observed graph before comparison.
    NOTE(review): this mutates *expected_graph* in place (copies the
    observed id/vip_port_id into it and pops its listeners).
    """
    observed_graph_copy = copy.deepcopy(observed_graph)
    # These are generated server-side; only check they exist.
    for k in ('id', 'vip_address', 'vip_subnet_id'):
        self.assertTrue(observed_graph_copy.get(k, None))
    expected_graph['id'] = observed_graph_copy['id']
    expected_graph['vip_port_id'] = observed_graph_copy['vip_port_id']
    expected_listeners = expected_graph.pop('listeners', [])
    observed_listeners = observed_graph_copy.pop('listeners', [])
    # Compare only the keys the expectation actually specifies.
    actual = dict((k, v)
                  for k, v in observed_graph_copy.items()
                  if k in expected_graph)
    self.assertEqual(expected_graph, actual)
    for observed_listener in observed_listeners:
        # Strip generated ids from every nested resource so listeners
        # can be membership-checked against the expected set.
        self.assertTrue(observed_listener.get('id'))
        observed_listener.pop('id')
        default_pool = observed_listener.get('default_pool')
        l7_policies = observed_listener.get('l7policies')
        if default_pool:
            self.assertTrue(default_pool.get('id'))
            default_pool.pop('id')
            hm = default_pool.get('healthmonitor')
            if hm:
                self.assertTrue(hm.get('id'))
                hm.pop('id')
            for member in default_pool.get('members', []):
                self.assertTrue(member.get('id'))
                member.pop('id')
        if l7_policies:
            for policy in l7_policies:
                self.assertTrue(policy.get('id'))
                policy.pop('id')
                r_pool = policy.get('redirect_pool')
                rules = policy.get('rules')
                if r_pool:
                    self.assertTrue(r_pool.get('id'))
                    r_pool.pop('id')
                    r_hm = r_pool.get('healthmonitor')
                    if r_hm:
                        self.assertTrue(r_hm.get('id'))
                        r_hm.pop('id')
                    for r_member in r_pool.get('members', []):
                        self.assertTrue(r_member.get('id'))
                        r_member.pop('id')
                if rules:
                    for rule in rules:
                        self.assertTrue(rule.get('id'))
                        rule.pop('id')
        # Order-insensitive comparison of listeners.
        self.assertIn(observed_listener, expected_listeners)
def _validate_graph_statuses(self, graph):
    """Validate the status tree for every resource in a created graph."""
    lb_id = graph['id']
    for listener in graph.get('listeners', []):
        status_kwargs = {'listener_id': listener['id']}
        pool = listener.get('default_pool')
        if not pool:
            # Listener with no pool: validate down to the listener only.
            self._validate_statuses(lb_id, **status_kwargs)
            continue
        status_kwargs['pool_id'] = pool['id']
        hm = pool.get('health_monitor')
        if hm:
            status_kwargs['hm_id'] = hm['id']
        members = pool.get('members', [])
        for member in members:
            status_kwargs['member_id'] = member['id']
            self._validate_statuses(lb_id, **status_kwargs)
        if not members:
            # Empty pool: validate down to the pool/monitor level.
            self._validate_statuses(lb_id, **status_kwargs)
def _get_expected_lb(self, expected_listeners):
expected_lb = {
'name': 'vip1',
'description': '',
'admin_state_up': True,
'provisioning_status': constants.ACTIVE,
'operating_status'