Merge feature/lbaasv2

New extension for version 2 of LBaaS API

Also added some constants for supported protocols, algorithms,
health monitor types, and session persistence types.

Implement Jinja templates for haproxy config

Added templates dir
Added haproxy v1.4 template
Added template tests
Added jinja_cfg for new haproxy jinja templating

Tests for extension, db and plugin for LBaaS V2

Adding needed driver interface changes for tests.
Adding LoggingNoop driver needed changes for tests.
Adding extension, plugin, and db unit tests.

Plugin/DB additions for version 2 of LBaaS API

Added alembic migrations, models, db methods and plugin methods.
Added DEFERRED status for entities not linked to a load balancer.
Added to_dict method in BaseNeutron model.
SQLAlchemy models moved into their own module.
Plugin database methods are accessed by composition through the plugin.
Added data models to convert sql alchemy models before passing to driver.

Partially-implements: blueprint lbaas-api-and-objmodel-improvement
Partially-implements: blueprint lbaas-refactor-haproxy-namespace-driver-to-n
Partially-implements: blueprint services-split

Change-Id: I8343d83c645f3037ac237d7f47744c1c7e4356f8
Co-Authored-By: Brandon Logan <brandon.logan@rackspace.com>
Co-Authored-By: Phillip Toohill <phillip.toohill@rackspace.com>
Co-Authored-By: Dustin Lundquist <dustin@null-ptr.net>
Co-authored-by: Vijayendra Bhamidipati <vbhamidipati@paypal.com>
Co-authored-by: Craig Tracey <Craig.Tracey@gmail.com>
Co-authored-by: Pattabi Ayyasami <pattabi@brocade.com>
This commit is contained in:
Doug Wiegley 2014-12-11 19:02:11 -07:00 committed by Brandon Logan
parent 3db31d134b
commit c7c77b33ba
22 changed files with 5231 additions and 78 deletions

View File

@ -0,0 +1,504 @@
#
# Copyright 2014 OpenStack Foundation. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.db import exception
from oslo.utils import excutils
from sqlalchemy import orm
from sqlalchemy.orm import exc
from neutron.api.v2 import attributes
from neutron.db import common_db_mixin as base_db
from neutron.extensions import loadbalancerv2
from neutron import manager
from neutron.openstack.common import uuidutils
from neutron.plugins.common import constants
from neutron_lbaas.db.loadbalancer import models
from neutron_lbaas.services.loadbalancer import constants as lb_const
from neutron_lbaas.services.loadbalancer import data_models
class LoadBalancerPluginDbv2(base_db.CommonDbMixin):
    """Wraps loadbalancer with SQLAlchemy models.

    A class that wraps the implementation of the Neutron loadbalancer
    plugin database access interface using SQLAlchemy models.
    """

    @property
    def _core_plugin(self):
        # Resolved lazily through the manager so the core plugin is
        # available regardless of plugin load order.
        return manager.NeutronManager.get_plugin()

    def _get_resource(self, context, model, id, for_update=False):
        """Fetch a single row of ``model`` by id.

        :param for_update: when True the row is fetched with
            SELECT ... FOR UPDATE so it stays locked for the rest of the
            enclosing transaction.
        :raises loadbalancerv2.EntityNotFound: when the id does not exist
            and ``model`` is one of the known LBaaS v2 models; any other
            model re-raises the original NoResultFound.
        """
        resource = None
        try:
            if for_update:
                query = self._model_query(context, model).filter(
                    model.id == id).with_lockmode('update')
                resource = query.one()
            else:
                resource = self._get_by_id(context, model, id)
        except exc.NoResultFound:
            with excutils.save_and_reraise_exception(reraise=False) as ctx:
                if issubclass(model, (models.LoadBalancer, models.Listener,
                                      models.PoolV2, models.MemberV2,
                                      models.HealthMonitorV2,
                                      models.LoadBalancerStatistics,
                                      models.SessionPersistenceV2)):
                    raise loadbalancerv2.EntityNotFound(name=model.NAME, id=id)
                # Not an LBaaS model: propagate the original NoResultFound.
                ctx.reraise = True
        return resource

    def _resource_exists(self, context, model, id):
        """Return True if a row of ``model`` with the given id exists."""
        try:
            self._get_by_id(context, model, id)
        except exc.NoResultFound:
            return False
        return True

    def _get_resources(self, context, model, filters=None):
        """Return a list of ``model`` rows matching ``filters``."""
        query = self._get_collection_query(context, model,
                                           filters=filters)
        return [model_instance for model_instance in query]

    def _create_port_for_load_balancer(self, context, lb_db, ip_address):
        """Create the neutron VIP port for ``lb_db`` and record its id/IP.

        The port is created on the network of the load balancer's
        vip_subnet_id; if ``ip_address`` was supplied it is requested as a
        fixed IP, otherwise neutron allocates one.
        """
        # resolve subnet and create port
        subnet = self._core_plugin.get_subnet(context, lb_db.vip_subnet_id)
        fixed_ip = {'subnet_id': subnet['id']}
        if ip_address and ip_address != attributes.ATTR_NOT_SPECIFIED:
            fixed_ip['ip_address'] = ip_address
        port_data = {
            'tenant_id': lb_db.tenant_id,
            'name': 'loadbalancer-' + lb_db.id,
            'network_id': subnet['network_id'],
            'mac_address': attributes.ATTR_NOT_SPECIFIED,
            'admin_state_up': False,
            'device_id': '',
            'device_owner': '',
            'fixed_ips': [fixed_ip]
        }
        port = self._core_plugin.create_port(context, {'port': port_data})
        lb_db.vip_port_id = port['id']
        # Record the address actually allocated on the VIP subnet.
        for fixed_ip in port['fixed_ips']:
            if fixed_ip['subnet_id'] == lb_db.vip_subnet_id:
                lb_db.vip_address = fixed_ip['ip_address']
                break
        # explicitly sync session with db
        context.session.flush()

    def _create_loadbalancer_stats(self, context, loadbalancer_id, data=None):
        # This is internal method to add load balancer statistics. It won't
        # be exposed to API
        data = data or {}
        stats_db = models.LoadBalancerStatistics(
            loadbalancer_id=loadbalancer_id,
            bytes_in=data.get(lb_const.STATS_IN_BYTES, 0),
            bytes_out=data.get(lb_const.STATS_OUT_BYTES, 0),
            active_connections=data.get(lb_const.STATS_ACTIVE_CONNECTIONS, 0),
            total_connections=data.get(lb_const.STATS_TOTAL_CONNECTIONS, 0)
        )
        return stats_db

    def _delete_loadbalancer_stats(self, context, loadbalancer_id):
        # This is internal method to delete pool statistics. It won't
        # be exposed to API
        with context.session.begin(subtransactions=True):
            stats_qry = context.session.query(models.LoadBalancerStatistics)
            try:
                stats = stats_qry.filter_by(
                    loadbalancer_id=loadbalancer_id).one()
            except exc.NoResultFound:
                raise loadbalancerv2.EntityNotFound(
                    name=models.LoadBalancerStatistics.NAME,
                    id=loadbalancer_id)
            context.session.delete(stats)

    def _load_id_and_tenant_id(self, context, model_dict):
        """Populate a fresh uuid and the request tenant on ``model_dict``."""
        model_dict['id'] = uuidutils.generate_uuid()
        model_dict['tenant_id'] = self._get_tenant_id_for_create(
            context, model_dict)

    def assert_modification_allowed(self, obj):
        """Raise StateInvalid if ``obj`` is in a PENDING_* state."""
        status = getattr(obj, 'status', None)
        if status in [constants.PENDING_DELETE, constants.PENDING_UPDATE,
                      constants.PENDING_CREATE]:
            id = getattr(obj, 'id', None)
            raise loadbalancerv2.StateInvalid(id=id, state=status)

    def test_and_set_status(self, context, model, id, status):
        """Atomically move ``id`` to ``status`` if modification is allowed.

        The row is locked FOR UPDATE so the status check-and-set is
        race-free within the transaction.
        """
        with context.session.begin(subtransactions=True):
            model_db = self._get_resource(context, model, id, for_update=True)
            self.assert_modification_allowed(model_db)
            if model_db.status != status:
                model_db.status = status

    def update_status(self, context, model, id, status):
        """Unconditionally set ``status`` on the row, without state checks."""
        with context.session.begin(subtransactions=True):
            if issubclass(model, models.LoadBalancer):
                try:
                    # noload('vip_port') avoids joining the ports table just
                    # to flip the status column.
                    model_db = (self._model_query(context, model).
                                filter(model.id == id).
                                options(orm.noload('vip_port')).
                                one())
                except exc.NoResultFound:
                    raise loadbalancerv2.EntityNotFound(
                        name=models.LoadBalancer.NAME, id=id)
            else:
                model_db = self._get_resource(context, model, id)
            if model_db.status != status:
                model_db.status = status

    def create_loadbalancer(self, context, loadbalancer):
        """Create a load balancer row, its stats row and its VIP port.

        :returns: data_models.LoadBalancer for the new row.
        """
        with context.session.begin(subtransactions=True):
            self._load_id_and_tenant_id(context, loadbalancer)
            vip_address = loadbalancer.pop('vip_address')
            loadbalancer['status'] = constants.PENDING_CREATE
            lb_db = models.LoadBalancer(**loadbalancer)
            context.session.add(lb_db)
            context.session.flush()
            lb_db.stats = self._create_loadbalancer_stats(
                context, lb_db.id)
            context.session.add(lb_db)
        # create port outside of lb create transaction since it can sometimes
        # cause lock wait timeouts
        try:
            self._create_port_for_load_balancer(context, lb_db, vip_address)
        except Exception:
            with excutils.save_and_reraise_exception():
                # Roll back the already-committed LB row by hand since the
                # port creation happened outside its transaction.
                context.session.delete(lb_db)
                context.session.flush()
        return data_models.LoadBalancer.from_sqlalchemy_model(lb_db)

    def update_loadbalancer(self, context, id, loadbalancer):
        """Apply ``loadbalancer`` attribute updates to the row with ``id``."""
        with context.session.begin(subtransactions=True):
            lb_db = self._get_resource(context, models.LoadBalancer, id)
            lb_db.update(loadbalancer)
        return data_models.LoadBalancer.from_sqlalchemy_model(lb_db)

    def delete_loadbalancer(self, context, id):
        """Delete the load balancer row and its VIP port, if one exists."""
        with context.session.begin(subtransactions=True):
            lb_db = self._get_resource(context, models.LoadBalancer, id)
            context.session.delete(lb_db)
        if lb_db.vip_port:
            self._core_plugin.delete_port(context, lb_db.vip_port_id)

    def get_loadbalancers(self, context, filters=None):
        """Return all load balancers matching ``filters`` as data models."""
        lb_dbs = self._get_resources(context, models.LoadBalancer,
                                     filters=filters)
        return [data_models.LoadBalancer.from_sqlalchemy_model(lb_db)
                for lb_db in lb_dbs]

    def get_loadbalancer(self, context, id):
        """Return a single load balancer as a data model."""
        lb_db = self._get_resource(context, models.LoadBalancer, id)
        return data_models.LoadBalancer.from_sqlalchemy_model(lb_db)

    def _validate_listener_data(self, context, listener):
        """Validate a listener's loadbalancer/pool references.

        Ensures the referenced load balancer exists, the default pool's
        protocol matches the listener's, and the pool is not already the
        default pool of another listener.
        """
        pool_id = listener.get('default_pool_id')
        lb_id = listener.get('loadbalancer_id')
        if lb_id:
            if not self._resource_exists(context, models.LoadBalancer,
                                         lb_id):
                raise loadbalancerv2.EntityNotFound(
                    name=models.LoadBalancer.NAME, id=lb_id)
        if pool_id:
            pool = self._get_resource(context, models.PoolV2, pool_id)
            if pool.protocol != listener.get('protocol'):
                raise loadbalancerv2.ListenerPoolProtocolMismatch(
                    listener_proto=listener['protocol'],
                    pool_proto=pool.protocol)
            filters = {'default_pool_id': [pool_id]}
            listenerpools = self._get_resources(context,
                                                models.Listener,
                                                filters=filters)
            if listenerpools:
                raise loadbalancerv2.EntityInUse(
                    entity_using=models.Listener.NAME,
                    id=listenerpools[0].id,
                    entity_in_use=models.PoolV2.NAME)

    def create_listener(self, context, listener):
        """Create a listener row after validating its references.

        :raises loadbalancerv2.LoadBalancerListenerProtocolPortExists:
            if the (loadbalancer_id, protocol_port) pair is already taken
            (detected via the DB unique constraint).
        """
        try:
            with context.session.begin(subtransactions=True):
                self._load_id_and_tenant_id(context, listener)
                listener['status'] = constants.PENDING_CREATE
                # Check for unspecified loadbalancer_id and listener_id and
                # set to None
                for id in ['loadbalancer_id', 'default_pool_id']:
                    if listener.get(id) == attributes.ATTR_NOT_SPECIFIED:
                        listener[id] = None
                self._validate_listener_data(context, listener)
                listener_db_entry = models.Listener(**listener)
                context.session.add(listener_db_entry)
        except exception.DBDuplicateEntry:
            raise loadbalancerv2.LoadBalancerListenerProtocolPortExists(
                lb_id=listener['loadbalancer_id'],
                protocol_port=listener['protocol_port'])
        return data_models.Listener.from_sqlalchemy_model(listener_db_entry)

    def update_listener(self, context, id, listener):
        """Update a listener; loadbalancer_id and default_pool_id are
        immutable once set.
        """
        with context.session.begin(subtransactions=True):
            listener_db = self._get_resource(context, models.Listener, id)
            # Do not allow changing loadbalancer ids
            if listener_db.loadbalancer_id and listener.get('loadbalancer_id'):
                raise loadbalancerv2.AttributeIDImmutable(
                    attribute='loadbalancer_id')
            # Do not allow changing pool ids
            if listener_db.default_pool_id and listener.get('default_pool_id'):
                raise loadbalancerv2.AttributeIDImmutable(
                    attribute='default_pool_id')
            if not listener.get('protocol'):
                # User did not intend to change the protocol so we will just
                # use the same protocol already stored so the validation knows
                listener['protocol'] = listener_db.protocol
            self._validate_listener_data(context, listener)
            listener_db.update(listener)
        context.session.refresh(listener_db)
        return data_models.Listener.from_sqlalchemy_model(listener_db)

    def delete_listener(self, context, id):
        """Delete the listener row with ``id``."""
        listener_db_entry = self._get_resource(context, models.Listener, id)
        with context.session.begin(subtransactions=True):
            context.session.delete(listener_db_entry)

    def get_listeners(self, context, filters=None):
        """Return all listeners matching ``filters`` as data models."""
        listener_dbs = self._get_resources(context, models.Listener,
                                           filters=filters)
        return [data_models.Listener.from_sqlalchemy_model(listener_db)
                for listener_db in listener_dbs]

    def get_listener(self, context, id):
        """Return a single listener as a data model."""
        listener_db = self._get_resource(context, models.Listener, id)
        return data_models.Listener.from_sqlalchemy_model(listener_db)

    def _create_session_persistence_db(self, session_info, pool_id):
        """Build (not persist) a SessionPersistenceV2 row for ``pool_id``."""
        session_info['pool_id'] = pool_id
        return models.SessionPersistenceV2(**session_info)

    def _update_pool_session_persistence(self, context, pool_id, info):
        """Create or update the session persistence record of a pool."""
        # NOTE(review): the pool is fetched before the transaction begins --
        # presumably acceptable here; confirm no race with concurrent updates.
        pool = self._get_resource(context, models.PoolV2, pool_id)
        with context.session.begin(subtransactions=True):
            # Update sessionPersistence table
            sess_qry = context.session.query(models.SessionPersistenceV2)
            sesspersist_db = sess_qry.filter_by(pool_id=pool_id).first()
            # Insert a None cookie_info if it is not present to overwrite an
            # existing value in the database.
            if 'cookie_name' not in info:
                info['cookie_name'] = None
            if sesspersist_db:
                sesspersist_db.update(info)
            else:
                info['pool_id'] = pool_id
                sesspersist_db = models.SessionPersistenceV2(**info)
                context.session.add(sesspersist_db)
                # Update pool table
                pool.session_persistence = sesspersist_db
            context.session.add(pool)

    def _delete_session_persistence(self, context, pool_id):
        """Remove any session persistence record linked to ``pool_id``."""
        with context.session.begin(subtransactions=True):
            sess_qry = context.session.query(models.SessionPersistenceV2)
            sess_qry.filter_by(pool_id=pool_id).delete()

    def create_pool(self, context, pool):
        """Create a pool row, optionally linking a health monitor and
        creating a session persistence record.

        :raises loadbalancerv2.EntityNotFound: unknown healthmonitor_id.
        :raises loadbalancerv2.EntityInUse: health monitor already bound
            to another pool.
        """
        with context.session.begin(subtransactions=True):
            self._load_id_and_tenant_id(context, pool)
            pool['status'] = constants.PENDING_CREATE
            if pool['healthmonitor_id'] == attributes.ATTR_NOT_SPECIFIED:
                pool['healthmonitor_id'] = None
            hm_id = pool['healthmonitor_id']
            if hm_id:
                if not self._resource_exists(context, models.HealthMonitorV2,
                                             hm_id):
                    raise loadbalancerv2.EntityNotFound(
                        name=models.HealthMonitorV2.NAME, id=hm_id)
                filters = {'healthmonitor_id': [hm_id]}
                hmpools = self._get_resources(context,
                                              models.PoolV2,
                                              filters=filters)
                if hmpools:
                    raise loadbalancerv2.EntityInUse(
                        entity_using=models.PoolV2.NAME,
                        id=hmpools[0].id,
                        entity_in_use=models.HealthMonitorV2.NAME)
            session_info = pool.pop('session_persistence')
            pool_db = models.PoolV2(**pool)
            if session_info:
                s_p = self._create_session_persistence_db(session_info,
                                                          pool_db.id)
                pool_db.sessionpersistence = s_p
            context.session.add(pool_db)
        return data_models.Pool.from_sqlalchemy_model(pool_db)

    def update_pool(self, context, id, pool):
        """Update a pool; healthmonitor_id is immutable once set, and a
        missing/empty session_persistence removes the existing record.
        """
        with context.session.begin(subtransactions=True):
            pool_db = self._get_resource(context, models.PoolV2, id)
            hm_id = pool.get('healthmonitor_id')
            if hm_id:
                if pool_db.healthmonitor and hm_id:
                    raise loadbalancerv2.AttributeIDImmutable(
                        attribute='healthmonitor_id')
                if not self._resource_exists(context, models.HealthMonitorV2,
                                             hm_id):
                    raise loadbalancerv2.EntityNotFound(
                        name=models.HealthMonitorV2.NAME,
                        id=hm_id)
                filters = {'healthmonitor_id': [hm_id]}
                hmpools = self._get_resources(context,
                                              models.PoolV2,
                                              filters=filters)
                if hmpools:
                    raise loadbalancerv2.EntityInUse(
                        entity_using=models.PoolV2.NAME,
                        id=hmpools[0].id,
                        entity_in_use=models.HealthMonitorV2.NAME)
            sp = pool.pop('session_persistence', None)
            if sp:
                self._update_pool_session_persistence(context, id, sp)
            else:
                self._delete_session_persistence(context, id)
            pool_db.update(pool)
        context.session.refresh(pool_db)
        return data_models.Pool.from_sqlalchemy_model(pool_db)

    def delete_pool(self, context, id):
        """Delete the pool row with ``id``."""
        with context.session.begin(subtransactions=True):
            pool_db = self._get_resource(context, models.PoolV2, id)
            context.session.delete(pool_db)

    def get_pools(self, context, filters=None):
        """Return all pools matching ``filters`` as data models."""
        pool_dbs = self._get_resources(context, models.PoolV2, filters=filters)
        return [data_models.Pool.from_sqlalchemy_model(pool_db)
                for pool_db in pool_dbs]

    def get_pool(self, context, id):
        """Return a single pool as a data model."""
        pool_db = self._get_resource(context, models.PoolV2, id)
        return data_models.Pool.from_sqlalchemy_model(pool_db)

    def create_pool_member(self, context, member, pool_id):
        """Create a member in ``pool_id``.

        :raises loadbalancerv2.MemberExists: duplicate (pool, address,
            protocol_port), detected via the DB unique constraint.
        """
        try:
            with context.session.begin(subtransactions=True):
                if not self._resource_exists(context, models.PoolV2, pool_id):
                    raise loadbalancerv2.EntityNotFound(
                        name=models.PoolV2.NAME, id=pool_id)
                self._load_id_and_tenant_id(context, member)
                member['pool_id'] = pool_id
                member['status'] = constants.PENDING_CREATE
                member_db = models.MemberV2(**member)
                context.session.add(member_db)
        except exception.DBDuplicateEntry:
            raise loadbalancerv2.MemberExists(address=member['address'],
                                              port=member['protocol_port'],
                                              pool=pool_id)
        return data_models.Member.from_sqlalchemy_model(member_db)

    def update_pool_member(self, context, id, member, pool_id):
        """Update a member, verifying its pool still exists."""
        with context.session.begin(subtransactions=True):
            if not self._resource_exists(context, models.PoolV2, pool_id):
                raise loadbalancerv2.MemberNotFoundForPool(pool_id=pool_id,
                                                           member_id=id)
            member_db = self._get_resource(context, models.MemberV2, id)
            member_db.update(member)
        context.session.refresh(member_db)
        return data_models.Member.from_sqlalchemy_model(member_db)

    def delete_pool_member(self, context, id, pool_id):
        """Delete a member, verifying its pool still exists."""
        with context.session.begin(subtransactions=True):
            if not self._resource_exists(context, models.PoolV2, pool_id):
                raise loadbalancerv2.MemberNotFoundForPool(pool_id=pool_id,
                                                           member_id=id)
            member_db = self._get_resource(context, models.MemberV2, id)
            context.session.delete(member_db)

    def get_pool_members(self, context, pool_id, filters=None):
        """Return members of ``pool_id`` (plus extra filters) as data
        models.
        """
        filters = filters or {}
        filters.update({'pool_id': [pool_id]})
        member_dbs = self._get_resources(context, models.MemberV2,
                                         filters=filters)
        return [data_models.Member.from_sqlalchemy_model(member_db)
                for member_db in member_dbs]

    def get_pool_member(self, context, id, pool_id, filters=None):
        # NOTE(blogan): filters parameter is needed because neutron's extension
        # code calls this method with that parameter even though we do not
        # need it
        member_db = self._get_resource(context, models.MemberV2, id)
        if member_db.pool_id != pool_id:
            raise loadbalancerv2.MemberNotFoundForPool(member_id=id,
                                                       pool_id=pool_id)
        return data_models.Member.from_sqlalchemy_model(member_db)

    def delete_member(self, context, id):
        """Delete a member by id alone (no pool check)."""
        with context.session.begin(subtransactions=True):
            member_db = self._get_resource(context, models.MemberV2, id)
            context.session.delete(member_db)

    def create_healthmonitor(self, context, healthmonitor):
        """Create a health monitor row and return it as a data model."""
        with context.session.begin(subtransactions=True):
            self._load_id_and_tenant_id(context, healthmonitor)
            healthmonitor['status'] = constants.PENDING_CREATE
            hm_db_entry = models.HealthMonitorV2(**healthmonitor)
            context.session.add(hm_db_entry)
        return data_models.HealthMonitor.from_sqlalchemy_model(hm_db_entry)

    def update_healthmonitor(self, context, id, healthmonitor):
        """Apply attribute updates to the health monitor with ``id``."""
        with context.session.begin(subtransactions=True):
            hm_db = self._get_resource(context, models.HealthMonitorV2, id)
            hm_db.update(healthmonitor)
        context.session.refresh(hm_db)
        return data_models.HealthMonitor.from_sqlalchemy_model(hm_db)

    def delete_healthmonitor(self, context, id):
        """Delete the health monitor row with ``id``."""
        with context.session.begin(subtransactions=True):
            hm_db_entry = self._get_resource(context,
                                             models.HealthMonitorV2, id)
            context.session.delete(hm_db_entry)

    def get_healthmonitor(self, context, id):
        """Return a single health monitor as a data model."""
        hm_db = self._get_resource(context, models.HealthMonitorV2, id)
        return data_models.HealthMonitor.from_sqlalchemy_model(hm_db)

    def get_healthmonitors(self, context, filters=None):
        """Return all health monitors matching ``filters`` as data models."""
        hm_dbs = self._get_resources(context, models.HealthMonitorV2,
                                     filters=filters)
        return [data_models.HealthMonitor.from_sqlalchemy_model(hm_db)
                for hm_db in hm_dbs]

    def update_loadbalancer_stats(self, context, loadbalancer_id, stats_data):
        """Replace the statistics record of ``loadbalancer_id`` with
        ``stats_data``.
        """
        stats_data = stats_data or {}
        with context.session.begin(subtransactions=True):
            lb_db = self._get_resource(context, models.LoadBalancer,
                                       loadbalancer_id)
            self.assert_modification_allowed(lb_db)
            lb_db.stats = self._create_loadbalancer_stats(context,
                                                          loadbalancer_id,
                                                          data=stats_data)

    def stats(self, context, loadbalancer_id):
        """Return the statistics of ``loadbalancer_id`` as a data model."""
        loadbalancer = self._get_resource(context, models.LoadBalancer,
                                          loadbalancer_id)
        return data_models.LoadBalancerStatistics.from_sqlalchemy_model(
            loadbalancer.stats)

View File

@ -0,0 +1,207 @@
# Copyright (c) 2014 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.db import model_base
from neutron.db import models_v2
from neutron.db import servicetype_db as st_db
import sqlalchemy as sa
from sqlalchemy import orm
from neutron_lbaas.services.loadbalancer import constants as lb_const
class SessionPersistenceV2(model_base.BASEV2):
    """Represents session persistence settings of a v2 pool."""

    # NAME is required for consistency with the other LBaaS v2 models:
    # LoadBalancerPluginDbv2._get_resource includes this model in its
    # issubclass check and raises EntityNotFound(name=model.NAME, ...),
    # which would be an AttributeError without this attribute.
    NAME = 'session_persistence'

    __tablename__ = "lbaas_sessionpersistences"

    # One-to-one with lbaas_pools; the pool id doubles as the primary key.
    pool_id = sa.Column(sa.String(36),
                        sa.ForeignKey("lbaas_pools.id"),
                        primary_key=True,
                        nullable=False)
    type = sa.Column(sa.Enum(*lb_const.SUPPORTED_SP_TYPES,
                             name="lbaas_sesssionpersistences_typev2"),
                     nullable=False)
    # Cookie name is only meaningful for APP_COOKIE persistence.
    cookie_name = sa.Column(sa.String(1024), nullable=True)
class LoadBalancerStatistics(model_base.BASEV2):
    """Represents load balancer statistics."""

    NAME = 'loadbalancer_stats'

    __tablename__ = "lbaas_loadbalancer_statistics"

    # One-to-one with lbaas_loadbalancers; the LB id is the primary key.
    loadbalancer_id = sa.Column(sa.String(36),
                                sa.ForeignKey("lbaas_loadbalancers.id"),
                                primary_key=True,
                                nullable=False)
    bytes_in = sa.Column(sa.BigInteger, nullable=False)
    bytes_out = sa.Column(sa.BigInteger, nullable=False)
    active_connections = sa.Column(sa.BigInteger, nullable=False)
    total_connections = sa.Column(sa.BigInteger, nullable=False)

    @orm.validates('bytes_in', 'bytes_out',
                   'active_connections', 'total_connections')
    def validate_non_negative_int(self, key, value):
        """Reject negative counter values at assignment time."""
        if value < 0:
            data = {'key': key, 'value': value}
            # NOTE(review): ``_`` (i18n) is not imported in this module --
            # presumably installed as a builtin by neutron; confirm.
            raise ValueError(_('The %(key)s field can not have '
                               'negative value. '
                               'Current value is %(value)d.') % data)
        return value
class MemberV2(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
    """Represents a v2 neutron load balancer member."""

    NAME = 'member'

    __tablename__ = "lbaas_members"

    # A member is unique within a pool by (address, protocol_port).
    __table_args__ = (
        sa.schema.UniqueConstraint('pool_id', 'address', 'protocol_port',
                                   name='uniq_pool_address_port_v2'),
    )
    pool_id = sa.Column(sa.String(36), sa.ForeignKey("lbaas_pools.id"),
                        nullable=False)
    address = sa.Column(sa.String(64), nullable=False)
    protocol_port = sa.Column(sa.Integer, nullable=False)
    weight = sa.Column(sa.Integer, nullable=True)
    admin_state_up = sa.Column(sa.Boolean(), nullable=False)
    subnet_id = sa.Column(sa.String(36), nullable=True)
    status = sa.Column(sa.String(16), nullable=False)
class HealthMonitorV2(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
    """Represents a v2 neutron load balancer healthmonitor."""

    NAME = 'healthmonitor'

    __tablename__ = "lbaas_healthmonitors"

    type = sa.Column(sa.Enum(*lb_const.SUPPORTED_HEALTH_MONITOR_TYPES,
                             name="healthmonitors_typev2"),
                     nullable=False)
    delay = sa.Column(sa.Integer, nullable=False)
    timeout = sa.Column(sa.Integer, nullable=False)
    max_retries = sa.Column(sa.Integer, nullable=False)
    # The HTTP-specific fields below are nullable since they only apply
    # to HTTP/HTTPS monitor types.
    http_method = sa.Column(sa.String(16), nullable=True)
    url_path = sa.Column(sa.String(255), nullable=True)
    expected_codes = sa.Column(sa.String(64), nullable=True)
    status = sa.Column(sa.String(16), nullable=False)
    admin_state_up = sa.Column(sa.Boolean(), nullable=False)
class PoolV2(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
    """Represents a v2 neutron load balancer pool."""

    NAME = 'pool'

    __tablename__ = "lbaas_pools"

    name = sa.Column(sa.String(255), nullable=True)
    description = sa.Column(sa.String(255), nullable=True)
    # unique=True: a health monitor can be bound to at most one pool.
    healthmonitor_id = sa.Column(sa.String(36),
                                 sa.ForeignKey("lbaas_healthmonitors.id"),
                                 unique=True,
                                 nullable=True)
    protocol = sa.Column(sa.Enum(*lb_const.SUPPORTED_PROTOCOLS,
                                 name="pool_protocolsv2"),
                         nullable=False)
    lb_algorithm = sa.Column(sa.Enum(*lb_const.SUPPORTED_LB_ALGORITHMS,
                                     name="lb_algorithmsv2"),
                             nullable=False)
    admin_state_up = sa.Column(sa.Boolean(), nullable=False)
    status = sa.Column(sa.String(16), nullable=False)
    # Members are owned by the pool and removed with it (delete-orphan).
    members = orm.relationship(MemberV2,
                               backref=orm.backref("pool", uselist=False),
                               cascade="all, delete-orphan",
                               lazy='joined')
    healthmonitor = orm.relationship(
        HealthMonitorV2,
        backref=orm.backref("pool", uselist=False),
        lazy='joined')
    # One-to-one; the persistence record is deleted with the pool.
    sessionpersistence = orm.relationship(
        SessionPersistenceV2,
        uselist=False,
        backref=orm.backref("pool", uselist=False),
        cascade="all, delete-orphan",
        lazy='joined')
class LoadBalancer(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
    """Represents a v2 neutron load balancer."""

    NAME = 'loadbalancer'

    __tablename__ = "lbaas_loadbalancers"

    name = sa.Column(sa.String(255))
    description = sa.Column(sa.String(255))
    vip_subnet_id = sa.Column(sa.String(36), nullable=False)
    vip_port_id = sa.Column(sa.String(36), sa.ForeignKey(
        'ports.id', name='fk_lbaas_loadbalancers_ports_id'))
    vip_address = sa.Column(sa.String(36))
    status = sa.Column(sa.String(16), nullable=False)
    admin_state_up = sa.Column(sa.Boolean(), nullable=False)
    vip_port = orm.relationship(models_v2.Port)
    # Stats row is owned by the load balancer (delete-orphan).
    stats = orm.relationship(
        LoadBalancerStatistics,
        uselist=False,
        backref=orm.backref("loadbalancer", uselist=False),
        cascade="all, delete-orphan",
        lazy='joined')
    provider = orm.relationship(
        st_db.ProviderResourceAssociation,
        uselist=False,
        lazy="joined",
        primaryjoin="LoadBalancer.id==ProviderResourceAssociation.resource_id",
        foreign_keys=[st_db.ProviderResourceAssociation.resource_id],
        # this is only for old API backwards compatibility because when a load
        # balancer is deleted the pool ID should be the same as the load
        # balancer ID and should not be cleared out in this table
        viewonly=True
    )
class Listener(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
    """Represents a v2 neutron listener."""

    NAME = 'listener'

    __tablename__ = "lbaas_listeners"

    # A load balancer cannot host two listeners on the same port.
    __table_args__ = (
        sa.schema.UniqueConstraint('loadbalancer_id', 'protocol_port',
                                   name='uniq_loadbalancer_listener_port'),
    )
    name = sa.Column(sa.String(255))
    description = sa.Column(sa.String(255))
    # unique=True: a pool can be the default pool of only one listener.
    default_pool_id = sa.Column(sa.String(36), sa.ForeignKey("lbaas_pools.id"),
                                unique=True)
    loadbalancer_id = sa.Column(sa.String(36), sa.ForeignKey(
        "lbaas_loadbalancers.id"))
    protocol = sa.Column(sa.Enum(*lb_const.SUPPORTED_PROTOCOLS,
                                 name="listener_protocolsv2"),
                         nullable=False)
    protocol_port = sa.Column(sa.Integer, nullable=False)
    connection_limit = sa.Column(sa.Integer)
    admin_state_up = sa.Column(sa.Boolean(), nullable=False)
    status = sa.Column(sa.String(16), nullable=False)
    default_pool = orm.relationship(
        PoolV2, backref=orm.backref("listener", uselist=False), lazy='joined')
    loadbalancer = orm.relationship(
        LoadBalancer, backref=orm.backref("listeners"), lazy='joined')

View File

@ -1 +1 @@
start_neutron_lbaas
lbaasv2

View File

@ -0,0 +1,168 @@
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""lbaas version 2 api
Revision ID: lbaasv2
Revises: start_neutron_lbaas
Create Date: 2014-06-18 10:50:15.606420
"""
# revision identifiers, used by Alembic.
# revision identifiers, used by Alembic.
revision = 'lbaasv2'
down_revision = 'start_neutron_lbaas'

from alembic import op
import sqlalchemy as sa

# Enum types are defined once at module level so upgrade() and downgrade()
# reference the same objects (required to drop the types on PostgreSQL).
listener_protocols = sa.Enum("HTTP", "HTTPS", "TCP",
                             name="listener_protocolsv2")
pool_protocols = sa.Enum("HTTP", "HTTPS", "TCP",
                         name="pool_protocolsv2")
# NOTE(review): the ORM model names this enum
# "lbaas_sesssionpersistences_typev2" while the migration uses
# "sesssionpersistences_typev2" -- confirm the mismatch is intentional.
sesssionpersistences_type = sa.Enum("SOURCE_IP", "HTTP_COOKIE", "APP_COOKIE",
                                    name="sesssionpersistences_typev2")
lb_algorithms = sa.Enum("ROUND_ROBIN", "LEAST_CONNECTIONS", "SOURCE_IP",
                        name="lb_algorithmsv2")
healthmonitors_type = sa.Enum("PING", "TCP", "HTTP", "HTTPS",
                              name="healthmonitors_typev2")
def upgrade():
    """Create the LBaaS v2 schema.

    Tables are created in foreign-key dependency order:
    healthmonitors -> pools -> (sessionpersistences, members),
    loadbalancers -> (listeners, loadbalancer_statistics).
    """
    op.create_table(
        u'lbaas_healthmonitors',
        sa.Column(u'tenant_id', sa.String(255), nullable=True),
        sa.Column(u'id', sa.String(36), nullable=False),
        sa.Column(u'type', healthmonitors_type, nullable=False),
        sa.Column(u'delay', sa.Integer(), nullable=False),
        sa.Column(u'timeout', sa.Integer(), nullable=False),
        sa.Column(u'max_retries', sa.Integer(), nullable=False),
        sa.Column(u'http_method', sa.String(16), nullable=True),
        sa.Column(u'url_path', sa.String(255), nullable=True),
        sa.Column(u'expected_codes', sa.String(64), nullable=True),
        sa.Column(u'status', sa.String(16), nullable=False),
        sa.Column(u'admin_state_up', sa.Boolean(), nullable=False),
        sa.PrimaryKeyConstraint(u'id')
    )
    op.create_table(
        u'lbaas_pools',
        sa.Column(u'tenant_id', sa.String(255), nullable=True),
        sa.Column(u'id', sa.String(36), nullable=False),
        sa.Column(u'name', sa.String(255), nullable=True),
        sa.Column(u'description', sa.String(255), nullable=True),
        sa.Column(u'protocol', pool_protocols, nullable=False),
        sa.Column(u'lb_algorithm', lb_algorithms, nullable=False),
        sa.Column(u'healthmonitor_id', sa.String(36), nullable=True),
        sa.Column(u'status', sa.String(16), nullable=False),
        sa.Column(u'admin_state_up', sa.Boolean(), nullable=False),
        sa.PrimaryKeyConstraint(u'id'),
        # A health monitor may be attached to at most one pool.
        sa.UniqueConstraint(u'healthmonitor_id'),
        sa.ForeignKeyConstraint([u'healthmonitor_id'],
                                [u'lbaas_healthmonitors.id'])
    )
    op.create_table(
        u'lbaas_sessionpersistences',
        sa.Column(u'pool_id', sa.String(36), nullable=False),
        sa.Column(u'type', sesssionpersistences_type, nullable=False),
        sa.Column(u'cookie_name', sa.String(1024), nullable=True),
        sa.ForeignKeyConstraint([u'pool_id'], [u'lbaas_pools.id']),
        # One persistence record per pool: pool_id is the primary key.
        sa.PrimaryKeyConstraint(u'pool_id')
    )
    op.create_table(
        u'lbaas_members',
        sa.Column(u'tenant_id', sa.String(255), nullable=True),
        sa.Column(u'id', sa.String(36), nullable=False),
        sa.Column(u'pool_id', sa.String(36), nullable=False),
        sa.Column(u'subnet_id', sa.String(36), nullable=True),
        sa.Column(u'address', sa.String(64), nullable=False),
        sa.Column(u'protocol_port', sa.Integer(), nullable=False),
        sa.Column(u'weight', sa.Integer(), nullable=True),
        sa.Column(u'status', sa.String(16), nullable=False),
        sa.Column(u'admin_state_up', sa.Boolean(), nullable=False),
        sa.PrimaryKeyConstraint(u'id'),
        sa.ForeignKeyConstraint([u'pool_id'], [u'lbaas_pools.id']),
        sa.UniqueConstraint(u'pool_id', u'address', u'protocol_port',
                            name=u'uniq_pool_address_port_v2')
    )
    op.create_table(
        u'lbaas_loadbalancers',
        sa.Column(u'tenant_id', sa.String(255), nullable=True),
        sa.Column(u'id', sa.String(36), nullable=False),
        sa.Column(u'name', sa.String(255), nullable=True),
        sa.Column(u'description', sa.String(255), nullable=True),
        sa.Column(u'vip_port_id', sa.String(36), nullable=True),
        sa.Column(u'vip_subnet_id', sa.String(36), nullable=False),
        sa.Column(u'vip_address', sa.String(36), nullable=True),
        sa.Column(u'status', sa.String(16), nullable=False),
        sa.Column(u'admin_state_up', sa.Boolean(), nullable=False),
        # References the core neutron 'ports' table for the VIP port.
        sa.ForeignKeyConstraint([u'vip_port_id'], [u'ports.id'],
                                name=u'fk_lbaas_loadbalancers_ports_id'),
        sa.PrimaryKeyConstraint(u'id')
    )
    op.create_table(
        u'lbaas_listeners',
        sa.Column(u'tenant_id', sa.String(255), nullable=True),
        sa.Column(u'id', sa.String(36), nullable=False),
        sa.Column(u'name', sa.String(255), nullable=True),
        sa.Column(u'description', sa.String(255), nullable=True),
        sa.Column(u'protocol', listener_protocols, nullable=False),
        sa.Column(u'protocol_port', sa.Integer(), nullable=False),
        sa.Column(u'connection_limit', sa.Integer(), nullable=True),
        sa.Column(u'loadbalancer_id', sa.String(36), nullable=True),
        sa.Column(u'default_pool_id', sa.String(36), nullable=True),
        sa.Column(u'status', sa.String(16), nullable=False),
        sa.Column(u'admin_state_up', sa.Boolean(), nullable=False),
        sa.ForeignKeyConstraint([u'loadbalancer_id'],
                                [u'lbaas_loadbalancers.id']),
        sa.ForeignKeyConstraint([u'default_pool_id'],
                                [u'lbaas_pools.id']),
        # A pool may be the default of only one listener.
        sa.UniqueConstraint(u'default_pool_id'),
        sa.UniqueConstraint(u'loadbalancer_id', u'protocol_port',
                            name=u'uniq_loadbalancer_listener_port'),
        sa.PrimaryKeyConstraint(u'id')
    )
    op.create_table(
        u'lbaas_loadbalancer_statistics',
        sa.Column(u'loadbalancer_id', sa.String(36), nullable=False),
        sa.Column(u'bytes_in', sa.BigInteger(), nullable=False),
        sa.Column(u'bytes_out', sa.BigInteger(), nullable=False),
        sa.Column(u'active_connections', sa.BigInteger(), nullable=False),
        sa.Column(u'total_connections', sa.BigInteger(), nullable=False),
        sa.PrimaryKeyConstraint(u'loadbalancer_id'),
        sa.ForeignKeyConstraint([u'loadbalancer_id'],
                                [u'lbaas_loadbalancers.id'])
    )
def downgrade():
    # Drop tables in reverse dependency order: tables holding foreign keys
    # first, then the tables they reference.
    op.drop_table(u'lbaas_loadbalancer_statistics')
    op.drop_table(u'lbaas_listeners')
    # Enum types must be dropped explicitly once the last table using them
    # is gone (only material on backends with native ENUMs, e.g. PostgreSQL).
    listener_protocols.drop(op.get_bind(), checkfirst=False)
    op.drop_table(u'lbaas_loadbalancers')
    op.drop_table(u'lbaas_members')
    op.drop_table(u'lbaas_sessionpersistences')
    sesssionpersistences_type.drop(op.get_bind(), checkfirst=False)
    op.drop_table(u'lbaas_pools')
    pool_protocols.drop(op.get_bind(), checkfirst=False)
    lb_algorithms.drop(op.get_bind(), checkfirst=False)
    op.drop_table(u'lbaas_healthmonitors')
    healthmonitors_type.drop(op.get_bind(), checkfirst=False)

View File

@ -13,22 +13,31 @@
# License for the specific language governing permissions and limitations
# under the License.
#FIXME(brandon-logan): change these to LB_ALGORITHM
LB_METHOD_ROUND_ROBIN = 'ROUND_ROBIN'
LB_METHOD_LEAST_CONNECTIONS = 'LEAST_CONNECTIONS'
LB_METHOD_SOURCE_IP = 'SOURCE_IP'
SUPPORTED_LB_ALGORITHMS = (LB_METHOD_LEAST_CONNECTIONS, LB_METHOD_ROUND_ROBIN,
LB_METHOD_SOURCE_IP)
PROTOCOL_TCP = 'TCP'
PROTOCOL_HTTP = 'HTTP'
PROTOCOL_HTTPS = 'HTTPS'
SUPPORTED_PROTOCOLS = (PROTOCOL_TCP, PROTOCOL_HTTPS, PROTOCOL_HTTP)
HEALTH_MONITOR_PING = 'PING'
HEALTH_MONITOR_TCP = 'TCP'
HEALTH_MONITOR_HTTP = 'HTTP'
HEALTH_MONITOR_HTTPS = 'HTTPS'
SUPPORTED_HEALTH_MONITOR_TYPES = (HEALTH_MONITOR_HTTP, HEALTH_MONITOR_HTTPS,
HEALTH_MONITOR_PING, HEALTH_MONITOR_TCP)
SESSION_PERSISTENCE_SOURCE_IP = 'SOURCE_IP'
SESSION_PERSISTENCE_HTTP_COOKIE = 'HTTP_COOKIE'
SESSION_PERSISTENCE_APP_COOKIE = 'APP_COOKIE'
SUPPORTED_SP_TYPES = (SESSION_PERSISTENCE_SOURCE_IP,
SESSION_PERSISTENCE_HTTP_COOKIE,
SESSION_PERSISTENCE_APP_COOKIE)
STATS_ACTIVE_CONNECTIONS = 'active_connections'
STATS_MAX_CONNECTIONS = 'max_connections'
@ -43,3 +52,7 @@ STATS_RESPONSE_ERRORS = 'response_errors'
STATS_STATUS = 'status'
STATS_HEALTH = 'health'
STATS_FAILED_CHECKS = 'failed_checks'
# Constants to extend status strings in neutron.plugins.common.constants
# Status if an entity is not linked to a load balancer
DEFERRED = "DEFERRED"

View File

@ -0,0 +1,288 @@
# Copyright (c) 2014 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
This module holds the data models for the load balancer service plugin. These
are meant simply as replacement data structures for dictionaries and
SQLAlchemy models. Using dictionaries as data containers for many components
causes readability issues and does not intuitively give the benefits of what
classes and OO give. Using SQLAlchemy models as data containers for many
components can become an issue if you do not want to give certain components
access to the database.
These data models do provide methods for instantiation from SQLAlchemy models
and also converting to dictionaries.
"""
from neutron.db import model_base
from neutron.db import models_v2
from neutron.db import servicetype_db
from sqlalchemy.orm import collections
from neutron_lbaas.db.loadbalancer import models
class BaseDataModel(object):
    """Base class for the LBaaS v2 data models.

    Provides dict conversion and construction from SQLAlchemy models so
    that drivers can receive plain objects instead of DB-bound instances.
    """
    # NOTE(brandon-logan) This does not discover dicts for relationship
    # attributes.
    def to_dict(self):
        """Return a dict of the non-relationship attributes.

        Attributes with a leading underscore and attributes that are
        themselves data models (i.e. relationships) are skipped.
        """
        ret = {}
        for attr in self.__dict__:
            if (attr.startswith('_') or
                    isinstance(getattr(self, attr), BaseDataModel)):
                continue
            ret[attr] = self.__dict__[attr]
        return ret
    @classmethod
    def from_sqlalchemy_model(cls, sa_model, calling_class=None):
        """Build a data model instance from a SQLAlchemy model instance.

        Relationship attributes are converted recursively via
        SA_MODEL_TO_DATA_MODEL_MAP; ``calling_class`` is the data model
        class of the caller and is used to skip the back-reference that
        points at the caller, preventing infinite recursion.
        """
        instance = cls()
        for attr_name in vars(instance):
            if attr_name.startswith('_'):
                continue
            attr = getattr(sa_model, attr_name)
            # Handles M:1 or 1:1 relationships
            if isinstance(attr, model_base.BASEV2):
                if hasattr(instance, attr_name):
                    data_class = SA_MODEL_TO_DATA_MODEL_MAP[attr.__class__]
                    if calling_class != data_class and data_class:
                        setattr(instance, attr_name,
                                data_class.from_sqlalchemy_model(
                                    attr, calling_class=cls))
            # Handles 1:M or M:M relationships
            elif isinstance(attr, collections.InstrumentedList):
                for item in attr:
                    if hasattr(instance, attr_name):
                        data_class = SA_MODEL_TO_DATA_MODEL_MAP[item.__class__]
                        attr_list = getattr(instance, attr_name) or []
                        attr_list.append(data_class.from_sqlalchemy_model(
                            item, calling_class=cls))
                        setattr(instance, attr_name, attr_list)
            # This isn't a relationship so it must be a "primitive"
            else:
                setattr(instance, attr_name, getattr(sa_model, attr_name))
        return instance
# NOTE(brandon-logan) IPAllocation, Port, and ProviderResourceAssociation are
# defined here because there aren't any data_models defined in core neutron
# or neutron services. Instead of jumping through the hoops to create those
# I've just defined them here. If ever data_models or similar are defined
# in those packages, those should be used instead of these.
class IPAllocation(BaseDataModel):
    """Stand-in for core neutron's IPAllocation (no core data model exists)."""

    def __init__(self, port_id=None, ip_address=None, subnet_id=None,
                 network_id=None):
        self.__dict__.update(port_id=port_id, ip_address=ip_address,
                             subnet_id=subnet_id, network_id=network_id)
class Port(BaseDataModel):
    """Stand-in for core neutron's Port (no core data model exists)."""

    def __init__(self, id=None, tenant_id=None, name=None, network_id=None,
                 mac_address=None, admin_state_up=None, status=None,
                 device_id=None, device_owner=None, fixed_ips=None):
        self.__dict__.update(
            id=id, tenant_id=tenant_id, name=name, network_id=network_id,
            mac_address=mac_address, admin_state_up=admin_state_up,
            status=status, device_id=device_id, device_owner=device_owner,
            fixed_ips=fixed_ips or [])
class ProviderResourceAssociation(BaseDataModel):
    """Stand-in for servicetype_db.ProviderResourceAssociation."""

    def __init__(self, provider_name=None, resource_id=None):
        self.__dict__.update(provider_name=provider_name,
                             resource_id=resource_id)
class SessionPersistence(BaseDataModel):
    """Session persistence configuration belonging to a pool."""

    def __init__(self, pool_id=None, type=None, cookie_name=None,
                 pool=None):
        self.__dict__.update(pool_id=pool_id, type=type,
                             cookie_name=cookie_name, pool=pool)

    def to_dict(self):
        # The owning pool is implied by context, so strip its linkage
        # from the serialized form.
        ret_dict = super(SessionPersistence, self).to_dict()
        for key in ('pool_id', 'pool'):
            ret_dict.pop(key, None)
        return ret_dict
class LoadBalancerStatistics(BaseDataModel):
    """Traffic counters collected for a load balancer."""

    def __init__(self, loadbalancer_id=None, bytes_in=None, bytes_out=None,
                 active_connections=None, total_connections=None,
                 loadbalancer=None):
        self.__dict__.update(loadbalancer_id=loadbalancer_id,
                             bytes_in=bytes_in, bytes_out=bytes_out,
                             active_connections=active_connections,
                             total_connections=total_connections,
                             loadbalancer=loadbalancer)

    def to_dict(self):
        # Callers already know which loadbalancer the stats belong to.
        ret = super(LoadBalancerStatistics, self).to_dict()
        ret.pop('loadbalancer_id', None)
        return ret
class HealthMonitor(BaseDataModel):
    """Health monitor attached to a pool."""

    def __init__(self, id=None, tenant_id=None, type=None, delay=None,
                 timeout=None, max_retries=None, http_method=None,
                 url_path=None, expected_codes=None, status=None,
                 admin_state_up=None, pool=None):
        self.__dict__.update(
            id=id, tenant_id=tenant_id, type=type, delay=delay,
            timeout=timeout, max_retries=max_retries,
            http_method=http_method, url_path=url_path,
            expected_codes=expected_codes, status=status,
            admin_state_up=admin_state_up, pool=pool)

    def attached_to_loadbalancer(self):
        # A monitor reaches a loadbalancer via pool -> listener -> lb.
        pool = self.pool
        return bool(pool and pool.listener and pool.listener.loadbalancer)
class Pool(BaseDataModel):
    """Pool of members plus its health monitor and persistence settings."""

    def __init__(self, id=None, tenant_id=None, name=None, description=None,
                 healthmonitor_id=None, protocol=None, lb_algorithm=None,
                 admin_state_up=None, status=None, members=None,
                 healthmonitor=None, sessionpersistence=None, listener=None):
        self.__dict__.update(
            id=id, tenant_id=tenant_id, name=name, description=description,
            healthmonitor_id=healthmonitor_id, protocol=protocol,
            lb_algorithm=lb_algorithm, admin_state_up=admin_state_up,
            status=status, members=members or [],
            healthmonitor=healthmonitor,
            sessionpersistence=sessionpersistence, listener=listener)

    def attached_to_loadbalancer(self):
        return bool(self.listener and self.listener.loadbalancer)

    def to_dict(self):
        ret_dict = super(Pool, self).to_dict()
        # Relationship attributes are skipped by the base to_dict; expand
        # the ones callers expect inline.
        ret_dict['members'] = [m.to_dict() for m in self.members]
        if self.sessionpersistence:
            ret_dict['session_persistence'] = self.sessionpersistence.to_dict()
        return ret_dict
class Member(BaseDataModel):
    """Backend server belonging to a pool."""

    def __init__(self, id=None, tenant_id=None, pool_id=None, address=None,
                 protocol_port=None, weight=None, admin_state_up=None,
                 subnet_id=None, status=None, pool=None):
        self.__dict__.update(
            id=id, tenant_id=tenant_id, pool_id=pool_id, address=address,
            protocol_port=protocol_port, weight=weight,
            admin_state_up=admin_state_up, subnet_id=subnet_id,
            status=status, pool=pool)

    def attached_to_loadbalancer(self):
        # A member reaches a loadbalancer via pool -> listener -> lb.
        pool = self.pool
        return bool(pool and pool.listener and pool.listener.loadbalancer)
class Listener(BaseDataModel):
    """Protocol endpoint of a load balancer."""

    def __init__(self, id=None, tenant_id=None, name=None, description=None,
                 default_pool_id=None, loadbalancer_id=None, protocol=None,
                 protocol_port=None, connection_limit=None,
                 admin_state_up=None, status=None, default_pool=None,
                 loadbalancer=None):
        self.__dict__.update(
            id=id, tenant_id=tenant_id, name=name, description=description,
            default_pool_id=default_pool_id,
            loadbalancer_id=loadbalancer_id, protocol=protocol,
            protocol_port=protocol_port,
            connection_limit=connection_limit,
            admin_state_up=admin_state_up, status=status,
            default_pool=default_pool, loadbalancer=loadbalancer)

    def attached_to_loadbalancer(self):
        return bool(self.loadbalancer)
class LoadBalancer(BaseDataModel):
    """Root object of the LBaaS v2 object model."""

    def __init__(self, id=None, tenant_id=None, name=None, description=None,
                 vip_subnet_id=None, vip_port_id=None, vip_address=None,
                 status=None, admin_state_up=None, vip_port=None,
                 stats=None, provider=None, listeners=None):
        self.__dict__.update(
            id=id, tenant_id=tenant_id, name=name, description=description,
            vip_subnet_id=vip_subnet_id, vip_port_id=vip_port_id,
            vip_address=vip_address, status=status,
            admin_state_up=admin_state_up, vip_port=vip_port, stats=stats,
            provider=provider, listeners=listeners or [])

    def attached_to_loadbalancer(self):
        # A loadbalancer is trivially attached to itself.
        return True
# Translation table used by BaseDataModel.from_sqlalchemy_model to find
# the data model class corresponding to a SQLAlchemy model class.
SA_MODEL_TO_DATA_MODEL_MAP = {
    models.LoadBalancer: LoadBalancer,
    models.HealthMonitorV2: HealthMonitor,
    models.Listener: Listener,
    models.PoolV2: Pool,
    models.MemberV2: Member,
    models.LoadBalancerStatistics: LoadBalancerStatistics,
    models.SessionPersistenceV2: SessionPersistence,
    models_v2.IPAllocation: IPAllocation,
    models_v2.Port: Port,
    servicetype_db.ProviderResourceAssociation: ProviderResourceAssociation
}

View File

@ -12,7 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lbaas.db.loadbalancer import loadbalancer_db as lb_db
from neutron_lbaas.db.loadbalancer import models
from neutron_lbaas.services.loadbalancer.drivers import driver_mixins
@ -46,42 +46,63 @@ class LoadBalancerBaseDriver(object):
def __init__(self, plugin):
self.plugin = plugin
def activate_cascade(self, context, obj):
self.plugin.activate_linked_entities(context, obj)
class BaseLoadBalancerManager(driver_mixins.BaseRefreshMixin,
driver_mixins.BaseStatsMixin,
driver_mixins.BaseStatusUpdateMixin,
driver_mixins.BaseDeleteHelperMixin,
driver_mixins.BaseManagerMixin):
model_class = models.LoadBalancer
def __init__(self, driver):
super(BaseLoadBalancerManager, self).__init__(driver)
# TODO(dougw), use lb_db.LoadBalancer when v2 lbaas
# TODO(dougw), get rid of __init__() in StatusHelperManager, and
# the if is not None clauses; after fixing this next line,
# it can become a mandatory variable for that subclass.
self.model_class = None
@property
def db_delete_method(self):
return self.driver.plugin.db.delete_loadbalancer
class BaseListenerManager(driver_mixins.BaseManagerMixin):
pass
class BaseListenerManager(driver_mixins.BaseStatusUpdateMixin,
driver_mixins.BaseDeleteHelperMixin,
driver_mixins.BaseManagerMixin):
model_class = models.Listener
@property
def db_delete_method(self):
return self.driver.plugin.db.delete_listener
def defer_cascade(self, context, listener):
self.driver.plugin.defer_listener(context, listener)
class BasePoolManager(driver_mixins.BaseStatusUpdateMixin,
driver_mixins.BaseDeleteHelperMixin,
driver_mixins.BaseManagerMixin):
model_class = models.PoolV2
def __init__(self, driver):
super(BasePoolManager, self).__init__(driver)
self.model_class = lb_db.Pool
@property
def db_delete_method(self):
return self.driver.plugin.db.delete_pool
def defer_cascade(self, context, pool):
self.driver.plugin.defer_pool(context, pool)
class BaseMemberManager(driver_mixins.BaseStatusUpdateMixin,
driver_mixins.BaseDeleteHelperMixin,
driver_mixins.BaseManagerMixin):
model_class = models.MemberV2
def __init__(self, driver):
super(BaseMemberManager, self).__init__(driver)
self.model_class = lb_db.Member
@property
def db_delete_method(self):
return self.driver.plugin.db.delete_member
class BaseHealthMonitorManager(
driver_mixins.BaseHealthMonitorStatusUpdateMixin,
driver_mixins.BaseManagerMixin):
pass
class BaseHealthMonitorManager(driver_mixins.BaseStatusUpdateMixin,
driver_mixins.BaseDeleteHelperMixin,
driver_mixins.BaseManagerMixin):
model_class = models.HealthMonitorV2
@property
def db_delete_method(self):
return self.driver.plugin.db.delete_healthmonitor

View File

@ -16,6 +16,8 @@ import abc
from neutron.plugins.common import constants
import six
from neutron_lbaas.services.loadbalancer import constants as lb_constants
@six.add_metaclass(abc.ABCMeta)
class BaseManagerMixin(object):
@ -55,30 +57,26 @@ class BaseStatsMixin(object):
class BaseStatusUpdateMixin(object):
# Status update helpers
# Note: You must set self.model_class to an appropriate neutron model
# Note: You must set model_class to an appropriate neutron model
# in your base manager class.
def active(self, context, model_id):
if self.model_class is not None:
self.driver.plugin.update_status(context, self.model_class,
model_id, constants.ACTIVE)
self.driver.plugin.db.update_status(context, self.model_class,
model_id, constants.ACTIVE)
def failed(self, context, model_id):
if self.model_class is not None:
self.driver.plugin.update_status(context, self.model_class,
model_id, constants.ERROR)
self.driver.plugin.db.update_status(context, self.model_class,
model_id, constants.ERROR)
def defer(self, context, model_id):
self.driver.plugin.db.update_status(context, self.model_class,
model_id, lb_constants.DEFERRED)
class BaseHealthMonitorStatusUpdateMixin(object):
class BaseDeleteHelperMixin(object):
def active(self, context, health_monitor_id, pool_id):
self.driver.plugin.update_pool_health_monitor(context,
health_monitor_id,
pool_id,
constants.ACTIVE)
# DB delete helper
# Must define appropriate db delete function
def failed(self, context, health_monitor_id, pool_id):
self.driver.plugin.update_pool_health_monitor(context,
health_monitor_id,
pool_id,
constants.ERROR)
def db_delete(self, context, model_id):
self.db_delete_method(context, model_id)

View File

@ -0,0 +1,199 @@
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import jinja2
import six
from neutron.agent.linux import utils
from neutron.plugins.common import constants as plugin_constants
from oslo.config import cfg
from neutron_lbaas.services.loadbalancer import constants
# Map LBaaS protocol constants onto haproxy "mode" values.
# NOTE(review): HTTPS is deliberately mapped to 'tcp' mode — presumably
# TLS pass-through so haproxy does not terminate the connection; confirm.
PROTOCOL_MAP = {
    constants.PROTOCOL_TCP: 'tcp',
    constants.PROTOCOL_HTTP: 'http',
    constants.PROTOCOL_HTTPS: 'tcp'
}
# Map LBaaS balancing algorithm constants onto haproxy "balance" options.
BALANCE_MAP = {
    constants.LB_METHOD_ROUND_ROBIN: 'roundrobin',
    constants.LB_METHOD_LEAST_CONNECTIONS: 'leastconn',
    constants.LB_METHOD_SOURCE_IP: 'source'
}
# Map LBaaS statistics names onto haproxy stats field names.
STATS_MAP = {
    constants.STATS_ACTIVE_CONNECTIONS: 'scur',
    constants.STATS_MAX_CONNECTIONS: 'smax',
    constants.STATS_CURRENT_SESSIONS: 'scur',
    constants.STATS_MAX_SESSIONS: 'smax',
    constants.STATS_TOTAL_CONNECTIONS: 'stot',
    constants.STATS_TOTAL_SESSIONS: 'stot',
    constants.STATS_IN_BYTES: 'bin',
    constants.STATS_OUT_BYTES: 'bout',
    constants.STATS_CONNECTION_ERRORS: 'econ',
    constants.STATS_RESPONSE_ERRORS: 'eresp'
}
# Statuses for which an entity should still be rendered into the config.
ACTIVE_PENDING_STATUSES = plugin_constants.ACTIVE_PENDING_STATUSES + (
    plugin_constants.INACTIVE, constants.DEFERRED)
# Directory holding the haproxy jinja templates shipped with this package.
TEMPLATES_DIR = os.path.abspath(
    os.path.join(os.path.dirname(__file__), 'templates/'))
# Lazily created, module-level jinja Environment; see _get_template().
JINJA_ENV = None
jinja_opts = [
    cfg.StrOpt(
        'jinja_config_template',
        default=os.path.join(
            TEMPLATES_DIR,
            'haproxy_v1.4.template'),
        help=_('Jinja template file for haproxy configuration'))
]
cfg.CONF.register_opts(jinja_opts, 'haproxy')
def save_config(conf_path, loadbalancer, socket_path=None,
                user_group='nogroup'):
    """Convert a logical configuration to the HAProxy version."""
    rendered = render_loadbalancer_obj(loadbalancer, user_group, socket_path)
    # replace_file writes atomically, so haproxy never sees a partial file.
    # NOTE(review): atomicity assumed from the helper's name — confirm.
    utils.replace_file(conf_path, rendered)
def _get_template():
    """Return the configured haproxy jinja template.

    The jinja Environment is created once and cached in the module-level
    JINJA_ENV; trim_blocks/lstrip_blocks keep block tags from leaving
    stray whitespace in the rendered config.
    """
    global JINJA_ENV
    if not JINJA_ENV:
        template_loader = jinja2.FileSystemLoader(
            searchpath=os.path.dirname(cfg.CONF.haproxy.jinja_config_template))
        JINJA_ENV = jinja2.Environment(
            loader=template_loader, trim_blocks=True, lstrip_blocks=True)
    return JINJA_ENV.get_template(os.path.basename(
        cfg.CONF.haproxy.jinja_config_template))
def render_loadbalancer_obj(loadbalancer, user_group, socket_path):
    """Render the haproxy config text for a loadbalancer data model."""
    template_vars = {
        'loadbalancer': _transform_loadbalancer(loadbalancer),
        'user_group': user_group,
        'stats_sock': socket_path,
    }
    return _get_template().render(template_vars, constants=constants)
def _transform_loadbalancer(loadbalancer):
    """Flatten a LoadBalancer data model into template variables."""
    return {
        'name': loadbalancer.name,
        'vip_address': loadbalancer.vip_address,
        'listeners': [_transform_listener(listener)
                      for listener in loadbalancer.listeners],
    }
def _transform_listener(listener):
    """Flatten a Listener data model into template variables.

    connection_limit and default_pool are emitted only when set, so the
    template can test for their presence.
    """
    ret_value = {
        'id': listener.id,
        'protocol_port': listener.protocol_port,
        'protocol': PROTOCOL_MAP[listener.protocol],
    }
    limit = listener.connection_limit
    # Only emit a limit when a positive value is configured.
    if limit and limit > -1:
        ret_value['connection_limit'] = limit
    pool = listener.default_pool
    if pool:
        ret_value['default_pool'] = _transform_pool(pool)
    return ret_value
def _transform_pool(pool):
    """Flatten a Pool data model, its members, monitor and persistence."""
    ret_value = {
        'id': pool.id,
        'protocol': PROTOCOL_MAP[pool.protocol],
        # Unknown algorithms fall back to round robin.
        'lb_algorithm': BALANCE_MAP.get(pool.lb_algorithm, 'roundrobin'),
        'members': [_transform_member(member)
                    for member in pool.members if _include_member(member)],
        # Empty string means "not configured" to the template.
        'health_monitor': '',
        'session_persistence': '',
        'admin_state_up': pool.admin_state_up,
        'status': pool.status
    }
    if pool.healthmonitor:
        ret_value['health_monitor'] = _transform_health_monitor(
            pool.healthmonitor)
    if pool.sessionpersistence:
        ret_value['session_persistence'] = _transform_session_persistence(
            pool.sessionpersistence)
    return ret_value
def _transform_session_persistence(persistence):
return {
'type': persistence.type,
'cookie_name': persistence.cookie_name
}
def _transform_member(member):
return {
'id': member.id,
'address': member.address,
'protocol_port': member.protocol_port,
'weight': member.weight,
'admin_state_up': member.admin_state_up,
'subnet_id': member.subnet_id,
'status': member.status
}
def _transform_health_monitor(monitor):
    """Pick the health monitor fields the template consumes."""
    # haproxy's http-check expect takes a '|'-separated alternation.
    expected = '|'.join(_expand_expected_codes(monitor.expected_codes))
    return {
        'id': monitor.id,
        'type': monitor.type,
        'delay': monitor.delay,
        'timeout': monitor.timeout,
        'max_retries': monitor.max_retries,
        'http_method': monitor.http_method,
        'url_path': monitor.url_path,
        'expected_codes': expected,
        'admin_state_up': monitor.admin_state_up,
    }
def _include_member(member):
    # Only render members that are administratively up and in a status
    # the config should include (active/pending/inactive/deferred).
    return member.status in ACTIVE_PENDING_STATUSES and member.admin_state_up
def _expand_expected_codes(codes):
"""Expand the expected code string in set of codes.
200-204 -> 200, 201, 202, 204
200, 203 -> 200, 203
"""
retval = set()
for code in codes.replace(',', ' ').split(' '):
code = code.strip()
if not code:
continue
elif '-' in code:
low, hi = code.split('-')[:2]
retval.update(
str(i) for i in six.moves.xrange(int(low), int(hi) + 1))
else:
retval.add(code)
return retval

View File

@ -0,0 +1,33 @@
{# # Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
#}
{# Shared haproxy skeleton (global + defaults); child templates fill the
   "proxies" block with frontend/backend sections. #}
# Configuration for {{ loadbalancer_name }}
global
    daemon
    user nobody
    group {{ usergroup }}
    log /dev/log local0
    log /dev/log local1 notice
    stats socket {{ sock_path }} mode 0666 level user
defaults
    log global
    retries 3
    option redispatch
    timeout connect 5000
    timeout client 50000
    timeout server 50000
{% block proxies %}{% endblock proxies %}

View File

@ -0,0 +1,29 @@
{# # Copyright 2014 Openstack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
#}
{% extends 'haproxy_v1.4_proxies.template' %}
{% set loadbalancer_name = loadbalancer.name %}
{% set usergroup = user_group %}
{% set sock_path = stats_sock %}
{# Render one frontend per listener, plus a backend for its default pool
   when one is attached. #}
{% block proxies %}
{% from 'haproxy_v1.4_proxies.template' import frontend_macro as frontend_macro, backend_macro%}
{% for listener in loadbalancer.listeners %}
{{ frontend_macro(constants, listener, loadbalancer.vip_address) }}
{% if listener.default_pool %}
{{ backend_macro(constants, listener, listener.default_pool) }}
{% endif %}
{% endfor %}
{% endblock proxies %}

View File

@ -0,0 +1,74 @@
{# # Copyright 2014 Openstack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
#}
{% extends 'haproxy_base.template' %}
{# Emit a "frontend" section binding the VIP for one listener. #}
{% macro frontend_macro(constants, listener, lb_vip_address) %}
frontend {{ listener.id }}
    option tcplog
    {% if listener.connection_limit is defined %}
    maxconn {{ listener.connection_limit }}
    {% endif %}
    {% if listener.protocol == constants.PROTOCOL_HTTP.lower() %}
    option forwardfor
    {% endif %}
    bind {{ lb_vip_address }}:{{ listener.protocol_port }}
    mode {{ listener.protocol }}
    {% if listener.default_pool %}
    default_backend {{ listener.default_pool.id }}
    {% endif %}
{% endmacro %}
{# Emit a "backend" section for a pool: balance algorithm, session
   persistence, health checks and one server line per member.
   NOTE(review): session_persistence/health_monitor may be '' (not
   configured), in which case attribute lookups on them are undefined
   and the comparisons below are simply false. #}
{% macro backend_macro(constants, listener, pool) %}
backend {{ pool.id }}
    mode {{ pool.protocol }}
    balance {{ pool.lb_algorithm }}
    {% if pool.session_persistence %}
    {% if pool.session_persistence.type == constants.SESSION_PERSISTENCE_SOURCE_IP %}
    stick-table type ip size 10k
    stick on src
    {% elif pool.session_persistence.type == constants.SESSION_PERSISTENCE_HTTP_COOKIE %}
    cookie SRV insert indirect nocache
    {% elif pool.session_persistence.type == constants.SESSION_PERSISTENCE_APP_COOKIE and pool.session_persistence.cookie_name %}
    appsession {{ pool.session_persistence.cookie_name }} len 56 timeout 3h
    {% endif %}
    {% endif %}
    {% if pool.health_monitor %}
    timeout check {{ pool.health_monitor.timeout }}
    {% if pool.health_monitor.type == constants.HEALTH_MONITOR_HTTP or pool.health_monitor.type == constants.HEALTH_MONITOR_HTTPS %}
    option httpchk {{ pool.health_monitor.http_method }} {{ pool.health_monitor.url_path }}
    http-check expect rstatus {{ pool.health_monitor.expected_codes }}
    {% endif %}
    {% if pool.health_monitor.type == constants.HEALTH_MONITOR_HTTPS %}
    option ssl-hello-chk
    {% endif %}
    {% endif %}
    {% if listener.protocol == constants.PROTOCOL_HTTP.lower() %}
    option forwardfor
    {% endif %}
    {% for member in pool.members %}
    {% if pool.health_monitor %}
    {% set hm_opt = " check inter %ds fall %d"|format(pool.health_monitor.delay, pool.health_monitor.max_retries) %}
    {% else %}
    {% set hm_opt = "" %}
    {% endif %}
    {%if pool.session_persistence.type == constants.SESSION_PERSISTENCE_HTTP_COOKIE %}
    {% set persistence_opt = " cookie %s"|format(member.id) %}
    {% else %}
    {% set persistence_opt = "" %}
    {% endif %}
    {{ "server %s %s:%d weight %s%s%s"|e|format(member.id, member.address, member.protocol_port, member.weight, hm_opt, persistence_opt)|trim() }}
    {% endfor %}
{% endmacro %}

View File

@ -42,11 +42,9 @@ class LoggingNoopCommonManager(object):
def create(self, context, obj):
LOG.debug("LB %s no-op, create %s", self.__class__.__name__, obj.id)
self.active(context, obj.id)
def update(self, context, old_obj, obj):
LOG.debug("LB %s no-op, update %s", self.__class__.__name__, obj.id)
self.active(context, obj.id)
def delete(self, context, obj):
LOG.debug("LB %s no-op, delete %s", self.__class__.__name__, obj.id)
@ -69,38 +67,106 @@ class LoggingNoopLoadBalancerManager(LoggingNoopCommonManager,
"total_connections": 0
}
def create(self, context, loadbalancer):
super(LoggingNoopLoadBalancerManager, self).create(context,
loadbalancer)
self.driver.activate_cascade(context, loadbalancer)
def update(self, context, old_loadbalancer, loadbalancer):
super(LoggingNoopLoadBalancerManager, self).update(context,
old_loadbalancer,
loadbalancer)
self.driver.activate_cascade(context, loadbalancer)
def delete(self, context, loadbalancer):
super(LoggingNoopLoadBalancerManager, self).delete(context,
loadbalancer)
self.db_delete(context, loadbalancer.id)
class LoggingNoopListenerManager(LoggingNoopCommonManager,
driver_base.BaseListenerManager):
def create(self, context, obj):
LOG.debug("LB listener no-op, create %s", self.__class__.__name__,
obj.id)
super(LoggingNoopListenerManager, self).create(context, obj)
self.driver.activate_cascade(context, obj)
def update(self, context, old_obj, obj):
LOG.debug("LB listener no-op, update %s", self.__class__.__name__,
obj.id)
def update(self, context, old_listener, new_listener):
super(LoggingNoopListenerManager, self).update(context, old_listener,
new_listener)
if new_listener.attached_to_loadbalancer():
# Always activate listener and its children if attached to
# loadbalancer
self.driver.activate_cascade(context, new_listener)
elif old_listener.attached_to_loadbalancer():
# If listener has just been detached from loadbalancer
# defer listener and its children
self.defer_cascade(context, new_listener)
if not new_listener.default_pool and old_listener.default_pool:
# if listener's pool has been detached then defer the pool
# and its children
self.driver.pool.defer_cascade(context, old_listener.default_pool)
def delete(self, context, listener):
super(LoggingNoopListenerManager, self).delete(context, listener)
if listener.default_pool:
self.driver.pool.defer_cascade(context, listener.default_pool)
self.db_delete(context, listener.id)
class LoggingNoopPoolManager(LoggingNoopCommonManager,
driver_base.BasePoolManager):
pass
def create(self, context, pool):
super(LoggingNoopPoolManager, self).create(context, pool)
# This shouldn't be called since a pool cannot be created and linked
# to a loadbalancer at the same time
self.driver.activate_cascade(context, pool)
def update(self, context, old_pool, pool):
super(LoggingNoopPoolManager, self).update(context, old_pool, pool)
self.driver.activate_cascade(context, pool)
if not pool.healthmonitor and old_pool.healthmonitor:
self.driver.health_monitor.defer(context,
old_pool.healthmonitor.id)
def delete(self, context, pool):
super(LoggingNoopPoolManager, self).delete(context, pool)
if pool.healthmonitor:
self.driver.health_monitor.defer(context, pool.healthmonitor.id)
self.db_delete(context, pool.id)
class LoggingNoopMemberManager(LoggingNoopCommonManager,
driver_base.BaseMemberManager):
pass
def create(self, context, member):
super(LoggingNoopMemberManager, self).create(context, member)
self.driver.activate_cascade(context, member)
def update(self, context, old_member, member):
super(LoggingNoopMemberManager, self).update(context, old_member,
member)
self.driver.activate_cascade(context, member)
def delete(self, context, member):
super(LoggingNoopMemberManager, self).delete(context, member)
self.db_delete(context, member.id)
class LoggingNoopHealthMonitorManager(LoggingNoopCommonManager,
driver_base.BaseHealthMonitorManager):
def create(self, context, obj):
LOG.debug("LB health monitor no-op, create %s",
self.__class__.__name__, obj.id)
self.active(context, obj.id, obj.id)
def create(self, context, healthmonitor):
super(LoggingNoopHealthMonitorManager, self).create(context,
healthmonitor)
self.driver.activate_cascade(context, healthmonitor)
def update(self, context, old_obj, obj):
LOG.debug("LB health monitor no-op, update %s",
self.__class__.__name__, obj.id)
self.active(context, obj.id, obj.id)
def update(self, context, old_healthmonitor, healthmonitor):
super(LoggingNoopHealthMonitorManager, self).update(context,
old_healthmonitor,
healthmonitor)
self.driver.activate_cascade(context, healthmonitor)
def delete(self, context, healthmonitor):
super(LoggingNoopHealthMonitorManager, self).delete(context,
healthmonitor)
self.db_delete(context, healthmonitor.id)

View File

@ -15,10 +15,11 @@
from neutron.api.v2 import attributes as attrs
from neutron.common import exceptions as n_exc
from neutron import context
from neutron import context as ncontext
from neutron.db import servicetype_db as st_db
from neutron.extensions import loadbalancer
from neutron.i18n import _LE
from neutron.extensions import loadbalancerv2
from neutron.i18n import _LI, _LE
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants
from neutron.services import provider_configuration as pconf
@ -26,7 +27,11 @@ from neutron.services import service_base
from oslo.utils import excutils
from neutron_lbaas.db.loadbalancer import loadbalancer_db as ldb
from neutron_lbaas.db.loadbalancer import loadbalancer_dbv2 as ldbv2
from neutron_lbaas.db.loadbalancer import models
from neutron_lbaas.services.loadbalancer import agent_scheduler
from neutron_lbaas.services.loadbalancer import constants as lb_const
from neutron_lbaas.services.loadbalancer import data_models
LOG = logging.getLogger(__name__)
@ -50,7 +55,6 @@ class LoadBalancerPlugin(ldb.LoadBalancerPluginDb,
def __init__(self):
"""Initialization for the loadbalancer service plugin."""
self.service_type_manager = st_db.ServiceTypeManager.get_instance()
self._load_drivers()
@ -61,7 +65,7 @@ class LoadBalancerPlugin(ldb.LoadBalancerPluginDb,
# we're at the point when extensions are not loaded yet
# so prevent policy from being loaded
ctx = context.get_admin_context(load_admin_roles=False)
ctx = ncontext.get_admin_context(load_admin_roles=False)
# stop service in case provider was removed, but resources were not
self._check_orphan_pool_associations(ctx, self.drivers.keys())
@ -142,6 +146,16 @@ class LoadBalancerPlugin(ldb.LoadBalancerPluginDb,
return self.default_provider
def create_pool(self, context, pool):
# This validation is because the new API version also has a resource
# called pool and these attributes have to be optional in the old API
# so they are not required attributes of the new. Its complicated.
if (pool['pool']['lb_method'] == attrs.ATTR_NOT_SPECIFIED):
raise loadbalancerv2.RequiredAttributeNotSpecified(
attr_name='lb_method')
if (pool['pool']['subnet_id'] == attrs.ATTR_NOT_SPECIFIED):
raise loadbalancerv2.RequiredAttributeNotSpecified(
attr_name='subnet_id')
provider_name = self._get_provider_name(context, pool['pool'])
p = super(LoadBalancerPlugin, self).create_pool(context, pool)
@ -149,8 +163,8 @@ class LoadBalancerPlugin(ldb.LoadBalancerPluginDb,
context,
constants.LOADBALANCER,
provider_name, p['id'])
#need to add provider name to pool dict,
#because provider was not known to db plugin at pool creation
# need to add provider name to pool dict,
# because provider was not known to db plugin at pool creation
p['provider'] = provider_name
driver = self.drivers[provider_name]
try:
@ -323,3 +337,572 @@ class LoadBalancerPlugin(ldb.LoadBalancerPluginDb,
if provider not in self.drivers:
raise pconf.ServiceProviderNotFound(
provider=provider, service_type=constants.LOADBALANCER)
class LoadBalancerPluginv2(loadbalancerv2.LoadBalancerPluginBaseV2,
agent_scheduler.LbaasAgentSchedulerDbMixin):
"""Implementation of the Neutron Loadbalancer Service Plugin.
This class manages the workflow of LBaaS request/response.
Most DB related works are implemented in class
loadbalancer_db.LoadBalancerPluginDb.
"""
supported_extension_aliases = ["lbaasv2",
"lbaas_agent_scheduler",
"service-type"]
# lbaas agent notifiers to handle agent update operations;
# can be updated by plugin drivers while loading;
# will be extracted by neutron manager when loading service plugins;
agent_notifiers = {}
def __init__(self):
"""Initialization for the loadbalancer service plugin."""
self.db = ldbv2.LoadBalancerPluginDbv2()
self.service_type_manager = st_db.ServiceTypeManager.get_instance()
self._load_drivers()
def _load_drivers(self):
"""Loads plugin-drivers specified in configuration."""
self.drivers, self.default_provider = service_base.load_drivers(
constants.LOADBALANCERV2, self)
# we're at the point when extensions are not loaded yet
# so prevent policy from being loaded
ctx = ncontext.get_admin_context(load_admin_roles=False)
# stop service in case provider was removed, but resources were not
self._check_orphan_loadbalancer_associations(ctx, self.drivers.keys())
    def _check_orphan_loadbalancer_associations(self, context, provider_names):
        """Checks remaining associations between loadbalancers and providers.

        If admin has not undeployed resources with provider that was deleted
        from configuration, neutron service is stopped. Admin must delete
        resources prior to removing providers from configuration.

        :param provider_names: names of the providers currently loaded
        :raises SystemExit: if any load balancer references a provider
            that is no longer configured.
        """
        loadbalancers = self.db.get_loadbalancers(context)
        # providers that still back a load balancer but are no longer in
        # the service_provider configuration
        lost_providers = set(
            [loadbalancer.provider.provider_name
             for loadbalancer in loadbalancers
             if loadbalancer.provider.provider_name not in provider_names])
        # resources are left without provider - stop the service
        if lost_providers:
            msg = _("Delete associated load balancers before "
                    "removing providers %s") % list(lost_providers)
            LOG.error(msg)
            raise SystemExit(1)
def _get_driver_for_provider(self, provider):
try:
return self.drivers[provider]
except KeyError:
# raise if not associated (should never be reached)
raise n_exc.Invalid(_("Error retrieving driver for provider %s") %
provider)
    def _get_driver_for_loadbalancer(self, context, loadbalancer_id):
        """Return the provider driver backing the given load balancer.

        :raises n_exc.Invalid: if the load balancer's provider is not
            among the currently loaded drivers.
        """
        loadbalancer = self.db.get_loadbalancer(context, loadbalancer_id)
        try:
            return self.drivers[loadbalancer.provider.provider_name]
        except KeyError:
            raise n_exc.Invalid(
                _("Error retrieving provider for load balancer. Possible "
                  "providers are %s.") % self.drivers.keys()
            )
def _get_provider_name(self, entity):
if ('provider' in entity and
entity['provider'] != attrs.ATTR_NOT_SPECIFIED):
provider_name = pconf.normalize_provider_name(entity['provider'])
self.validate_provider(provider_name)
return provider_name
else:
if not self.default_provider:
raise pconf.DefaultServiceProviderNotFound(
service_type=constants.LOADBALANCER)
return self.default_provider
    def _call_driver_operation(self, context, driver_method, db_entity,
                               old_db_entity=None):
        """Invoke a driver manager method, translating failures to ERROR.

        :param driver_method: bound method on a driver manager
            (e.g. ``driver.listener.create``)
        :param db_entity: data model handed to the driver
        :param old_db_entity: pre-update state; when given the driver is
            called with (context, old, new) as update-style methods expect
        :raises loadbalancerv2.DriverError: if the driver raises any
            exception; the entity's DB status is set to ERROR first.
        """
        manager_method = "%s.%s" % (driver_method.__self__.__class__.__name__,
                                    driver_method.__name__)
        LOG.info(_LI("Calling driver operation %s") % manager_method)
        try:
            if old_db_entity:
                driver_method(context, old_db_entity, db_entity)
            else:
                driver_method(context, db_entity)
        except Exception:
            LOG.exception(_LE("There was an error in the driver"))
            # _SA_MODEL maps the data model class back to its SQLAlchemy
            # model so update_status can address the right table
            self.db.update_status(context, db_entity.__class__._SA_MODEL,
                                  db_entity.id, constants.ERROR)
            raise loadbalancerv2.DriverError()
def _validate_session_persistence_info(self, sp_info):
"""Performs sanity check on session persistence info.
:param sp_info: Session persistence info
"""
if not sp_info:
return
if sp_info['type'] == lb_const.SESSION_PERSISTENCE_APP_COOKIE:
if not sp_info.get('cookie_name'):
raise ValueError(_("'cookie_name' should be specified for %s"
" session persistence.") % sp_info['type'])
else:
if 'cookie_name' in sp_info:
raise ValueError(_("'cookie_name' is not allowed for %s"
" session persistence") % sp_info['type'])
def defer_listener(self, context, listener, cascade=True):
self.db.update_status(context, models.Listener, listener.id,
lb_const.DEFERRED)
if cascade and listener.default_pool:
self.defer_pool(context, listener.default_pool, cascade=cascade)
def defer_pool(self, context, pool, cascade=True):
self.db.update_status(context, models.PoolV2, pool.id,
lb_const.DEFERRED)
if cascade:
self.defer_members(context, pool.members)
if cascade and pool.healthmonitor:
self.defer_healthmonitor(context, pool.healthmonitor)
def defer_healthmonitor(self, context, healthmonitor):
self.db.update_status(context, models.HealthMonitorV2,
healthmonitor.id, lb_const.DEFERRED)
def defer_members(self, context, members):
for member in members:
self.db.update_status(context, models.MemberV2,
member.id, lb_const.DEFERRED)
def defer_unlinked_entities(self, context, obj, old_obj=None):
# if old_obj is None then this is delete else it is an update
if isinstance(obj, models.Listener):
# if listener.loadbalancer_id is set to None set listener status
# to deferred
deleted_listener = not old_obj
unlinked_listener = (not obj.loadbalancer and old_obj and
old_obj.loadbalancer)
unlinked_pool = (bool(old_obj) and not obj.default_pool and
old_obj.default_pool)
if unlinked_listener:
self.db.update_status(context, models.Listener,
old_obj.id, lb_const.DEFERRED)
# if listener has been deleted OR if default_pool_id has been
# updated to None, then set Pool and its children statuses to
# DEFERRED
if deleted_listener or unlinked_pool or unlinked_listener:
if old_obj:
obj = old_obj
if not obj.default_pool:
return
self.db.update_status(context, models.PoolV2,
obj.default_pool.id, lb_const.DEFERRED)
if obj.default_pool.healthmonitor:
self.db.update_status(context, models.HealthMonitorV2,
obj.default_pool.healthmonitor.id,
lb_const.DEFERRED)
for member in obj.default_pool.members:
self.db.update_status(context, models.MemberV2,
member.id, lb_const.DEFERRED)
elif isinstance(obj, models.PoolV2):
# if pool has been deleted OR if pool.healthmonitor_id has been
# updated to None then set healthmonitor status to DEFERRED
deleted_pool = not old_obj
unlinked_hm = (bool(old_obj) and not obj.healthmonitor and
old_obj.healthmonitor)
if deleted_pool or unlinked_hm:
if old_obj:
obj = old_obj
self.db.update_status(context, models.HealthMonitorV2,
obj.healthmonitor.id,
lb_const.DEFERRED)
    def activate_linked_entities(self, context, obj):
        """Set *obj* ACTIVE and cascade activation to its linked children.

        LoadBalancer: status only (it cannot relink children).
        Listener: cascades into its default pool.
        Pool: cascades into its health monitor and members.
        Member: activated unless currently INACTIVE.
        """
        if isinstance(obj, data_models.LoadBalancer):
            self.db.update_status(context, models.LoadBalancer,
                                  obj.id, constants.ACTIVE)
            # only update loadbalancer's status because it's not able to
            # change any links to children
            return
        if isinstance(obj, data_models.Listener):
            self.db.update_status(context, models.Listener,
                                  obj.id, constants.ACTIVE)
            if obj.default_pool:
                self.activate_linked_entities(context, obj.default_pool)
        if isinstance(obj, data_models.Pool):
            self.db.update_status(context, models.PoolV2,
                                  obj.id, constants.ACTIVE)
            if obj.healthmonitor:
                self.activate_linked_entities(context, obj.healthmonitor)
            for member in obj.members:
                self.activate_linked_entities(context, member)
        if isinstance(obj, data_models.Member):
            # do not overwrite INACTIVE status
            if obj.status != constants.INACTIVE:
                self.db.update_status(context, models.MemberV2, obj.id,
                                      constants.ACTIVE)
        if isinstance(obj, data_models.HealthMonitor):
            self.db.update_status(context, models.HealthMonitorV2, obj.id,
                                  constants.ACTIVE)
    def get_plugin_type(self):
        """Return the service type constant this plugin implements."""
        return constants.LOADBALANCERV2

    def get_plugin_description(self):
        """Return a human-readable description of the plugin."""
        return "Neutron LoadBalancer Service Plugin v2"
def create_loadbalancer(self, context, loadbalancer):
loadbalancer = loadbalancer.get('loadbalancer')
provider_name = self._get_provider_name(loadbalancer)
lb_db = self.db.create_loadbalancer(context, loadbalancer)
self.service_type_manager.add_resource_association(
context,
constants.LOADBALANCERV2,
provider_name, lb_db.id)
driver = self.drivers[provider_name]
self._call_driver_operation(
context, driver.load_balancer.create, lb_db)
return self.db.get_loadbalancer(context, lb_db.id).to_dict()
def update_loadbalancer(self, context, id, loadbalancer):
loadbalancer = loadbalancer.get('loadbalancer')
old_lb = self.db.get_loadbalancer(context, id)
self.db.test_and_set_status(context, models.LoadBalancer, id,
constants.PENDING_UPDATE)
try:
updated_lb = self.db.update_loadbalancer(
context, id, loadbalancer)
except Exception as exc:
self.db.update_status(context, models.LoadBalancer, id,
old_lb.status)
raise exc
driver = self._get_driver_for_provider(old_lb.provider.provider_name)
self._call_driver_operation(context,
driver.load_balancer.update,
updated_lb, old_db_entity=old_lb)
return self.db.get_loadbalancer(context, updated_lb.id).to_dict()
def delete_loadbalancer(self, context, id):
old_lb = self.db.get_loadbalancer(context, id)
if old_lb.listeners:
raise loadbalancerv2.EntityInUse(
entity_using=models.Listener.NAME,
id=old_lb.listeners[0].id,
entity_in_use=models.LoadBalancer.NAME)
self.db.test_and_set_status(context, models.LoadBalancer, id,
constants.PENDING_DELETE)
driver = self._get_driver_for_provider(old_lb.provider.provider_name)
self._call_driver_operation(
context, driver.load_balancer.delete, old_lb)
def get_loadbalancer(self, context, id, fields=None):
lb_db = self.db.get_loadbalancer(context, id)
return self.db._fields(lb_db.to_dict(), fields)
def get_loadbalancers(self, context, filters=None, fields=None):
loadbalancers = self.db.get_loadbalancers(context, filters=filters)
return [self.db._fields(lb.to_dict(), fields) for lb in loadbalancers]
def create_listener(self, context, listener):
listener = listener.get('listener')
listener_db = self.db.create_listener(context, listener)
if listener_db.attached_to_loadbalancer():
driver = self._get_driver_for_loadbalancer(
context, listener_db.loadbalancer_id)
self._call_driver_operation(
context, driver.listener.create, listener_db)
else:
self.db.update_status(context, models.Listener, listener_db.id,
lb_const.DEFERRED)
return self.db.get_listener(context, listener_db.id).to_dict()
def update_listener(self, context, id, listener):
listener = listener.get('listener')
old_listener = self.db.get_listener(context, id)
self.db.test_and_set_status(context, models.Listener, id,
constants.PENDING_UPDATE)
try:
listener_db = self.db.update_listener(context, id, listener)
except Exception as exc:
self.db.update_status(context, models.Listener, id,
old_listener.status)
raise exc
if (listener_db.attached_to_loadbalancer() or
old_listener.attached_to_loadbalancer()):
if listener_db.attached_to_loadbalancer():
driver = self._get_driver_for_loadbalancer(
context, listener_db.loadbalancer_id)
else:
driver = self._get_driver_for_loadbalancer(
context, old_listener.loadbalancer_id)
self._call_driver_operation(
context,
driver.listener.update,
listener_db,
old_db_entity=old_listener)
else:
self.db.update_status(context, models.Listener, id,
lb_const.DEFERRED)
return self.db.get_listener(context, listener_db.id).to_dict()
def delete_listener(self, context, id):
self.db.test_and_set_status(context, models.Listener, id,
constants.PENDING_DELETE)
listener_db = self.db.get_listener(context, id)
if listener_db.attached_to_loadbalancer():
driver = self._get_driver_for_loadbalancer(
context, listener_db.loadbalancer_id)
self._call_driver_operation(
context, driver.listener.delete, listener_db)
else:
self.db.delete_listener(context, id)
def get_listener(self, context, id, fields=None):
listener_db = self.db.get_listener(context, id)
return self.db._fields(listener_db.to_dict(), fields)
def get_listeners(self, context, filters=None, fields=None):
listeners = self.db.get_listeners(context, filters=filters)
return [self.db._fields(listener.to_dict(), fields)
for listener in listeners]
def create_pool(self, context, pool):
pool = pool.get('pool')
# FIXME(brandon-logan) This validation should only exist while the old
# version of the API exists. Remove the following block when this
# happens
pool.pop('lb_method', None)
pool.pop('provider', None)
pool.pop('subnet_id', None)
pool.pop('health_monitors', None)
if ('lb_algorithm' not in pool or
pool['lb_algorithm'] == attrs.ATTR_NOT_SPECIFIED):
raise loadbalancerv2.RequiredAttributeNotSpecified(
attr_name='lb_algorithm')
self._validate_session_persistence_info(
pool.get('session_persistence'))
db_pool = self.db.create_pool(context, pool)
# no need to call driver since on create it cannot be linked to a load
# balancer, but will still update status to DEFERRED
self.db.update_status(context, models.PoolV2, db_pool.id,
lb_const.DEFERRED)
return self.db.get_pool(context, db_pool.id).to_dict()
def update_pool(self, context, id, pool):
pool = pool.get('pool')
self._validate_session_persistence_info(
pool.get('session_persistence'))
old_pool = self.db.get_pool(context, id)
self.db.test_and_set_status(context, models.PoolV2, id,
constants.PENDING_UPDATE)
try:
updated_pool = self.db.update_pool(context, id, pool)
except Exception as exc:
self.db.update_status(context, models.PoolV2, id, old_pool.status)
raise exc
if (updated_pool.attached_to_loadbalancer() or
old_pool.attached_to_loadbalancer()):
if updated_pool.attached_to_loadbalancer():
driver = self._get_driver_for_loadbalancer(
context, updated_pool.listener.loadbalancer_id)
else:
driver = self._get_driver_for_loadbalancer(
context, old_pool.listener.loadbalancer_id)
self._call_driver_operation(context,
driver.pool.update,
updated_pool,
old_db_entity=old_pool)
else:
self.db.update_status(context, models.PoolV2, id,
lb_const.DEFERRED)
return self.db.get_pool(context, updated_pool.id).to_dict()
def delete_pool(self, context, id):
self.db.test_and_set_status(context, models.PoolV2, id,
constants.PENDING_DELETE)
db_pool = self.db.get_pool(context, id)
if db_pool.attached_to_loadbalancer():
driver = self._get_driver_for_loadbalancer(
context, db_pool.listener.loadbalancer_id)
self._call_driver_operation(context, driver.pool.delete, db_pool)
else:
self.db.delete_pool(context, id)
def get_pools(self, context, filters=None, fields=None):
pools = self.db.get_pools(context, filters=filters)
return [self.db._fields(pool.to_dict(), fields) for pool in pools]
def get_pool(self, context, id, fields=None):
pool_db = self.db.get_pool(context, id)
return self.db._fields(pool_db.to_dict(), fields)
def create_pool_member(self, context, member, pool_id):
member = member.get('member')
member_db = self.db.create_pool_member(context, member, pool_id)
if member_db.attached_to_loadbalancer():
driver = self._get_driver_for_loadbalancer(
context, member_db.pool.listener.loadbalancer_id)
self._call_driver_operation(context,
driver.member.create,
member_db)
else:
self.db.update_status(context, models.MemberV2, member_db.id,
lb_const.DEFERRED)
return self.db.get_pool_member(context, member_db.id,
pool_id).to_dict()
def update_pool_member(self, context, id, member, pool_id):
member = member.get('member')
old_member = self.db.get_pool_member(context, id, pool_id)
self.db.test_and_set_status(context, models.MemberV2, id,
constants.PENDING_UPDATE)
try:
updated_member = self.db.update_pool_member(context, id, member,
pool_id)
except Exception as exc:
self.db.update_status(context, models.MemberV2, id,
old_member.status)
raise exc
# cannot unlink a member from a loadbalancer through an update
# so no need to check if the old_member is attached
if updated_member.attached_to_loadbalancer():
driver = self._get_driver_for_loadbalancer(
context, updated_member.pool.listener.loadbalancer_id)
self._call_driver_operation(context,
driver.member.update,
updated_member,
old_db_entity=old_member)
else:
self.db.update_status(context, models.MemberV2, id,
lb_const.DEFERRED)
return self.db.get_pool_member(context, updated_member.id,
pool_id).to_dict()
def delete_pool_member(self, context, id, pool_id):
self.db.test_and_set_status(context, models.MemberV2, id,
constants.PENDING_DELETE)
db_member = self.db.get_pool_member(context, id, pool_id)
if db_member.attached_to_loadbalancer():
driver = self._get_driver_for_loadbalancer(
context, db_member.pool.listener.loadbalancer_id)
self._call_driver_operation(context,
driver.member.delete,
db_member)
else:
self.db.delete_pool_member(context, id, pool_id)
def get_pool_members(self, context, pool_id, filters=None, fields=None):
members = self.db.get_pool_members(context, pool_id, filters=filters)
return [self.db._fields(member.to_dict(), fields)
for member in members]
def get_pool_member(self, context, id, pool_id, filters=None, fields=None):
member = self.db.get_pool_member(context, id, pool_id, filters=filters)
return member.to_dict()
def create_healthmonitor(self, context, healthmonitor):
healthmonitor = healthmonitor.get('healthmonitor')
db_hm = self.db.create_healthmonitor(context, healthmonitor)
# no need to call driver since on create it cannot be linked to a load
# balancer, but will still update status to DEFERRED
self.db.update_status(context, models.HealthMonitorV2, db_hm.id,
lb_const.DEFERRED)
return self.db.get_healthmonitor(context, db_hm.id).to_dict()
def update_healthmonitor(self, context, id, healthmonitor):
healthmonitor = healthmonitor.get('healthmonitor')
old_hm = self.db.get_healthmonitor(context, id)
self.db.test_and_set_status(context, models.HealthMonitorV2, id,
constants.PENDING_UPDATE)
try:
updated_hm = self.db.update_healthmonitor(context, id,
healthmonitor)
except Exception as exc:
self.db.update_status(context, models.HealthMonitorV2, id,
old_hm.status)
raise exc
# cannot unlink a healthmonitor from a loadbalancer through an update
# so no need to check if old_hm is attached
if updated_hm.attached_to_loadbalancer():
driver = self._get_driver_for_loadbalancer(
context, updated_hm.pool.listener.loadbalancer_id)
self._call_driver_operation(context,
driver.healthmonitor.update,
updated_hm,
old_db_entity=old_hm)
else:
self.db.update_status(context, models.HealthMonitorV2, id,
lb_const.DEFERRED)
return self.db.get_healthmonitor(context, updated_hm.id).to_dict()
def delete_healthmonitor(self, context, id):
self.db.test_and_set_status(context, models.HealthMonitorV2, id,
constants.PENDING_DELETE)
db_hm = self.db.get_healthmonitor(context, id)
if db_hm.attached_to_loadbalancer():
driver = self._get_driver_for_loadbalancer(
context, db_hm.pool.listener.loadbalancer_id)
self._call_driver_operation(
context, driver.healthmonitor.delete, db_hm)
else:
self.db.delete_healthmonitor(context, id)
def get_healthmonitor(self, context, id, fields=None):
hm_db = self.db.get_healthmonitor(context, id)
return self.db._fields(hm_db.to_dict(), fields)
def get_healthmonitors(self, context, filters=None, fields=None):
healthmonitors = self.db.get_healthmonitors(context, filters=filters)
return [self.db._fields(healthmonitor.to_dict(), fields)
for healthmonitor in healthmonitors]
    def stats(self, context, loadbalancer_id):
        """Return traffic statistics for a load balancer.

        Asks the provider driver for fresh stats; if the driver returns
        data the DB copy is refreshed first, otherwise the stored values
        are served as-is.
        """
        loadbalancer = self.db.get_loadbalancer(context, loadbalancer_id)
        driver = self._get_driver_for_loadbalancer(context, loadbalancer_id)
        stats_data = driver.load_balancer.stats(context, loadbalancer)
        # if we get something from the driver -
        # update the db and return the value from db
        # else - return what we have in db
        if stats_data:
            self.db.update_loadbalancer_stats(context, loadbalancer_id,
                                              stats_data)
        db_stats = self.db.stats(context, loadbalancer_id)
        return {'stats': db_stats.to_dict()}
def validate_provider(self, provider):
if provider not in self.drivers:
raise pconf.ServiceProviderNotFound(
provider=provider, service_type=constants.LOADBALANCERV2)
# NOTE(brandon-logan): these need to be concrete methods because the
# neutron request pipeline calls these methods before the plugin methods
# are ever called
def get_members(self, context, filters=None, fields=None):
pass
def get_member(self, context, id, fields=None):
pass

View File

@ -21,7 +21,9 @@ from neutron.tests import base as n_base
from neutron.tests.unit import test_api_v2_extension
from neutron.tests.unit import test_db_plugin
from neutron.tests.unit import test_quota_ext
from neutron.tests.unit import testlib_api
from oslo.config import cfg
from testtools import matchers
def override_nvalues():
@ -45,6 +47,155 @@ class NeutronDbPluginV2TestCase(test_db_plugin.NeutronDbPluginV2TestCase):
super(NeutronDbPluginV2TestCase, self).setUp(
plugin, service_plugins, ext_mgr)
def new_list_request(self, resource, fmt=None, params=None, id=None,
subresource=None):
return self._req(
'GET', resource, None, fmt, params=params, subresource=subresource,
id=id
)
    def new_show_request(self, resource, id, fmt=None,
                         subresource=None, sub_id=None, fields=None):
        """Build a GET request for a single resource.

        Optionally restricts the response to *fields* and addresses a
        sub-resource (e.g. a member under a pool) via subresource/sub_id.
        """
        if fields:
            params = "&".join(["fields=%s" % x for x in fields])
        else:
            params = None
        return self._req('GET', resource, None, fmt, id=id,
                         params=params, subresource=subresource, sub_id=sub_id)
def new_update_request(self, resource, data, id, fmt=None,
subresource=None, context=None, sub_id=None):
return self._req(
'PUT', resource, data, fmt, id=id, subresource=subresource,
context=context, sub_id=sub_id
)
    def _test_list_with_sort(self, resource,
                             items, sorts, resources=None,
                             query_params='',
                             id=None,
                             subresource=None,
                             subresources=None):
        """List *resources* with sort_key/sort_dir query params and assert
        the returned ids appear in the same order as *items*.

        Plural names default to resource + 's'; sub-resource listings
        (e.g. members under a pool) are supported via subresource(s).
        """
        query_str = query_params
        for key, direction in sorts:
            query_str = query_str + "&sort_key=%s&sort_dir=%s" % (key,
                                                                  direction)
        if not resources:
            resources = '%ss' % resource
        if subresource and not subresources:
            subresources = '%ss' % subresource
        req = self.new_list_request(resources,
                                    params=query_str,
                                    id=id,
                                    subresource=subresources)
        api = self._api_for_resource(resources)
        res = self.deserialize(self.fmt, req.get_response(api))
        if subresource:
            resource = subresource
        if subresources:
            resources = subresources
        # URL segments use dashes; response body keys use underscores
        resource = resource.replace('-', '_')
        resources = resources.replace('-', '_')
        expected_res = [item[resource]['id'] for item in items]
        self.assertEqual(expected_res, [n['id'] for n in res[resources]])
def _test_list_with_pagination(self, resource, items, sort,
limit, expected_page_num,
resources=None,
query_params='',
verify_key='id',
id=None,
subresource=None,
subresources=None):
if not resources:
resources = '%ss' % resource
if subresource and not subresources:
subresources = '%ss' % subresource
query_str = query_params + '&' if query_params else ''
query_str = query_str + ("limit=%s&sort_key=%s&"
"sort_dir=%s") % (limit, sort[0], sort[1])
req = self.new_list_request(resources, params=query_str, id=id,
subresource=subresources)
items_res = []
page_num = 0
api = self._api_for_resource(resources)
if subresource:
resource = subresource
if subresources:
resources = subresources
resource = resource.replace('-', '_')
resources = resources.replace('-', '_')
while req:
page_num = page_num + 1
res = self.deserialize(self.fmt, req.get_response(api))
self.assertThat(len(res[resources]),
matchers.LessThan(limit + 1))
items_res = items_res + res[resources]
req = None
if '%s_links' % resources in res:
for link in res['%s_links' % resources]:
if link['rel'] == 'next':
content_type = 'application/%s' % self.fmt
req = testlib_api.create_request(link['href'],
'', content_type)
self.assertEqual(len(res[resources]),
limit)
self.assertEqual(expected_page_num, page_num)
self.assertEqual([item[resource][verify_key] for item in items],
[n[verify_key] for n in items_res])
def _test_list_with_pagination_reverse(self, resource, items, sort,
limit, expected_page_num,
resources=None,
query_params='',
id=None,
subresource=None,
subresources=None):
if not resources:
resources = '%ss' % resource
if subresource and not subresources:
subresources = '%ss' % subresource
resource = resource.replace('-', '_')
api = self._api_for_resource(resources)
if subresource:
marker = items[-1][subresource]['id']
else:
marker = items[-1][resource]['id']
query_str = query_params + '&' if query_params else ''
query_str = query_str + ("limit=%s&page_reverse=True&"
"sort_key=%s&sort_dir=%s&"
"marker=%s") % (limit, sort[0], sort[1],
marker)
req = self.new_list_request(resources, params=query_str, id=id,
subresource=subresources)
if subresource:
resource = subresource
if subresources:
resources = subresources
item_res = [items[-1][resource]]
page_num = 0
resources = resources.replace('-', '_')
while req:
page_num = page_num + 1
res = self.deserialize(self.fmt, req.get_response(api))
self.assertThat(len(res[resources]),
matchers.LessThan(limit + 1))
res[resources].reverse()
item_res = item_res + res[resources]
req = None
if '%s_links' % resources in res:
for link in res['%s_links' % resources]:
if link['rel'] == 'previous':
content_type = 'application/%s' % self.fmt
req = testlib_api.create_request(link['href'],
'', content_type)
self.assertEqual(len(res[resources]),
limit)
self.assertEqual(expected_page_num, page_num)
expected_res = [item[resource]['id'] for item in items]
expected_res.reverse()
self.assertEqual(expected_res, [n['id'] for n in item_res])
class ExtensionTestCase(test_api_v2_extension.ExtensionTestCase):

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,207 @@
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import collections
# Expected "render-ready" dicts — the forms the jinja_cfg transform step
# should produce from the sample_* tuples below; used as assertion
# targets in the haproxy template tests.

# HTTP-cookie session persistence; cookie named after the type.
RET_PERSISTENCE = {
    'type': 'HTTP_COOKIE',
    'cookie_name': 'HTTP_COOKIE'}

# HTTP health monitor probing /index.html with pipe-joined codes.
RET_MONITOR = {
    'id': 'sample_monitor_id_1',
    'type': 'HTTP',
    'delay': 30,
    'timeout': 31,
    'max_retries': 3,
    'http_method': 'GET',
    'url_path': '/index.html',
    'expected_codes': '405|404|500',
    'admin_state_up': 'true'}

# Two pool members on the same subnet, differing only in id/address.
RET_MEMBER_1 = {
    'id': 'sample_member_id_1',
    'address': '10.0.0.99',
    'protocol_port': 82,
    'weight': 13,
    'subnet_id': '10.0.0.1/24',
    'admin_state_up': 'true',
    'status': 'ACTIVE'}

RET_MEMBER_2 = {
    'id': 'sample_member_id_2',
    'address': '10.0.0.98',
    'protocol_port': 82,
    'weight': 13,
    'subnet_id': '10.0.0.1/24',
    'admin_state_up': 'true',
    'status': 'ACTIVE'}

# Pool aggregating the members, monitor, and persistence above.
RET_POOL = {
    'id': 'sample_pool_id_1',
    'protocol': 'http',
    'lb_algorithm': 'roundrobin',
    'members': [RET_MEMBER_1, RET_MEMBER_2],
    'health_monitor': RET_MONITOR,
    'session_persistence': RET_PERSISTENCE,
    'admin_state_up': 'true',
    'status': 'ACTIVE'}

# Listener fronting the pool on port 80.
RET_LISTENER = {
    'id': 'sample_listener_id_1',
    'protocol_port': 80,
    'protocol': 'http',
    'default_pool': RET_POOL,
    'connection_limit': 98}

# Top-level load balancer wrapping the single listener.
RET_LB = {
    'name': 'test-lb',
    'vip_address': '10.0.0.2',
    'listeners': [RET_LISTENER]}
def sample_loadbalancer_tuple(proto=None, monitor=True, persistence=True,
                              persistence_type=None):
    """Build a loadbalancer stand-in with one listener wired to a pool.

    All keyword arguments are forwarded to sample_listener_tuple.
    """
    if proto is None:
        proto = 'HTTP'
    lb_type = collections.namedtuple(
        'loadbalancer', 'id, name, vip_address, protocol, vip_port, '
                        'listeners')
    listener = sample_listener_tuple(proto=proto, monitor=monitor,
                                     persistence=persistence,
                                     persistence_type=persistence_type)
    return lb_type(id='sample_loadbalancer_id_1',
                   name='test-lb',
                   vip_address='10.0.0.2',
                   protocol=proto,
                   vip_port=sample_vip_port_tuple(),
                   listeners=[listener])
def sample_vip_port_tuple():
    """Build a minimal vip_port stand-in with one fixed IP (10.0.0.2)."""
    port_type = collections.namedtuple('vip_port', 'fixed_ips')
    addr_type = collections.namedtuple('ip_address', 'ip_address')
    return port_type(fixed_ips=[addr_type(ip_address='10.0.0.2')])
def sample_listener_tuple(proto=None, monitor=True, persistence=True,
                          persistence_type=None):
    """Build a listener stand-in on port 80 backed by a sample pool."""
    if proto is None:
        proto = 'HTTP'
    listener_type = collections.namedtuple(
        'listener', 'id, protocol_port, protocol, default_pool, '
                    'connection_limit')
    pool = sample_pool_tuple(proto=proto, monitor=monitor,
                             persistence=persistence,
                             persistence_type=persistence_type)
    return listener_type(id='sample_listener_id_1',
                         protocol_port=80,
                         protocol=proto,
                         default_pool=pool,
                         connection_limit=98)
def sample_pool_tuple(proto=None, monitor=True, persistence=True,
                      persistence_type=None):
    """Build a pool stand-in with two members and optional monitor and
    session persistence (attached only when the flags are exactly True).
    """
    if proto is None:
        proto = 'HTTP'
    pool_type = collections.namedtuple(
        'pool', 'id, protocol, lb_algorithm, members, healthmonitor,'
                'sessionpersistence, admin_state_up, status')
    hm = None
    if monitor is True:
        hm = sample_health_monitor_tuple(proto=proto)
    sp = None
    if persistence is True:
        sp = sample_session_persistence_tuple(
            persistence_type=persistence_type)
    members = [sample_member_tuple('sample_member_id_1', '10.0.0.99'),
               sample_member_tuple('sample_member_id_2', '10.0.0.98')]
    return pool_type(id='sample_pool_id_1',
                     protocol=proto,
                     lb_algorithm='ROUND_ROBIN',
                     members=members,
                     healthmonitor=hm,
                     sessionpersistence=sp,
                     admin_state_up='true',
                     status='ACTIVE')
def sample_member_tuple(id, ip):
    """Build a member stand-in for *id* at address *ip*.

    Port, weight, subnet, admin state and status are fixed sample values.
    """
    member_type = collections.namedtuple(
        'member', 'id, address, protocol_port, weight, subnet_id, '
                  'admin_state_up, status')
    return member_type(id=id,
                       address=ip,
                       protocol_port=82,
                       weight=13,
                       subnet_id='10.0.0.1/24',
                       admin_state_up='true',
                       status='ACTIVE')
def sample_session_persistence_tuple(persistence_type=None):
    """Build a SessionPersistence stand-in; cookie_name mirrors the type.

    Defaults to HTTP_COOKIE when no type is given.
    """
    sp_type = collections.namedtuple('SessionPersistence',
                                     'type, cookie_name')
    chosen = 'HTTP_COOKIE' if persistence_type is None else persistence_type
    return sp_type(type=chosen, cookie_name=chosen)
def sample_health_monitor_tuple(proto=None):
    """Build a health-monitor stand-in; *proto* defaults to 'HTTP'."""
    if proto is None:
        proto = 'HTTP'
    monitor_type = collections.namedtuple(
        'monitor', 'id, type, delay, timeout, max_retries, http_method, '
                   'url_path, expected_codes, admin_state_up')
    return monitor_type(id='sample_monitor_id_1',
                        type=proto,
                        delay=30,
                        timeout=31,
                        max_retries=3,
                        http_method='GET',
                        url_path='/index.html',
                        expected_codes='500, 405, 404',
                        admin_state_up='true')
def sample_base_expected_config(frontend=None, backend=None):
    """Return the full expected haproxy config text for the sample LB.

    The global/defaults header is fixed; callers may override the
    frontend and/or backend sections, which default to the sample
    listener/pool renderings.
    """
    if frontend is None:
        frontend = ("frontend sample_listener_id_1\n"
                    " option tcplog\n"
                    " maxconn 98\n"
                    " option forwardfor\n"
                    " bind 10.0.0.2:80\n"
                    " mode http\n"
                    " default_backend sample_pool_id_1\n\n")
    if backend is None:
        backend = ("backend sample_pool_id_1\n"
                   " mode http\n"
                   " balance roundrobin\n"
                   " cookie SRV insert indirect nocache\n"
                   " timeout check 31\n"
                   " option httpchk GET /index.html\n"
                   " http-check expect rstatus 405|404|500\n"
                   " server sample_member_id_1 10.0.0.99:82 weight 13 "
                   "check inter 30s fall 3 cookie sample_member_id_1\n"
                   " server sample_member_id_2 10.0.0.98:82 weight 13 "
                   "check inter 30s fall 3 cookie sample_member_id_2\n")
    header = ("# Configuration for test-lb\n"
              "global\n"
              " daemon\n"
              " user nobody\n"
              " group nogroup\n"
              " log /dev/log local0\n"
              " log /dev/log local1 notice\n"
              " stats socket /sock_path mode 0666 level user\n\n"
              "defaults\n"
              " log global\n"
              " retries 3\n"
              " option redispatch\n"
              " timeout connect 5000\n"
              " timeout client 50000\n"
              " timeout server 50000\n\n")
    return header + frontend + backend

View File

@ -0,0 +1,235 @@
# Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import mock
from neutron.tests import base
from neutron_lbaas.services.loadbalancer.drivers.haproxy import jinja_cfg
from neutron_lbaas.tests.unit.services.loadbalancer.drivers.haproxy.sample_configs \
import sample_configs
class TestHaproxyCfg(base.BaseTestCase):
    """Unit tests for the haproxy Jinja2 config renderer (jinja_cfg).

    Expected configuration text is built via
    sample_configs.sample_base_expected_config(); each rendering test
    supplies only the frontend/backend sections the scenario changes.
    """
    def test_save_config(self):
        """save_config renders the loadbalancer and writes the result."""
        # NOTE(review): contextlib.nested exists only on Python 2 --
        # confirm the supported interpreter before reusing this pattern.
        with contextlib.nested(
            mock.patch('neutron_lbaas.services.loadbalancer.'
                       'drivers.haproxy.jinja_cfg.render_loadbalancer_obj'),
            mock.patch('neutron.agent.linux.utils.replace_file')
        ) as (r_t, replace):
            # Rendering is stubbed out; only the write-out path is checked.
            r_t.return_value = 'fake_rendered_template'
            lb = mock.Mock()
            jinja_cfg.save_config('test_conf_path', lb, 'test_sock_path')
            r_t.assert_called_once_with(lb, 'nogroup', 'test_sock_path')
            replace.assert_called_once_with('test_conf_path',
                                            'fake_rendered_template')
    def test_get_template_v14(self):
        """The default template is the haproxy v1.4 one."""
        template = jinja_cfg._get_template()
        self.assertEqual('haproxy_v1.4.template', template.name)
    def test_render_template_http(self):
        """HTTP pool renders http mode, forwardfor and health checks."""
        be = ("backend sample_pool_id_1\n"
              " mode http\n"
              " balance roundrobin\n"
              " cookie SRV insert indirect nocache\n"
              " timeout check 31\n"
              " option httpchk GET /index.html\n"
              " http-check expect rstatus 405|404|500\n"
              " option forwardfor\n"
              " server sample_member_id_1 10.0.0.99:82 weight 13 check "
              "inter 30s fall 3 cookie sample_member_id_1\n"
              " server sample_member_id_2 10.0.0.98:82 weight 13 check "
              "inter 30s fall 3 cookie sample_member_id_2\n\n")
        rendered_obj = jinja_cfg.render_loadbalancer_obj(
            sample_configs.sample_loadbalancer_tuple(),
            'nogroup', '/sock_path')
        self.assertEqual(
            sample_configs.sample_base_expected_config(backend=be),
            rendered_obj)
    def test_render_template_https(self):
        """HTTPS renders in tcp mode with ssl-hello-chk on the backend."""
        fe = ("frontend sample_listener_id_1\n"
              " option tcplog\n"
              " maxconn 98\n"
              " bind 10.0.0.2:80\n"
              " mode tcp\n"
              " default_backend sample_pool_id_1\n\n")
        be = ("backend sample_pool_id_1\n"
              " mode tcp\n"
              " balance roundrobin\n"
              " cookie SRV insert indirect nocache\n"
              " timeout check 31\n"
              " option httpchk GET /index.html\n"
              " http-check expect rstatus 405|404|500\n"
              " option ssl-hello-chk\n"
              " server sample_member_id_1 10.0.0.99:82 weight 13 check "
              "inter 30s fall 3 cookie sample_member_id_1\n"
              " server sample_member_id_2 10.0.0.98:82 weight 13 check "
              "inter 30s fall 3 cookie sample_member_id_2\n\n")
        rendered_obj = jinja_cfg.render_loadbalancer_obj(
            sample_configs.sample_loadbalancer_tuple(proto='HTTPS'),
            'nogroup', '/sock_path')
        self.assertEqual(sample_configs.sample_base_expected_config(
            frontend=fe, backend=be), rendered_obj)
    def test_render_template_no_monitor_http(self):
        """Without a monitor, HTTP server lines carry no check options."""
        be = ("backend sample_pool_id_1\n"
              " mode http\n"
              " balance roundrobin\n"
              " cookie SRV insert indirect nocache\n"
              " option forwardfor\n"
              " server sample_member_id_1 10.0.0.99:82 weight 13 "
              "cookie sample_member_id_1\n"
              " server sample_member_id_2 10.0.0.98:82 weight 13 "
              "cookie sample_member_id_2\n\n")
        rendered_obj = jinja_cfg.render_loadbalancer_obj(
            sample_configs.sample_loadbalancer_tuple(proto='HTTP',
                                                     monitor=False),
            'nogroup', '/sock_path')
        self.assertEqual(sample_configs.sample_base_expected_config(
            backend=be), rendered_obj)
    def test_render_template_no_monitor_https(self):
        """HTTPS without a monitor: tcp mode, no check options."""
        fe = ("frontend sample_listener_id_1\n"
              " option tcplog\n"
              " maxconn 98\n"
              " bind 10.0.0.2:80\n"
              " mode tcp\n"
              " default_backend sample_pool_id_1\n\n")
        be = ("backend sample_pool_id_1\n"
              " mode tcp\n"
              " balance roundrobin\n"
              " cookie SRV insert indirect nocache\n"
              " server sample_member_id_1 10.0.0.99:82 weight 13 "
              "cookie sample_member_id_1\n"
              " server sample_member_id_2 10.0.0.98:82 weight 13 "
              "cookie sample_member_id_2\n\n")
        rendered_obj = jinja_cfg.render_loadbalancer_obj(
            sample_configs.sample_loadbalancer_tuple(proto='HTTPS',
                                                     monitor=False),
            'nogroup', '/sock_path')
        self.assertEqual(sample_configs.sample_base_expected_config(
            frontend=fe, backend=be), rendered_obj)
    def test_render_template_no_persistence_https(self):
        """HTTPS without persistence: no cookie directives at all."""
        fe = ("frontend sample_listener_id_1\n"
              " option tcplog\n"
              " maxconn 98\n"
              " bind 10.0.0.2:80\n"
              " mode tcp\n"
              " default_backend sample_pool_id_1\n\n")
        be = ("backend sample_pool_id_1\n"
              " mode tcp\n"
              " balance roundrobin\n"
              " server sample_member_id_1 10.0.0.99:82 weight 13\n"
              " server sample_member_id_2 10.0.0.98:82 weight 13\n\n")
        rendered_obj = jinja_cfg.render_loadbalancer_obj(
            sample_configs.sample_loadbalancer_tuple(proto='HTTPS',
                                                     monitor=False,
                                                     persistence=False),
            'nogroup', '/sock_path')
        self.assertEqual(sample_configs.sample_base_expected_config(
            frontend=fe, backend=be), rendered_obj)
    def test_render_template_no_persistence_http(self):
        """HTTP without persistence: no cookie directives at all."""
        be = ("backend sample_pool_id_1\n"
              " mode http\n"
              " balance roundrobin\n"
              " option forwardfor\n"
              " server sample_member_id_1 10.0.0.99:82 weight 13\n"
              " server sample_member_id_2 10.0.0.98:82 weight 13\n\n")
        rendered_obj = jinja_cfg.render_loadbalancer_obj(
            sample_configs.sample_loadbalancer_tuple(proto='HTTP',
                                                     monitor=False,
                                                     persistence=False),
            'nogroup', '/sock_path')
        self.assertEqual(sample_configs.sample_base_expected_config(
            backend=be), rendered_obj)
    def test_render_template_sourceip_persistence(self):
        """SOURCE_IP persistence renders stick-table/stick-on-src."""
        be = ("backend sample_pool_id_1\n"
              " mode http\n"
              " balance roundrobin\n"
              " stick-table type ip size 10k\n"
              " stick on src\n"
              " timeout check 31\n"
              " option httpchk GET /index.html\n"
              " http-check expect rstatus 405|404|500\n"
              " option forwardfor\n"
              " server sample_member_id_1 10.0.0.99:82 weight 13 check "
              "inter 30s fall 3\n"
              " server sample_member_id_2 10.0.0.98:82 weight 13 check "
              "inter 30s fall 3\n\n")
        rendered_obj = jinja_cfg.render_loadbalancer_obj(
            sample_configs.sample_loadbalancer_tuple(
                persistence_type='SOURCE_IP'),
            'nogroup', '/sock_path')
        self.assertEqual(
            sample_configs.sample_base_expected_config(backend=be),
            rendered_obj)
    def test_render_template_appsession_persistence(self):
        """APP_COOKIE persistence renders an appsession directive."""
        be = ("backend sample_pool_id_1\n"
              " mode http\n"
              " balance roundrobin\n"
              " appsession APP_COOKIE len 56 timeout 3h\n"
              " timeout check 31\n"
              " option httpchk GET /index.html\n"
              " http-check expect rstatus 405|404|500\n"
              " option forwardfor\n"
              " server sample_member_id_1 10.0.0.99:82 weight 13 check "
              "inter 30s fall 3\n"
              " server sample_member_id_2 10.0.0.98:82 weight 13 check "
              "inter 30s fall 3\n\n")
        rendered_obj = jinja_cfg.render_loadbalancer_obj(
            sample_configs.sample_loadbalancer_tuple(
                persistence_type='APP_COOKIE'),
            'nogroup', '/sock_path')
        self.assertEqual(
            sample_configs.sample_base_expected_config(backend=be),
            rendered_obj)
    def test_transform_session_persistence(self):
        """_transform_session_persistence maps the tuple to RET dict."""
        in_persistence = sample_configs.sample_session_persistence_tuple()
        ret = jinja_cfg._transform_session_persistence(in_persistence)
        self.assertEqual(sample_configs.RET_PERSISTENCE, ret)
    def test_transform_health_monitor(self):
        """_transform_health_monitor maps the tuple to RET dict."""
        in_persistence = sample_configs.sample_health_monitor_tuple()
        ret = jinja_cfg._transform_health_monitor(in_persistence)
        self.assertEqual(sample_configs.RET_MONITOR, ret)
    def test_transform_member(self):
        """_transform_member maps the tuple to RET dict."""
        in_member = sample_configs.sample_member_tuple('sample_member_id_1',
                                                       '10.0.0.99')
        ret = jinja_cfg._transform_member(in_member)
        self.assertEqual(sample_configs.RET_MEMBER_1, ret)
    def test_transform_pool(self):
        """_transform_pool maps the tuple to RET dict."""
        in_pool = sample_configs.sample_pool_tuple()
        ret = jinja_cfg._transform_pool(in_pool)
        self.assertEqual(sample_configs.RET_POOL, ret)
    def test_transform_listener(self):
        """_transform_listener maps the tuple to RET dict."""
        in_listener = sample_configs.sample_listener_tuple()
        ret = jinja_cfg._transform_listener(in_listener)
        self.assertEqual(sample_configs.RET_LISTENER, ret)
    def test_transform_loadbalancer(self):
        """_transform_loadbalancer maps the tuple to RET dict."""
        in_lb = sample_configs.sample_loadbalancer_tuple()
        ret = jinja_cfg._transform_loadbalancer(in_lb)
        self.assertEqual(sample_configs.RET_LB, ret)

View File

@ -26,6 +26,9 @@ class FakeModel(object):
def __init__(self, id):
self.id = id
def attached_to_loadbalancer(self):
return True
def patch_manager(func):
@mock.patch(log_path)
@ -81,18 +84,10 @@ class ManagerTestWithUpdates(ManagerTest):
@patch_manager
def create(self, model):
self.manager.create(self.parent.context, model)
if self.manager.model_class is not None:
self.parent.assertEqual(
str(self.parent.driver.plugin.mock_calls[0])[:18],
"call.update_status")
@patch_manager
def update(self, old_model, model):
self.manager.update(self.parent.context, old_model, model)
if self.manager.model_class is not None:
self.parent.assertEqual(
str(self.parent.driver.plugin.mock_calls[0])[:18],
"call.update_status")
@patch_manager
def delete(self, model):
@ -130,20 +125,21 @@ class TestLoggingNoopLoadBalancerDriver(
self.context = context.get_admin_context()
self.plugin = mock.Mock()
self.driver = driver.LoggingNoopLoadBalancerDriver(self.plugin)
self.fakemodel = mock.Mock()
self.fakemodel.id = 'name-001'
def test_load_balancer_ops(self):
LoadBalancerManagerTest(self, self.driver.load_balancer,
FakeModel("loadbalancer-001"))
self.fakemodel)
def test_listener_ops(self):
ManagerTest(self, self.driver.listener, FakeModel("listener-001"))
ManagerTest(self, self.driver.listener, self.fakemodel)
def test_pool_ops(self):
ManagerTestWithUpdates(self, self.driver.pool, FakeModel("pool-001"))
ManagerTestWithUpdates(self, self.driver.pool, self.fakemodel)
def test_member_ops(self):
ManagerTestWithUpdates(self, self.driver.member,
FakeModel("member-001"))
ManagerTestWithUpdates(self, self.driver.member, self.fakemodel)
def test_health_monitor_ops(self):
ManagerTest(self, self.driver.health_monitor, FakeModel("hm-001"))
ManagerTest(self, self.driver.health_monitor, self.fakemodel)

View File

@ -18,6 +18,7 @@ import copy
import mock
from neutron.api.v2 import attributes as attr
from neutron.extensions import loadbalancer
from neutron.extensions import loadbalancerv2
from neutron.openstack.common import uuidutils
from neutron.plugins.common import constants
from neutron.tests.unit import test_api_v2
@ -456,3 +457,523 @@ class LoadBalancerExtensionTestCase(base.ExtensionTestCase):
instance.delete_pool_health_monitor.assert_called_with(
mock.ANY, health_monitor_id, pool_id='id1')
self.assertEqual(res.status_int, exc.HTTPNoContent.code)
class LoadBalancerExtensionV2TestCase(base.ExtensionTestCase):
    """CRUD tests for the LBaaS v2 extension's REST resources.

    Each test wires a mocked plugin into the extension controllers,
    issues an HTTP request against the v2 URL layout under ``lbaas/``,
    and verifies both the plugin call and the serialized response.
    """
    fmt = 'json'
    def setUp(self):
        """Install the v2 extension backed by a mocked plugin."""
        super(LoadBalancerExtensionV2TestCase, self).setUp()
        self._setUpExtension(
            'neutron.extensions.loadbalancerv2.LoadBalancerPluginBaseV2',
            constants.LOADBALANCERV2, loadbalancerv2.RESOURCE_ATTRIBUTE_MAP,
            loadbalancerv2.Loadbalancerv2, 'lbaas', use_quota=True)
    def test_loadbalancer_create(self):
        """POST /lbaas/loadbalancers dispatches to create_loadbalancer."""
        lb_id = _uuid()
        data = {'loadbalancer': {'name': 'lb1',
                                 'description': 'descr_lb1',
                                 'tenant_id': _uuid(),
                                 'vip_subnet_id': _uuid(),
                                 'admin_state_up': True,
                                 'vip_address': '127.0.0.1'}}
        return_value = copy.copy(data['loadbalancer'])
        return_value.update({'status': 'ACTIVE', 'id': lb_id})
        instance = self.plugin.return_value
        instance.create_loadbalancer.return_value = return_value
        res = self.api.post(_get_path('lbaas/loadbalancers', fmt=self.fmt),
                            self.serialize(data),
                            content_type='application/{0}'.format(self.fmt))
        instance.create_loadbalancer.assert_called_with(mock.ANY,
                                                        loadbalancer=data)
        self.assertEqual(res.status_int, exc.HTTPCreated.code)
        res = self.deserialize(res)
        self.assertIn('loadbalancer', res)
        self.assertEqual(res['loadbalancer'], return_value)
    def test_loadbalancer_list(self):
        """GET /lbaas/loadbalancers dispatches to get_loadbalancers."""
        lb_id = _uuid()
        return_value = [{'name': 'lb1',
                         'admin_state_up': True,
                         'tenant_id': _uuid(),
                         'id': lb_id}]
        instance = self.plugin.return_value
        instance.get_loadbalancers.return_value = return_value
        res = self.api.get(_get_path('lbaas/loadbalancers', fmt=self.fmt))
        instance.get_loadbalancers.assert_called_with(mock.ANY,
                                                      fields=mock.ANY,
                                                      filters=mock.ANY)
        self.assertEqual(res.status_int, exc.HTTPOk.code)
    def test_loadbalancer_update(self):
        """PUT /lbaas/loadbalancers/<id> dispatches to update_loadbalancer."""
        lb_id = _uuid()
        update_data = {'loadbalancer': {'admin_state_up': False}}
        return_value = {'name': 'lb1',
                        'admin_state_up': False,
                        'tenant_id': _uuid(),
                        'status': "ACTIVE",
                        'id': lb_id}
        instance = self.plugin.return_value
        instance.update_loadbalancer.return_value = return_value
        res = self.api.put(_get_path('lbaas/loadbalancers',
                                     id=lb_id,
                                     fmt=self.fmt),
                           self.serialize(update_data))
        instance.update_loadbalancer.assert_called_with(
            mock.ANY, lb_id, loadbalancer=update_data)
        self.assertEqual(res.status_int, exc.HTTPOk.code)
        res = self.deserialize(res)
        self.assertIn('loadbalancer', res)
        self.assertEqual(res['loadbalancer'], return_value)
    def test_loadbalancer_get(self):
        """GET /lbaas/loadbalancers/<id> dispatches to get_loadbalancer."""
        lb_id = _uuid()
        return_value = {'name': 'lb1',
                        'admin_state_up': False,
                        'tenant_id': _uuid(),
                        'status': "ACTIVE",
                        'id': lb_id}
        instance = self.plugin.return_value
        instance.get_loadbalancer.return_value = return_value
        res = self.api.get(_get_path('lbaas/loadbalancers',
                                     id=lb_id,
                                     fmt=self.fmt))
        instance.get_loadbalancer.assert_called_with(mock.ANY, lb_id,
                                                     fields=mock.ANY)
        self.assertEqual(res.status_int, exc.HTTPOk.code)
        res = self.deserialize(res)
        self.assertIn('loadbalancer', res)
        self.assertEqual(res['loadbalancer'], return_value)
    def test_loadbalancer_delete(self):
        """DELETE /lbaas/loadbalancers/<id> dispatches to the plugin."""
        self._test_entity_delete('loadbalancer')
    def test_listener_create(self):
        """POST /lbaas/listeners dispatches to create_listener."""
        listener_id = _uuid()
        data = {'listener': {'tenant_id': _uuid(),
                             'name': 'listen-name-1',
                             'description': 'listen-1-desc',
                             'protocol': 'HTTP',
                             'protocol_port': 80,
                             'connection_limit': 100,
                             'admin_state_up': True,
                             'loadbalancer_id': _uuid(),
                             'default_pool_id': _uuid()}}
        return_value = copy.copy(data['listener'])
        return_value.update({'status': 'ACTIVE', 'id': listener_id})
        instance = self.plugin.return_value
        instance.create_listener.return_value = return_value
        res = self.api.post(_get_path('lbaas/listeners', fmt=self.fmt),
                            self.serialize(data),
                            content_type='application/{0}'.format(self.fmt))
        instance.create_listener.assert_called_with(mock.ANY,
                                                    listener=data)
        self.assertEqual(res.status_int, exc.HTTPCreated.code)
        res = self.deserialize(res)
        self.assertIn('listener', res)
        self.assertEqual(res['listener'], return_value)
    def test_listener_list(self):
        """GET /lbaas/listeners dispatches to get_listeners."""
        listener_id = _uuid()
        return_value = [{'admin_state_up': True,
                         'tenant_id': _uuid(),
                         'id': listener_id}]
        instance = self.plugin.return_value
        instance.get_listeners.return_value = return_value
        res = self.api.get(_get_path('lbaas/listeners', fmt=self.fmt))
        instance.get_listeners.assert_called_with(mock.ANY,
                                                  fields=mock.ANY,
                                                  filters=mock.ANY)
        self.assertEqual(res.status_int, exc.HTTPOk.code)
    def test_listener_update(self):
        """PUT /lbaas/listeners/<id> dispatches to update_listener."""
        listener_id = _uuid()
        update_data = {'listener': {'admin_state_up': False}}
        return_value = {'name': 'listener1',
                        'admin_state_up': False,
                        'tenant_id': _uuid(),
                        'status': "ACTIVE",
                        'id': listener_id}
        instance = self.plugin.return_value
        instance.update_listener.return_value = return_value
        res = self.api.put(_get_path('lbaas/listeners',
                                     id=listener_id,
                                     fmt=self.fmt),
                           self.serialize(update_data))
        instance.update_listener.assert_called_with(
            mock.ANY, listener_id, listener=update_data)
        self.assertEqual(res.status_int, exc.HTTPOk.code)
        res = self.deserialize(res)
        self.assertIn('listener', res)
        self.assertEqual(res['listener'], return_value)
    def test_listener_get(self):
        """GET /lbaas/listeners/<id> dispatches to get_listener."""
        listener_id = _uuid()
        return_value = {'name': 'listener1',
                        'admin_state_up': False,
                        'tenant_id': _uuid(),
                        'status': "ACTIVE",
                        'id': listener_id}
        instance = self.plugin.return_value
        instance.get_listener.return_value = return_value
        res = self.api.get(_get_path('lbaas/listeners',
                                     id=listener_id,
                                     fmt=self.fmt))
        instance.get_listener.assert_called_with(mock.ANY, listener_id,
                                                 fields=mock.ANY)
        self.assertEqual(res.status_int, exc.HTTPOk.code)
        res = self.deserialize(res)
        self.assertIn('listener', res)
        self.assertEqual(res['listener'], return_value)
    def test_listener_delete(self):
        """DELETE /lbaas/listeners/<id> dispatches to the plugin."""
        self._test_entity_delete('listener')
    def test_pool_create(self):
        """POST /lbaas/pools dispatches to create_pool."""
        pool_id = _uuid()
        hm_id = _uuid()
        data = {'pool': {'name': 'pool1',
                         'description': 'descr_pool1',
                         'protocol': 'HTTP',
                         'lb_algorithm': 'ROUND_ROBIN',
                         'healthmonitor_id': hm_id,
                         'admin_state_up': True,
                         'tenant_id': _uuid(),
                         'session_persistence': {}}}
        return_value = copy.copy(data['pool'])
        return_value.update({'status': "ACTIVE", 'id': pool_id})
        instance = self.plugin.return_value
        instance.create_pool.return_value = return_value
        res = self.api.post(_get_path('lbaas/pools', fmt=self.fmt),
                            self.serialize(data),
                            content_type='application/%s' % self.fmt)
        instance.create_pool.assert_called_with(mock.ANY,
                                                pool=data)
        self.assertEqual(res.status_int, exc.HTTPCreated.code)
        res = self.deserialize(res)
        self.assertIn('pool', res)
        self.assertEqual(res['pool'], return_value)
    def test_pool_list(self):
        """GET /lbaas/pools dispatches to get_pools."""
        pool_id = _uuid()
        return_value = [{'name': 'pool1',
                         'admin_state_up': True,
                         'tenant_id': _uuid(),
                         'id': pool_id}]
        instance = self.plugin.return_value
        instance.get_pools.return_value = return_value
        res = self.api.get(_get_path('lbaas/pools', fmt=self.fmt))
        instance.get_pools.assert_called_with(mock.ANY, fields=mock.ANY,
                                              filters=mock.ANY)
        self.assertEqual(res.status_int, exc.HTTPOk.code)
    def test_pool_update(self):
        """PUT /lbaas/pools/<id> dispatches to update_pool."""
        pool_id = _uuid()
        update_data = {'pool': {'admin_state_up': False}}
        return_value = {'name': 'pool1',
                        'admin_state_up': False,
                        'tenant_id': _uuid(),
                        'status': "ACTIVE",
                        'id': pool_id}
        instance = self.plugin.return_value
        instance.update_pool.return_value = return_value
        res = self.api.put(_get_path('lbaas/pools', id=pool_id,
                                     fmt=self.fmt),
                           self.serialize(update_data))
        instance.update_pool.assert_called_with(mock.ANY, pool_id,
                                                pool=update_data)
        self.assertEqual(res.status_int, exc.HTTPOk.code)
        res = self.deserialize(res)
        self.assertIn('pool', res)
        self.assertEqual(res['pool'], return_value)
    def test_pool_get(self):
        """GET /lbaas/pools/<id> dispatches to get_pool."""
        pool_id = _uuid()
        return_value = {'name': 'pool1',
                        'admin_state_up': False,
                        'tenant_id': _uuid(),
                        'status': "ACTIVE",
                        'id': pool_id}
        instance = self.plugin.return_value
        instance.get_pool.return_value = return_value
        res = self.api.get(_get_path('lbaas/pools', id=pool_id,
                                     fmt=self.fmt))
        instance.get_pool.assert_called_with(mock.ANY, pool_id,
                                             fields=mock.ANY)
        self.assertEqual(res.status_int, exc.HTTPOk.code)
        res = self.deserialize(res)
        self.assertIn('pool', res)
        self.assertEqual(res['pool'], return_value)
    def test_pool_delete(self):
        """DELETE /lbaas/pools/<id> dispatches to the plugin."""
        self._test_entity_delete('pool')
    def test_pool_member_create(self):
        """POST /lbaas/pools/<pid>/members dispatches to create_pool_member."""
        subnet_id = _uuid()
        member_id = _uuid()
        data = {'member': {'address': '10.0.0.1',
                           'protocol_port': 80,
                           'weight': 1,
                           'subnet_id': subnet_id,
                           'admin_state_up': True,
                           'tenant_id': _uuid()}}
        return_value = copy.copy(data['member'])
        return_value.update({'status': "ACTIVE", 'id': member_id})
        instance = self.plugin.return_value
        instance.create_pool_member.return_value = return_value
        res = self.api.post(_get_path('lbaas/pools/pid1/members',
                                      fmt=self.fmt),
                            self.serialize(data),
                            content_type='application/%s'
                                         % self.fmt)
        instance.create_pool_member.assert_called_with(mock.ANY,
                                                       pool_id='pid1',
                                                       member=data)
        self.assertEqual(res.status_int, exc.HTTPCreated.code)
        res = self.deserialize(res)
        self.assertIn('member', res)
        self.assertEqual(res['member'], return_value)
    def test_pool_member_list(self):
        """GET /lbaas/pools/<pid>/members dispatches to get_pool_members."""
        member_id = _uuid()
        return_value = [{'name': 'member1',
                         'admin_state_up': True,
                         'tenant_id': _uuid(),
                         'id': member_id}]
        instance = self.plugin.return_value
        # Stub the method the member index request actually dispatches to;
        # stubbing get_pools here (as before) left the return value unused.
        instance.get_pool_members.return_value = return_value
        res = self.api.get(_get_path('lbaas/pools/pid1/members',
                                     fmt=self.fmt))
        instance.get_pool_members.assert_called_with(mock.ANY,
                                                     fields=mock.ANY,
                                                     filters=mock.ANY,
                                                     pool_id='pid1')
        self.assertEqual(res.status_int, exc.HTTPOk.code)
    def test_pool_member_update(self):
        """PUT on a pool member dispatches to update_pool_member."""
        self.skipTest('mock autospec bug causes false negative.'
                      'Similar bug: '
                      'https://code.google.com/p/mock/issues/detail?id=224')
        member_id = _uuid()
        update_data = {'member': {'admin_state_up': False}}
        return_value = {'admin_state_up': False,
                        'tenant_id': _uuid(),
                        'status': "ACTIVE",
                        'id': member_id}
        instance = self.plugin.return_value
        instance.update_pool_member.return_value = return_value
        res = self.api.put(_get_path('lbaas/pools/pid1/members',
                                     id=member_id,
                                     fmt=self.fmt),
                           self.serialize(update_data),
                           content_type='application/%s'
                                        % self.fmt)
        instance.update_pool_member.assert_called_with(mock.ANY,
                                                       member_id,
                                                       member=update_data,
                                                       pool_id='pid1')
        self.assertEqual(res.status_int, exc.HTTPOk.code)
        res = self.deserialize(res)
        self.assertIn('member', res)
        self.assertEqual(res['member'], return_value)
    def test_pool_member_get(self):
        """GET on a pool member dispatches to get_pool_member."""
        member_id = _uuid()
        return_value = {'admin_state_up': False,
                        'tenant_id': _uuid(),
                        'status': "ACTIVE",
                        'id': member_id}
        instance = self.plugin.return_value
        instance.get_pool_member.return_value = return_value
        res = self.api.get(_get_path('lbaas/pools/pid1/members',
                                     id=member_id, fmt=self.fmt))
        instance.get_pool_member.assert_called_with(mock.ANY,
                                                    member_id,
                                                    fields=mock.ANY,
                                                    pool_id='pid1')
        self.assertEqual(res.status_int, exc.HTTPOk.code)
        res = self.deserialize(res)
        self.assertIn('member', res)
        self.assertEqual(res['member'], return_value)
    def test_pool_member_delete(self):
        """DELETE on a pool member dispatches to delete_pool_member."""
        entity_id = _uuid()
        res = self.api.delete(
            test_api_v2._get_path('lbaas/pools/pid1/members',
                                  id=entity_id, fmt=self.fmt))
        delete_entity = getattr(self.plugin.return_value,
                                "delete_pool_member")
        delete_entity.assert_called_with(mock.ANY, entity_id,
                                         pool_id='pid1')
        self.assertEqual(res.status_int, exc.HTTPNoContent.code)
    def test_health_monitor_create(self):
        """POST /lbaas/healthmonitors dispatches to create_healthmonitor."""
        health_monitor_id = _uuid()
        data = {'healthmonitor': {'type': 'HTTP',
                                  'delay': 2,
                                  'timeout': 1,
                                  'max_retries': 3,
                                  'http_method': 'GET',
                                  'url_path': '/path',
                                  'expected_codes': '200-300',
                                  'admin_state_up': True,
                                  'tenant_id': _uuid()}}
        return_value = copy.copy(data['healthmonitor'])
        return_value.update({'status': "ACTIVE", 'id': health_monitor_id})
        instance = self.plugin.return_value
        instance.create_healthmonitor.return_value = return_value
        res = self.api.post(_get_path('lbaas/healthmonitors',
                                      fmt=self.fmt),
                            self.serialize(data),
                            content_type='application/%s' % self.fmt)
        instance.create_healthmonitor.assert_called_with(mock.ANY,
                                                         healthmonitor=data)
        self.assertEqual(res.status_int, exc.HTTPCreated.code)
        res = self.deserialize(res)
        self.assertIn('healthmonitor', res)
        self.assertEqual(res['healthmonitor'], return_value)
    def test_health_monitor_create_with_timeout_negative(self):
        """A negative timeout is rejected with 400 before plugin dispatch."""
        data = {'healthmonitor': {'type': 'HTTP',
                                  'delay': 2,
                                  'timeout': -1,
                                  'max_retries': 3,
                                  'http_method': 'GET',
                                  'url_path': '/path',
                                  'expected_codes': '200-300',
                                  'admin_state_up': True,
                                  'tenant_id': _uuid()}}
        res = self.api.post(_get_path('lbaas/healthmonitors',
                                      fmt=self.fmt),
                            self.serialize(data),
                            content_type='application/%s' % self.fmt,
                            expect_errors=True)
        self.assertEqual(400, res.status_int)
    def test_health_monitor_list(self):
        """GET /lbaas/healthmonitors dispatches to get_healthmonitors."""
        health_monitor_id = _uuid()
        return_value = [{'type': 'HTTP',
                         'admin_state_up': True,
                         'tenant_id': _uuid(),
                         'id': health_monitor_id}]
        instance = self.plugin.return_value
        instance.get_healthmonitors.return_value = return_value
        res = self.api.get(_get_path('lbaas/healthmonitors', fmt=self.fmt))
        instance.get_healthmonitors.assert_called_with(
            mock.ANY, fields=mock.ANY, filters=mock.ANY)
        self.assertEqual(res.status_int, exc.HTTPOk.code)
    def test_health_monitor_update(self):
        """PUT /lbaas/healthmonitors/<id> dispatches to update_healthmonitor."""
        health_monitor_id = _uuid()
        update_data = {'healthmonitor': {'admin_state_up': False}}
        return_value = {'type': 'HTTP',
                        'admin_state_up': False,
                        'tenant_id': _uuid(),
                        'status': "ACTIVE",
                        'id': health_monitor_id}
        instance = self.plugin.return_value
        instance.update_healthmonitor.return_value = return_value
        res = self.api.put(_get_path('lbaas/healthmonitors',
                                     id=health_monitor_id,
                                     fmt=self.fmt),
                           self.serialize(update_data))
        instance.update_healthmonitor.assert_called_with(
            mock.ANY, health_monitor_id, healthmonitor=update_data)
        self.assertEqual(res.status_int, exc.HTTPOk.code)
        res = self.deserialize(res)
        self.assertIn('healthmonitor', res)
        self.assertEqual(res['healthmonitor'], return_value)
    def test_health_monitor_get(self):
        """GET /lbaas/healthmonitors/<id> dispatches to get_healthmonitor."""
        health_monitor_id = _uuid()
        return_value = {'type': 'HTTP',
                        'admin_state_up': False,
                        'tenant_id': _uuid(),
                        'status': "ACTIVE",
                        'id': health_monitor_id}
        instance = self.plugin.return_value
        instance.get_healthmonitor.return_value = return_value
        res = self.api.get(_get_path('lbaas/healthmonitors',
                                     id=health_monitor_id,
                                     fmt=self.fmt))
        instance.get_healthmonitor.assert_called_with(
            mock.ANY, health_monitor_id, fields=mock.ANY)
        self.assertEqual(res.status_int, exc.HTTPOk.code)
        res = self.deserialize(res)
        self.assertIn('healthmonitor', res)
        self.assertEqual(res['healthmonitor'], return_value)
    def test_health_monitor_delete(self):
        """DELETE /lbaas/healthmonitors/<id> dispatches to the plugin."""
        self._test_entity_delete('healthmonitor')
    def test_load_balancer_stats(self):
        """GET .../loadbalancers/<id>/stats dispatches to plugin.stats."""
        load_balancer_id = _uuid()
        stats = {'stats': 'dummy'}
        instance = self.plugin.return_value
        instance.stats.return_value = stats
        path = _get_path('lbaas/loadbalancers', id=load_balancer_id,
                         action="stats", fmt=self.fmt)
        res = self.api.get(path)
        instance.stats.assert_called_with(mock.ANY, load_balancer_id)
        self.assertEqual(res.status_int, exc.HTTPOk.code)
        res = self.deserialize(res)
        self.assertIn('stats', res)
        self.assertEqual(res['stats'], stats['stats'])

View File

@ -29,7 +29,8 @@ class LBaaSQuotaExtensionTestCase(base.QuotaExtensionTestCase):
super(LBaaSQuotaExtensionTestCase, self).setUp()
cfg.CONF.set_override(
'quota_items',
['vip', 'pool', 'member', 'health_monitor', 'extra1'],
['vip', 'pool', 'member', 'health_monitor', 'extra1',
'loadbalancer', 'listener', 'healthmonitor'],
group='QUOTAS')
quota.register_resources_from_config()
@ -61,6 +62,9 @@ class LBaaSQuotaExtensionDbTestCase(LBaaSQuotaExtensionTestCase):
self.assertEqual(-1, quota['quota']['member'])
self.assertEqual(-1, quota['quota']['health_monitor'])
self.assertEqual(-1, quota['quota']['extra1'])
self.assertEqual(10, quota['quota']['loadbalancer'])
self.assertEqual(-1, quota['quota']['listener'])
self.assertEqual(-1, quota['quota']['healthmonitor'])
def test_show_quotas_with_admin(self):
tenant_id = 'tenant_id1'
@ -74,6 +78,9 @@ class LBaaSQuotaExtensionDbTestCase(LBaaSQuotaExtensionTestCase):
self.assertEqual(10, quota['quota']['pool'])
self.assertEqual(-1, quota['quota']['member'])
self.assertEqual(-1, quota['quota']['health_monitor'])
self.assertEqual(10, quota['quota']['loadbalancer'])
self.assertEqual(-1, quota['quota']['listener'])
self.assertEqual(-1, quota['quota']['healthmonitor'])
def test_show_quotas_with_owner_tenant(self):
tenant_id = 'tenant_id1'
@ -87,6 +94,9 @@ class LBaaSQuotaExtensionDbTestCase(LBaaSQuotaExtensionTestCase):
self.assertEqual(10, quota['quota']['pool'])
self.assertEqual(-1, quota['quota']['member'])
self.assertEqual(-1, quota['quota']['health_monitor'])
self.assertEqual(10, quota['quota']['loadbalancer'])
self.assertEqual(-1, quota['quota']['listener'])
self.assertEqual(-1, quota['quota']['healthmonitor'])
def test_update_quotas_to_unlimited(self):
tenant_id = 'tenant_id1'
@ -97,6 +107,11 @@ class LBaaSQuotaExtensionDbTestCase(LBaaSQuotaExtensionTestCase):
self.serialize(quotas), extra_environ=env,
expect_errors=False)
self.assertEqual(200, res.status_int)
quotas = {'quota': {'loadbalancer': -1}}
res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
self.serialize(quotas), extra_environ=env,
expect_errors=False)
self.assertEqual(200, res.status_int)
def test_update_quotas_exceeding_current_limit(self):
tenant_id = 'tenant_id1'
@ -107,6 +122,11 @@ class LBaaSQuotaExtensionDbTestCase(LBaaSQuotaExtensionTestCase):
self.serialize(quotas), extra_environ=env,
expect_errors=False)
self.assertEqual(200, res.status_int)
quotas = {'quota': {'loadbalancer': 120}}
res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
self.serialize(quotas), extra_environ=env,
expect_errors=False)
self.assertEqual(200, res.status_int)
def test_update_quotas_with_admin(self):
tenant_id = 'tenant_id1'
@ -116,6 +136,10 @@ class LBaaSQuotaExtensionDbTestCase(LBaaSQuotaExtensionTestCase):
res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
self.serialize(quotas), extra_environ=env)
self.assertEqual(200, res.status_int)
quotas = {'quota': {'loadbalancer': 100}}
res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
self.serialize(quotas), extra_environ=env)
self.assertEqual(200, res.status_int)
env2 = {'neutron.context': context.Context('', tenant_id)}
res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
extra_environ=env2)
@ -124,6 +148,9 @@ class LBaaSQuotaExtensionDbTestCase(LBaaSQuotaExtensionTestCase):
self.assertEqual(100, quota['quota']['pool'])
self.assertEqual(-1, quota['quota']['member'])
self.assertEqual(-1, quota['quota']['health_monitor'])
self.assertEqual(100, quota['quota']['loadbalancer'])
self.assertEqual(-1, quota['quota']['listener'])
self.assertEqual(-1, quota['quota']['healthmonitor'])
class LBaaSQuotaExtensionCfgTestCase(
@ -147,6 +174,9 @@ class LBaaSQuotaExtensionCfgTestCase(
self.assertEqual(-1, quota['quota']['member'])
self.assertEqual(-1, quota['quota']['health_monitor'])
self.assertEqual(-1, quota['quota']['extra1'])
self.assertEqual(10, quota['quota']['loadbalancer'])
self.assertEqual(-1, quota['quota']['listener'])
self.assertEqual(-1, quota['quota']['healthmonitor'])
def test_update_quotas_forbidden(self):
tenant_id = 'tenant_id1'
@ -155,3 +185,9 @@ class LBaaSQuotaExtensionCfgTestCase(
self.serialize(quotas),
expect_errors=True)
self.assertEqual(403, res.status_int)
quotas = {'quota': {'loadbalancer': 100}}
res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
self.serialize(quotas),
expect_errors=True)
self.assertEqual(403, res.status_int)