Initial version of lb_policy

This patch provides a preliminary implementation of lb_policy, which
now supports Neutron LBaaS as the backend loadbalancer service.

Change-Id: I2547cc176c1cb475ae27e66b0012928d0b9fe805
yanyanhu 2015-05-28 05:08:42 -04:00
parent b6741fb1d3
commit 11e423f80f
5 changed files with 483 additions and 117 deletions

View File

@@ -3,13 +3,8 @@
# Each Pool member has its own 'address', 'protocol_port, 'weight',
# and 'admin_state_up' property
#### LB properties
# Port on which servers are running on the members
protocol_port: 80
#### Pool properties
pool:
# Pool ID/name, if given can use an existing pool
# pool: <ID>
@@ -17,8 +12,11 @@ pool:
# Protocol used for load balancing
protocol: HTTP
# Subnet for the port on which members can be connected
subnet: private_subnet
# Port on which servers are running on the members
protocol_port: 80
# Name or ID of subnet for the port on which members can be connected.
subnet: private-subnet
# Valid values include:
# ROUND_ROBIN, LEAST_CONNECTIONS, SOURCE_IP
@@ -27,18 +25,6 @@ pool:
# Administrative state of the pool
admin_state_up: True
# IP address and port of the pool
vip:
# Subnet of the VIP
subnet: public_subnet
# IP address of the VIP
address: 172.24.4.220
# Max #connections per second allowed for this VIP
connection_limit: 500
# TCP port to listen on
protocol_port: 80
# Administrative state up
admin_state_up: True
# session persistence configuration
session_persistence:
# type of session persistence implementation, valid values include:
@@ -46,3 +32,23 @@ vip:
type: SOURCE_IP
# Name of cookie if type set to APP_COOKIE
cookie_name: whatever
#### Virtual IP properties
vip:
# Name or ID of Subnet on which VIP address will be allocated
subnet: private-subnet
# IP address of the VIP
# address: <ADDRESS>
# Max #connections per second allowed for this VIP
connection_limit: 500
# Protocol used for VIP
protocol: HTTP
# TCP port to listen on
protocol_port: 80
# Administrative state up
admin_state_up: True

View File

@@ -0,0 +1,238 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import eventlet
import six
from oslo_log import log as logging
from senlin.common.i18n import _
from senlin.common import trust
from senlin.drivers import base
from senlin.drivers.openstack import neutron_v2 as neutronclient
LOG = logging.getLogger(__name__)
class LoadBalancerDriver(base.DriverBase):
"""Common driver for LoadBalancer management"""
def __init__(self, context):
super(LoadBalancerDriver, self).__init__(context)
self.context = context
params = trust.get_connection_params(context)
self.nc = neutronclient.NeutronClient(params)
def _wait_for_lb_ready(self, lb_data, ignore_not_found=False):
"""Keep waiting until loadbalancer is in ready status
This method keeps waiting until the loadbalancer resource listed in
lb_data becomes ready, which means its provisioning_status is ACTIVE
and its operating_status is ONLINE. If ignore_not_found is set to True,
the absence of the loadbalancer resource is also an acceptable result.
"""
loadbalancer_id = lb_data.get('loadbalancer')
while True:
loadbalancer_ready = True
if loadbalancer_id:
lb = self.nc.loadbalancer_get(loadbalancer_id)
if lb is None:
loadbalancer_ready = True if ignore_not_found else False
elif (lb.provisioning_status == 'ACTIVE') and (
lb.operating_status == 'ONLINE'):
loadbalancer_ready = True
else:
loadbalancer_ready = False
if loadbalancer_ready:
return True, lb_data
else:
LOG.debug(_('Waiting for loadbalancer %(lb)s to become ready'
) % {'lb': loadbalancer_id})
eventlet.sleep(2)
# TODO(Yanyan Hu): Add timeout check.
def lb_create(self, vip, pool):
"""Create a Neutron lbaas instance"""
# Catch all exceptions that could happen in each step and gracefully
# remove all lbaas related resources that have been created before
# returning.
try:
# Create loadbalancer
lb_data = {}
subnet = vip.get('subnet', None)
subnet_id = (self.nc.subnet_get(subnet)).id
address = vip.get('address', None)
admin_state_up = vip.get('admin_state_up', None)
lb = self.nc.loadbalancer_create(subnet_id, address,
admin_state_up)
lb_data['loadbalancer'] = lb.id
res, reason = self._wait_for_lb_ready(lb_data)
if res is not True:
return res, reason
# Create listener
protocol = vip.get('protocol')
protocol_port = vip.get('protocol_port')
connection_limit = vip.get('connection_limit', None)
listener = self.nc.listener_create(lb.id, protocol, protocol_port,
connection_limit,
admin_state_up)
lb_data['listener'] = listener.id
res, reason = self._wait_for_lb_ready(lb_data)
if res is not True:
return res, reason
# Create pool
lb_algorithm = pool.get('lb_method')
protocol = pool.get('protocol')
admin_state_up = pool.get('admin_state_up')
pool = self.nc.pool_create(lb_algorithm, listener.id, protocol,
admin_state_up)
lb_data['pool'] = pool.id
res, reason = self._wait_for_lb_ready(lb_data)
if res is not True:
return res, reason
except Exception as ex:
self.lb_delete(**lb_data)
msg = _('Failed in creating lb resources: %(ex)s '
) % {'ex': six.text_type(ex)}
LOG.error(msg)
return False, msg
return True, lb_data
def lb_delete(self, **kwargs):
"""Delete a Neutron lbaas instance
The following Neutron lbaas resources will be deleted in order:
1) healthmonitor; 2) pool; 3) listener; 4) loadbalancer.
"""
loadbalancer_id = kwargs.get('loadbalancer')
listener_id = kwargs.get('listener')
pool_id = kwargs.get('pool')
healthmonitor_id = kwargs.get('healthmonitor', None)
lb_data = kwargs
try:
if healthmonitor_id is not None:
self.nc.healthmonitor_delete(healthmonitor_id)
del lb_data['healthmonitor']
self._wait_for_lb_ready(lb_data, ignore_not_found=True)
self.nc.pool_delete(pool_id)
del lb_data['pool']
self._wait_for_lb_ready(lb_data, ignore_not_found=True)
self.nc.listener_delete(listener_id)
del lb_data['listener']
self._wait_for_lb_ready(lb_data, ignore_not_found=True)
self.nc.loadbalancer_delete(loadbalancer_id)
except Exception as ex:
msg = _('Failed in deleting lb resources %(data)s: %(ex)s'
) % {'data': lb_data, 'ex': six.text_type(ex)}
LOG.error(msg)
return False, msg
return True, 'lb resource deleting succeeded'
def member_add(self, **kwargs):
"""Add a member to Neutron lbaas pool"""
node = kwargs.get('node')
pool_id = kwargs.get('pool_id')
port = kwargs.get('port')
subnet = kwargs.get('subnet')
try:
addresses = self._get_node_address(node, version=4)
if not addresses:
msg = _('Node does not have a valid IPv%(version)s address'
) % {'version': 4}
raise Exception(msg)
else:
network_id = (self.nc.subnet_get(subnet))['network_id']
network_name = (self.nc.network_get(network_id))['name']
if network_name in addresses:
address = addresses[network_name]
else:
msg = _('Node is not in subnet %(subnet)s'
) % {'subnet': subnet}
raise Exception(msg)
subnet_id = (self.nc.subnet_get(subnet)).id
pool_member = self.nc.pool_member_create(pool_id, address, port,
subnet_id)
pool = self.nc.pool_get(pool_id)
listener = self.nc.listener_get(pool.listeners[0]['id'])
lb_data = {
'loadbalancer': listener.loadbalancers[0]['id'],
'member': pool_member.id
}
self._wait_for_lb_ready(lb_data)
except Exception as ex:
msg = _('Failed in adding node %(node)s into pool %(pool)s as '
'a member: %(ex)s') % {'node': node.id, 'pool': pool_id,
'ex': six.text_type(ex)}
LOG.error(msg)
return False
return pool_member.id
def member_remove(self, **kwargs):
"""Delete a member from Neutron lbaas pool"""
pool_id = kwargs.get('pool_id')
member_id = kwargs.get('member_id')
try:
self.nc.pool_member_delete(pool_id, member_id)
pool = self.nc.pool_get(pool_id)
listener = self.nc.listener_get(pool.listeners[0]['id'])
lb_data = {
'loadbalancer': listener.loadbalancers[0]['id'],
}
self._wait_for_lb_ready(lb_data)
except Exception as ex:
msg = _('Failed in removing member %(member)s from pool %(pool)s: '
'%(ex)s') % {'member': member_id, 'pool': pool_id,
'ex': six.text_type(ex)}
LOG.error(msg)
return False
return True
def _get_node_address(self, node, version=4):
"""Get IP address of node with specific version"""
node_detail = node.get_details(self.context)
node_addresses = node_detail.get('addresses')
address = {}
for network in node_addresses:
for addr in node_addresses[network]:
if addr['version'] == version:
address[network] = addr['addr']
return address
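For orientation, the snippet below is a minimal usage sketch of the driver defined above; the request context ctx, the node object and the subnet name are placeholders assumed for illustration and are not part of this patch.

from senlin.drivers.openstack import lbaas

# ctx: a Senlin request context; node: a senlin engine Node object,
# both assumed to be supplied by the caller.
driver = lbaas.LoadBalancerDriver(ctx)
vip = {'subnet': 'private-subnet', 'protocol': 'HTTP',
       'protocol_port': 80, 'admin_state_up': True}
pool = {'lb_method': 'ROUND_ROBIN', 'protocol': 'HTTP',
        'admin_state_up': True}

# lb_create builds the loadbalancer, listener and pool in order and
# returns their IDs in lb_data on success.
res, lb_data = driver.lb_create(vip, pool)
if res:
    # Register the node as a pool member on port 80.
    member_id = driver.member_add(node=node, pool_id=lb_data['pool'],
                                  port=80, subnet='private-subnet')
    # ... and eventually tear everything down again (healthmonitor,
    # pool, listener, loadbalancer, in that order).
    driver.lb_delete(**lb_data)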

View File

@@ -29,6 +29,28 @@ class NeutronClient(base.DriverBase):
self.session = self.conn.session
self.auth = self.session.authenticator
def network_get(self, name_or_id):
try:
network = self.conn.network.find_network(name_or_id)
except sdk.exc.HttpException as ex:
msg = _('Failed in getting network %(value)s: %(ex)s'
) % {'value': name_or_id, 'ex': six.text_type(ex)}
raise exception.Error(msg=msg)
return network
def subnet_get(self, name_or_id):
try:
subnet = self.conn.network.find_subnet(name_or_id)
except sdk.exc.HttpException as ex:
msg = _('Failed in getting subnet %(value)s: %(ex)s'
) % {'value': name_or_id, 'ex': six.text_type(ex)}
# TODO(Yanyan Hu): choose more proper exception type,
# e.g. ResourceNotFound.
raise exception.Error(msg=msg)
return subnet
def loadbalancer_get(self, name_or_id):
try:
lb = self.conn.network.find_load_balancer(name_or_id)

View File

@@ -476,6 +476,8 @@ class ClusterAction(base.Action):
# delete nodes if necessary
if desired < current_size:
adjustment = current_size - desired
if 'deletion' not in self.data:
self.data['deletion'] = {'count': adjustment}
candidates = []
# Choose victims randomly
i = adjustment
@@ -492,6 +494,8 @@
# Create new nodes if desired_capacity increased
if desired > current_size:
delta = desired - current_size
if 'creation' not in self.data:
self.data['creation'] = {'count': delta}
result, reason = self._create_nodes(cluster, delta)
if result != self.RES_OK:
return result, reason
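These two hunks record the net effect of a resize in action.data so that policy hooks can tell node deletion apart from node creation; the load-balancing policy's post_op below relies on exactly these keys. A rough illustration of the resulting shapes (node IDs are made up, and the 'nodes' list is populated elsewhere by the node creation/deletion helpers):

# Illustrative action.data after a resize that removes two nodes:
shrink_example = {'deletion': {'count': 2}, 'nodes': ['node-a', 'node-b']}
# Illustrative action.data after a resize that adds three nodes:
grow_example = {'creation': {'count': 3},
                'nodes': ['node-c', 'node-d', 'node-e']}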

View File

@@ -10,13 +10,18 @@
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from senlin.common import constraints
from senlin.common import consts
from senlin.common.i18n import _
from senlin.common import schema
from senlin.drivers.openstack import lbaas
from senlin.engine import cluster_policy
from senlin.engine import node as node_mod
from senlin.policies import base
neutron = None
LOG = logging.getLogger(__name__)
class LoadBalancingPolicy(base.Policy):
@@ -29,26 +34,28 @@ class LoadBalancingPolicy(base.Policy):
TARGET = [
('AFTER', consts.CLUSTER_ADD_NODES),
('AFTER', consts.CLUSTER_DEL_NODES),
('AFTER', consts.CLUSTER_SCALE_OUT),
('BEFORE', consts.CLUSTER_DEL_NODES),
('BEFORE', consts.CLUSTER_SCALE_IN),
('AFTER', consts.CLUSTER_SCALE_IN),
('AFTER', consts.CLUSTER_RESIZE),
]
PROFILE_TYPE = [
'os.nova.server',
'aws.autoscaling.launchconfig',
]
KEYS = (
PROTOCOL_PORT, POOL, VIP,
POOL, VIP,
) = (
'protocol_port', 'pool', 'vip',
'pool', 'vip',
)
_POOL_KEYS = (
POOL_ID, PROTOCOL, POOL_SUBNET, LB_METHOD, ADMIN_STATE_UP,
POOL_ID, POOL_PROTOCOL, POOL_PROTOCOL_PORT, POOL_SUBNET,
POOL_LB_METHOD, POOL_ADMIN_STATE_UP, POOL_SESSION_PERSISTENCE,
) = (
'pool_id', 'protocol', 'subnet', 'lb_method', 'admin_state_up',
'id', 'protocol', 'protocol_port', 'subnet',
'lb_method', 'admin_state_up', 'session_persistence',
)
PROTOCOLS = (
@@ -64,11 +71,11 @@ class LoadBalancingPolicy(base.Policy):
)
_VIP_KEYS = (
VIP_ID, VIP_SUBNET, ADDRESS, CONNECTION_LIMIT, VIP_PROTOCOL_PORT,
VIP_ADMIN_STATE_UP, SESSION_PERSISTENCE,
VIP_ID, VIP_SUBNET, VIP_ADDRESS, VIP_CONNECTION_LIMIT, VIP_PROTOCOL,
VIP_PROTOCOL_PORT, VIP_ADMIN_STATE_UP,
) = (
'vip_id', 'subnet', 'address', 'connection_limit', 'protocol_port',
'admin_state_up', 'session_persistence',
'id', 'subnet', 'address', 'connection_limit', 'protocol',
'protocol_port', 'admin_state_up',
)
_SESSION_PERSISTENCE_KEYS = (
@@ -84,66 +91,40 @@ class LoadBalancingPolicy(base.Policy):
)
spec_schema = {
PROTOCOL_PORT: schema.Integer(
_('Port on which servers are running on the nodes.'),
default=80,
),
POOL: schema.Map(
_('LB pool properties.'),
schema={
POOL_ID: schema.String(
_('ID of an existing load-balanced pool.'),
),
PROTOCOL: schema.String(
POOL_PROTOCOL: schema.String(
_('Protocol used for load balancing.'),
constraints=[
constraints.AllowedValues(PROTOCOLS),
],
default=HTTP,
),
POOL_PROTOCOL_PORT: schema.Integer(
_('Port on which servers are running on the nodes.'),
default=80,
),
POOL_SUBNET: schema.String(
_('Subnet for the port on which nodes can be connected.'),
_('Name or ID of subnet for the port on which nodes can '
'be connected.'),
required=True,
),
LB_METHOD: schema.String(
POOL_LB_METHOD: schema.String(
_('Load balancing algorithm.'),
constraints=[
constraints.AllowedValues(LB_METHODS),
],
default=ROUND_ROBIN,
),
ADMIN_STATE_UP: schema.Boolean(
POOL_ADMIN_STATE_UP: schema.Boolean(
_('Administrative state of the pool.'),
default=True,
),
},
),
VIP: schema.Map(
_('VIP address and port of the pool.'),
schema={
VIP_ID: schema.String(
_('ID of an existing VIP object.'),
),
VIP_SUBNET: schema.String(
_('Subnet of the VIP address.'),
),
ADDRESS: schema.String(
_('IP address of the VIP.'),
required=True,
),
CONNECTION_LIMIT: schema.Integer(
_('Maximum number of connections per second allowed for '
'this VIP'),
),
VIP_PROTOCOL_PORT: schema.Integer(
_('TCP port to listen on.'),
default=80,
),
VIP_ADMIN_STATE_UP: schema.Boolean(
_('Administrative state of the VIP.'),
default=True,
),
SESSION_PERSISTENCE: schema.Map(
POOL_SESSION_PERSISTENCE: schema.Map(
_('Session persistence configuration.'),
schema={
PERSISTENCE_TYPE: schema.String(
@@ -160,68 +141,183 @@ class LoadBalancingPolicy(base.Policy):
),
},
),
VIP: schema.Map(
_('VIP address and port of the pool.'),
schema={
VIP_ID: schema.String(
_('ID of an existing VIP object.'),
),
VIP_SUBNET: schema.String(
_('Name or ID of Subnet on which the VIP address will be '
'allocated.'),
required=True,
),
VIP_ADDRESS: schema.String(
_('IP address of the VIP.'),
),
VIP_CONNECTION_LIMIT: schema.Integer(
_('Maximum number of connections per second allowed for '
'this VIP'),
),
VIP_PROTOCOL: schema.String(
_('Protocol used for VIP.'),
constraints=[
constraints.AllowedValues(PROTOCOLS),
],
default=HTTP,
),
VIP_PROTOCOL_PORT: schema.Integer(
_('TCP port to listen on.'),
default=80,
),
VIP_ADMIN_STATE_UP: schema.Boolean(
_('Administrative state of the VIP.'),
default=True,
),
},
),
}
def __init__(self, type_name, name, **kwargs):
super(LoadBalancingPolicy, self).__init__(type_name, name, **kwargs)
self.pool_spec = kwargs.get('pool', None)
self.vip_spec = kwargs.get('vip', None)
self.pool = None
self.vip = None
self.pool_need_delete = True
self.vip_need_delete = True
self.pool_spec = self.spec_data.get(self.POOL, None)
self.vip_spec = self.spec_data.get(self.VIP, None)
self.validate()
self.lb = None
def attach(self, cluster_id, action):
pool_id = self.pool_spec.get('pool')
self.action = action
pool_id = self.pool_spec.get(self.POOL_ID, None)
if pool_id is not None:
self.pool = neutron.get_pool(pool_id)
self.pool_need_delete = False
data = {
'pool': self.pool_id,
'pool_need_delete': False
}
else:
# Create pool using the specified params
self.pool = neutron.create_pool({'pool': self.pool_spec})['pool']
res, data = self.lb_driver.lb_create(self.vip_spec,
self.pool_spec)
if res is not True:
return res, data
else:
data['pool_need_delete'] = True
vip_id = self.vip_spec.get('vip')
if vip_id is not None:
self.vip = neutron.get_vip(vip_id)
self.vip_need_delete = False
else:
# Create vip using specified params
self.vip = neutron.create_vip({'vip': self.vip_spec})['vip']
port = self.pool_spec.get(self.POOL_PROTOCOL_PORT)
subnet = self.pool_spec.get(self.POOL_SUBNET)
nodes = node_mod.Node.load_all(action.context, cluster_id=cluster_id)
return True
def detach(self, cluster_id, action):
if self.vip_need_delete:
neutron.delete_vip(self.vip)
if self.pool_need_delete:
neutron.delete_pool(self.pool)
return True
def pre_op(self, cluster_id, action):
if action not in (consts.CLUSTER_DEL_NODES, consts.CLUSTER_SCALE_IN):
return
nodes = action.data.get('nodes', [])
for node in nodes:
member_id = node.data.get('lb_member')
neutron.delete_member(member_id)
return
def post_op(self, cluster_id, action):
if action not in (consts.CLUSTER_ADD_NODES, consts.CLUSTER_SCALE_OUT):
return
nodes = action.data.get('nodes', [])
for node in nodes:
params = {
'pool_id': self.pool,
'address': node.data.get('ip'),
'protocol_port': self.protocol_port,
'admin_state_up': True,
'pool_id': data['pool'],
'node': node,
'port': port,
'subnet': subnet
}
member = neutron.create_member({'member': params})['member']
node.data.update('lb_member', member['id'])
member_id = self.lb_driver.member_add(**params)
if member_id is None:
# Adding member failed, remove all lb resources that
# have been created and return failure reason.
# TODO(Yanyan Hu): Maybe we should tolerate member adding
# failure and allow policy attaching to succeed without
# all nodes being added into lb pool?
self.lb_driver.lb_delete(**data)
return False, 'Failed in adding existing node into lb pool'
else:
node.data.update({'lb_member': member_id})
node.store(action.context)
return True, data
def detach(self, cluster_id, action):
res = True
self.action = action
cp = cluster_policy.ClusterPolicy.load(action.context, cluster_id,
self.id)
if cp.data['pool_need_delete']:
res, reason = self.lb_driver.lb_delete(**cp.data)
if res is not True:
return res, reason
else:
return res, 'lb resources deleting succeeded'
def post_op(self, cluster_id, action):
"""Add new created node(s) to lb pool"""
self.action = action
cp = cluster_policy.ClusterPolicy.load(action.context, cluster_id,
self.id)
pool_id = cp.data['pool']
port = self.pool_spec.get(self.POOL_PROTOCOL_PORT)
subnet = self.pool_spec.get(self.POOL_SUBNET)
nodes = action.data.get('nodes')
if nodes is None:
return
for node_id in nodes:
node = node_mod.Node.load(action.context, node_id=node_id,
show_deleted=True)
member_id = node.data.get('lb_member')
if (action.action in (consts.CLUSTER_DEL_NODES,
consts.CLUSTER_SCALE_IN))\
or (action.action == consts.CLUSTER_RESIZE and
action.data.get('deletion')):
if member_id:
# Remove nodes that have been deleted from lb pool
params = {
'pool_id': pool_id,
'member_id': member_id,
}
res = self.lb_driver.member_remove(**params)
if res is not True:
action.data['status'] = base.CHECK_ERROR
action.data['reason'] = _('Failed in removing deleted '
'node from lb pool')
return
else:
msg = _('Node %(node)s is not in loadbalancer pool '
'%(pool)s when being deleted from cluster '
'%(cluster)s.') % {'node': node_id,
'pool': pool_id,
'cluster': node.cluster_id}
LOG.warning(msg)
if (action.action in (consts.CLUSTER_ADD_NODES,
consts.CLUSTER_SCALE_OUT))\
or (action.action == consts.CLUSTER_RESIZE and
action.data.get('creation')):
if member_id is None:
# Add newly created nodes into lb pool
params = {
'pool_id': pool_id,
'node': node,
'port': port,
'subnet': subnet
}
member_id = self.lb_driver.member_add(**params)
if member_id is None:
action.data['status'] = base.CHECK_ERROR
action.data['reason'] = _('Failed in adding new node '
'into lb pool')
return
node.data.update({'lb_member': member_id})
node.store(action.context)
else:
msg = _('Node %(node)s has been in a loadbalancer pool as '
'member %(member)s before being added to cluster '
'%(cluster)s.') % {'node': node_id,
'member': member_id,
'cluster': node.cluster_id}
LOG.warning(msg)
return
@property
def lb_driver(self):
if self.lb is None:
self.lb = lbaas.LoadBalancerDriver(self.action.context)
return self.lb
else:
return self.lb