Radware LBaaS driver implementation

Change-Id: I43f666ed6716c16b0ef95bc7ed58c4c422a7fd89
Implements: blueprint radware-driver-for-lbaas
This commit is contained in:
Avishay Balderman 2013-08-26 18:12:35 +03:00
parent 2a83e5c4a9
commit 98c1ce4c1f
19 changed files with 2813 additions and 0 deletions

View File

@ -364,3 +364,16 @@ signing_dir = $state_path/keystone-signing
# service_provider=FIREWALL:name2:firewall_driver_path
# --- Reference implementations ---
service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
# In order to activate Radware's lbaas driver, uncomment the next line.
# If you want to keep HA Proxy as the default lbaas driver, remove the ':default' attribute from the line below.
# Otherwise, comment out the HA Proxy line above.
#service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
[radware]
#vdirect_address=0.0.0.0
#service_ha_pair=False
#service_throughput=1000
#service_ssl_throughput=200
#service_compression_throughput=100
#service_cache=20

View File

@ -0,0 +1,17 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 Radware LLC (Radware)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Avishay Balderman, Radware

View File

@ -0,0 +1,833 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 Radware LTD.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Avishay Balderman, Radware
import base64
import copy
import httplib
import os
import Queue
import socket
from StringIO import StringIO
import threading
import time
from zipfile import ZipFile
import eventlet
from oslo.config import cfg
from neutron.common import exceptions as q_exc
from neutron.common import log as call_log
from neutron import context as qcontext
import neutron.db.loadbalancer.loadbalancer_db as lb_db
from neutron.extensions import loadbalancer
from neutron.openstack.common import jsonutils as json
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants
from neutron.services.loadbalancer.drivers import abstract_driver
eventlet.monkey_patch(thread=True)
LOG = logging.getLogger(__name__)
RESP_STATUS = 0
RESP_REASON = 1
RESP_STR = 2
RESP_DATA = 3
L2_L3_WORKFLOW_TEMPLATE_NAME = 'openstack_l2_l3'
L4_WORKFLOW_TEMPLATE_NAME = 'openstack_l4'
ACTIONS_TO_SKIP = ['setup_l2_l3']
L4_ACTION_NAME = 'BaseCreate'
TEMPLATE_HEADER = {'Content-Type':
'application/vnd.com.radware.vdirect.'
'template-parameters+json'}
PROVISION_HEADER = {'Content-Type':
'application/vnd.com.radware.'
'vdirect.status+json'}
CREATE_SERVICE_HEADER = {'Content-Type':
'application/vnd.com.radware.'
'vdirect.adc-service-specification+json'}
ZIP_HEADER = {'Content-Type': 'application/x-zip-compressed'}
L2_CTOR_PARAMS = {"service": "_REPLACE_", "ha_network_name": "HA-Network",
"ha_ip_pool_name": "default", "allocate_ha_vrrp": True,
"allocate_ha_ips": True}
L2_SETUP_L2_L3_PARAMS = {"data_port": 1,
"data_ip_address": "192.168.200.99",
"data_ip_mask": "255.255.255.0",
"gateway": "192.168.200.1",
"ha_port": 2}
driver_opts = [
cfg.StrOpt('vdirect_address',
help=_('vdirect server IP address')),
cfg.BoolOpt('service_ha_pair',
default=False,
help=_('service HA pair')),
cfg.IntOpt('service_throughput',
default=1000,
help=_('service throughtput')),
cfg.IntOpt('service_ssl_throughput',
default=100,
help=_('service ssl throughtput')),
cfg.IntOpt('service_compression_throughput',
default=100,
help=_('service compression throughtput')),
cfg.IntOpt('service_cache',
default=20,
help=_('service cache'))
]
cfg.CONF.register_opts(driver_opts, "radware")
class LoadBalancerDriver(abstract_driver.LoadBalancerAbstractDriver):

    """Radware lbaas driver.

    Translates LBaaS model changes into vDirect workflow operations and
    tracks their asynchronous completion on a background thread.
    """

    def __init__(self, plugin):
        rad = cfg.CONF.radware
        self.plugin = plugin
        # ADC service specification template sent to vDirect when a new
        # service has to be created (see _create_service); the portgroup
        # placeholder is replaced with the vip's network id.
        self.service = {
            "haPair": rad.service_ha_pair,
            "primary": {
                "capacity": {
                    "throughput": rad.service_throughput,
                    "sslThroughput": rad.service_ssl_throughput,
                    "compressionThroughput":
                    rad.service_compression_throughput,
                    "cache": rad.service_cache
                },
                "network": {
                    "type": "portgroup",
                    "portgroups": ['DATA_NETWORK']
                },
                "adcType": "VA",
                "acceptableAdc": "Exact"
            }
        }
        vdirect_address = cfg.CONF.radware.vdirect_address
        self.rest_client = vDirectRESTClient(server=vdirect_address)
        # Async operations are queued here and polled to completion by a
        # dedicated daemon thread.
        self.queue = Queue.Queue()
        self.completion_handler = OperationCompletionHander(self.queue,
                                                           self.rest_client,
                                                           plugin)
        # Workflow templates are uploaded lazily, on first use.
        self.workflows_were_uploaded = False
        self.completion_handler.setDaemon(True)
        self.completion_handler.start()
    def create_vip(self, context, vip):
        """Create a vip on the device.

        Resolves (or creates) the ADC service for the vip's network,
        creates the L4 workflow named after the pool, then pushes the
        full vip graph to it.
        """
        LOG.debug(_('create_vip. vip: %s'), str(vip))
        extended_vip = self.plugin.populate_vip_graph(context, vip)
        LOG.debug(_('create_vip. extended_vip: %s'), str(extended_vip))
        network_id = self._get_vip_network_id(context, extended_vip)
        LOG.debug(_('create_vip. network_id: %s '), str(network_id))
        service_name = self._get_service(extended_vip['pool_id'], network_id)
        LOG.debug(_('create_vip. service_name: %s '), service_name)
        # The L4 workflow is named after the pool id.
        self._create_workflow(
            vip['pool_id'], L4_WORKFLOW_TEMPLATE_NAME,
            {"service": service_name})
        self._update_workflow(
            vip['pool_id'],
            L4_ACTION_NAME, extended_vip)

    def update_vip(self, context, old_vip, vip):
        """Push the updated vip graph to the existing workflow."""
        extended_vip = self.plugin.populate_vip_graph(context, vip)
        self._update_workflow(
            vip['pool_id'], L4_ACTION_NAME,
            extended_vip, False, lb_db.Vip, vip['id'])
def delete_vip(self, context, vip):
"""Delete a Vip
First delete it from the device. If deletion ended OK
- remove data from DB as well.
If the deletion failed - mark elements with error status in DB
"""
extended_vip = self.plugin.populate_vip_graph(context, vip)
try:
# removing the WF will cause deletion of the configuration from the
# device
self._remove_workflow(extended_vip, context)
except Exception:
pool_id = extended_vip['pool_id']
LOG.exception(_('Failed to remove workflow %s'), pool_id)
_update_vip_graph_status(
self.plugin, context, extended_vip, constants.ERROR
)
    def create_pool(self, context, pool):
        """Create a pool - nothing to push until it is bound to a vip."""
        # nothing to do
        pass

    def update_pool(self, context, old_pool, pool):
        """Push the updated pool through its vip graph, if any."""
        self._handle_pool(context, pool)

    def delete_pool(self, context, pool,):
        """Delete the pool from the device and/or the DB."""
        self._handle_pool(context, pool, delete=True)

    def _handle_pool(self, context, pool, delete=False):
        # A pool is only configured on the device as part of a vip graph.
        vip_id = self.plugin.get_pool(context, pool['id']).get('vip_id', None)
        if vip_id:
            if delete:
                # A pool that is serving a vip must not be removed.
                raise loadbalancer.PoolInUse(pool_id=pool['id'])
            else:
                vip = self.plugin.get_vip(context, vip_id)
                extended_vip = self.plugin.populate_vip_graph(context, vip)
                self._update_workflow(
                    pool['id'], L4_ACTION_NAME,
                    extended_vip, delete, lb_db.Pool, pool['id'])
        else:
            if delete:
                # Not on the device - just clean up the DB.
                self.plugin._delete_db_pool(context, pool['id'])
            else:
                # we keep the pool in PENDING_UPDATE
                # no point to modify it since it is not connected to vip yet
                pass

    def create_member(self, context, member):
        """Create a member - pushed only if its pool already has a vip."""
        self._handle_member(context, member)

    def update_member(self, context, old_member, member):
        """Update a member - pushed only if its pool already has a vip."""
        self._handle_member(context, member)

    def delete_member(self, context, member):
        """Delete a member from the device and/or the DB."""
        self._handle_member(context, member, delete=True)

    def _handle_member(self, context, member, delete=False):
        """Navigate the model. If a Vip is found - activate a bulk WF action.
        """
        vip_id = self.plugin.get_pool(
            context, member['pool_id']).get('vip_id')
        if vip_id:
            vip = self.plugin.get_vip(context, vip_id)
            extended_vip = self.plugin.populate_vip_graph(context, vip)
            self._update_workflow(
                member['pool_id'], L4_ACTION_NAME,
                extended_vip, delete, lb_db.Member, member['id'])
        # We have to delete this member but it is not connected to a vip yet
        elif delete:
            self.plugin._delete_db_member(context, member['id'])
    def create_health_monitor(self, context, health_monitor):
        """Create a health monitor - nothing to push yet."""
        # Anything to do here? the hm is not connected to the graph yet
        pass

    def update_health_monitor(self, context, old_health_monitor,
                              health_monitor,
                              pool_id):
        """Push an updated health monitor through the pool's vip graph."""
        self._handle_pool_health_monitor(context, health_monitor, pool_id)

    def create_pool_health_monitor(self, context,
                                   health_monitor, pool_id):
        """Associate a health monitor with a pool on the device."""
        self._handle_pool_health_monitor(context, health_monitor, pool_id)

    def delete_pool_health_monitor(self, context, health_monitor, pool_id):
        """Remove a pool/health-monitor association."""
        self._handle_pool_health_monitor(context, health_monitor, pool_id,
                                         True)

    def _handle_pool_health_monitor(self, context, health_monitor,
                                    pool_id, delete=False):
        """Push a graph to vDirect

        Navigate the model. Check if a pool is associated to the vip
        and push the graph to vDirect
        """
        vip_id = self.plugin.get_pool(context, pool_id).get('vip_id', None)

        debug_params = {"hm_id": health_monitor['id'], "pool_id": pool_id,
                        "delete": delete, "vip_id": vip_id}
        LOG.debug(_('_handle_pool_health_monitor. health_monitor = %(hm_id)s '
                    'pool_id = %(pool_id)s delete = %(delete)s '
                    'vip_id = %(vip_id)s'),
                  debug_params)

        if vip_id:
            vip = self.plugin.get_vip(context, vip_id)
            extended_vip = self.plugin.populate_vip_graph(context, vip)
            self._update_workflow(pool_id, L4_ACTION_NAME,
                                  extended_vip,
                                  delete, lb_db.PoolMonitorAssociation,
                                  health_monitor['id'])
        elif delete:
            # Not configured on the device - just clean up the DB.
            self.plugin._delete_db_pool_health_monitor(context,
                                                       health_monitor['id'],
                                                       pool_id)

    def stats(self, context, pool_id):
        """Return (stub) traffic statistics for a pool."""
        # TODO(avishayb) implement
        return {"bytes_in": 0,
                "bytes_out": 0,
                "active_connections": 0,
                "total_connections": 0}

    def _get_vip_network_id(self, context, extended_vip):
        # The ADC service is allocated per network; resolve the network
        # from the vip's subnet.
        subnet = self.plugin._core_plugin.get_subnet(
            context, extended_vip['subnet_id'])
        return subnet['network_id']
    @call_log.log
    def _update_workflow(self, wf_name, action, wf_params, delete=False,
                         lbaas_entity=None, entity_id=None):
        """Update the WF state. Push the result to a queue for processing."""
        if not self.workflows_were_uploaded:
            self._upload_workflows_templates()

        if action not in ACTIONS_TO_SKIP:
            # Flatten the vip graph into workflow parameters.
            params = _translate_vip_object_graph(wf_params)
        else:
            params = wf_params

        resource = '/api/workflow/%s/action/%s' % (wf_name, action)
        response = _rest_wrapper(self.rest_client.call('POST', resource,
                                                       {'parameters': params},
                                                       TEMPLATE_HEADER))
        LOG.debug(_('_update_workflow response: %s '), response)

        if action not in ACTIONS_TO_SKIP:
            # Track the async operation so the completion handler can
            # update/remove the corresponding DB entities when it ends.
            ids = params.pop('__ids__', None)
            if not ids:
                raise q_exc.NeutronException(
                    _('params must contain __ids__ field!')
                )
            oper = OperationAttributes(response['uri'],
                                       ids,
                                       lbaas_entity,
                                       entity_id,
                                       delete=delete)
            LOG.debug(_('Pushing operation %s to the queue'), oper)
            self.queue.put_nowait(oper)

    def _remove_workflow(self, wf_params, context):
        """Delete the vip's workflow (and thus its device configuration)."""
        params = _translate_vip_object_graph(wf_params)
        ids = params.pop('__ids__', None)
        if not ids:
            raise q_exc.NeutronException(
                _('params must contain __ids__ field!')
            )

        # The L4 workflow is named after the pool id.
        wf_name = ids['pool']
        LOG.debug(_('Remove the workflow %s') % wf_name)
        resource = '/api/workflow/%s' % (wf_name)
        # 404 is accepted: the workflow may never have been created.
        response = _rest_wrapper(self.rest_client.call('DELETE', resource,
                                                       None, None),
                                 [204, 202, 404])
        msg = response.get('message', None)
        if msg == "Not Found":
            # Nothing on the device - remove the vip from the DB directly.
            self.plugin._delete_db_vip(context, ids['vip'])
        else:
            oper = OperationAttributes(response['uri'],
                                       ids,
                                       lb_db.Vip,
                                       ids['vip'],
                                       delete=True)
            LOG.debug(_('Pushing operation %s to the queue'), oper)
            self.queue.put_nowait(oper)

    def _remove_service(self, service_name):
        """Delete an ADC service from vDirect (async, 202 expected)."""
        resource = '/api/service/%s' % (service_name)
        _rest_wrapper(self.rest_client.call('DELETE',
                                            resource, None, None),
                      [202])
    def _get_service(self, pool_id, network_id):
        """Get a service name.

        If no service exists for the vip's network yet, create and
        provision one and run the l2_l3 workflow that configures its
        data/HA networking.
        """
        incoming_service_name = 'srv_' + network_id
        service_name = self._get_available_service(incoming_service_name)
        if not service_name:
            LOG.debug(
                'Could not find a service named ' + incoming_service_name)
            service_name = self._create_service(pool_id, network_id)
            # NOTE(review): mutates the module-level dict; concurrent
            # callers would race on the 'service' value.
            L2_CTOR_PARAMS["service"] = incoming_service_name
            wf_name = 'l2_l3_' + network_id
            if not self.workflows_were_uploaded:
                self._upload_workflows_templates()
            self._create_workflow(
                wf_name, L2_L3_WORKFLOW_TEMPLATE_NAME, L2_CTOR_PARAMS)
            self._update_workflow(
                wf_name, "setup_l2_l3", L2_SETUP_L2_L3_PARAMS)
        else:
            LOG.debug('A service named ' + service_name + ' was found.')
        return service_name

    def _create_service(self, pool_id, network_id):
        """create the service and provision it (async)."""
        # 1) create the service
        service_name = 'srv_' + network_id
        resource = '/api/service?name=%s' % service_name
        # Deep-copy the spec template so the shared default is untouched.
        service = copy.deepcopy(self.service)
        service['primary']['network']['portgroups'] = [network_id]
        response = _rest_wrapper(self.rest_client.call('POST', resource,
                                                       service,
                                                       CREATE_SERVICE_HEADER),
                                 [201])

        # 2) provision the service
        provision_uri = response['links']['actions']['provision']
        _rest_wrapper(self.rest_client.call('POST', provision_uri,
                                            None, PROVISION_HEADER))
        return service_name

    def _get_available_service(self, service_name):
        """Check if service exists and return its name if it does."""
        resource = '/api/service/' + service_name
        try:
            _rest_wrapper(self.rest_client.call('GET',
                                                resource,
                                                None, None), [200])
        except Exception:
            # Any non-200 outcome (e.g. 404) means the service is absent.
            return
        return service_name

    def _workflow_exists(self, pool_id):
        """Check if a WF having the name of the pool_id exists."""
        resource = '/api/workflow/' + pool_id
        try:
            _rest_wrapper(self.rest_client.call('GET',
                                                resource,
                                                None,
                                                None), [200])
        except Exception:
            return False
        return True
    def _create_workflow(self, wf_name, wf_template_name,
                         create_workflow_params=None):
        """Create a WF from a template if it does not exist yet."""
        if not self.workflows_were_uploaded:
            self._upload_workflows_templates()
        if not self._workflow_exists(wf_name):
            if not create_workflow_params:
                create_workflow_params = {}
            resource = '/api/workflowTemplate/%s?name=%s' % (
                wf_template_name, wf_name)
            params = {'parameters': create_workflow_params}
            response = _rest_wrapper(self.rest_client.call('POST',
                                                           resource,
                                                           params,
                                                           TEMPLATE_HEADER))
            LOG.debug(_('create_workflow response: %s'), str(response))

    def _upload_workflows_templates(self):
        """Upload the driver workflows to vDirect server."""
        # Start by checking which of our templates are already present.
        workflows = {L2_L3_WORKFLOW_TEMPLATE_NAME:
                     False, L4_WORKFLOW_TEMPLATE_NAME: False}
        resource = '/api/workflowTemplate'
        response = _rest_wrapper(self.rest_client.call('GET',
                                                       resource,
                                                       None,
                                                       None), [200])
        for wf in workflows.keys():
            for wf_template in response:
                if wf == wf_template['name']:
                    workflows[wf] = True
                    break
        # Upload only the templates the server is missing.
        for wf, found in workflows.items():
            if not found:
                self._upload_workflow_template(wf)
        self.workflows_were_uploaded = True
    def _upload_workflow_template(self, wf_template_name):
        """Upload a wf template to vDirect server."""
        def _get_folders():
            # The template's own folder plus scripts shared by all
            # workflows, both located relative to this driver module.
            current_folder = os.path.dirname(os.path.realpath(__file__))
            folders = [current_folder + '/workflows/' + wf_template_name,
                       current_folder + '/workflows/common']
            return folders

        LOG.debug(_('About to upload wf template named %s.zip'),
                  wf_template_name)
        data = self._get_workflow_zip_data(_get_folders())
        _rest_wrapper(self.rest_client.call('POST',
                                            '/api/workflowTemplate',
                                            data,
                                            ZIP_HEADER, binary=True), [201])
def _get_workflow_zip_data(self, folders):
"""Create a zip file on the fly and return its content."""
def _file_to_zip(f):
n, ext = os.path.splitext(f)
LOG.debug("file name = " + n + " ext = " + ext)
return f == 'workflow.xml' or ext in ['.vm', '.groovy']
in_memory_file = StringIO()
zip_file = ZipFile(in_memory_file, 'w')
LOG.debug(_('Folders are %s'), folders)
for folder in folders:
LOG.debug(_('Folder is %s'), folder)
for root, dirs, files in os.walk(folder):
for file in files:
if _file_to_zip(file):
LOG.debug(_('About to add file %s to zip'), str(file))
LOG.debug(_('Path: %s'), os.path.join(root, file))
zip_file.write(os.path.join(root, file),
os.path.basename(file))
LOG.debug(_('File %s was added to zip'), str(file))
zip_file.close()
return in_memory_file.getvalue()
class vDirectRESTClient:

    """REST server proxy to Radware vDirect."""

    def __init__(self,
                 server='localhost',
                 port=2188,
                 ssl=None,
                 auth=None,
                 timeout=5000,
                 base_uri=''):
        # NOTE(review): httplib interprets timeout in seconds; the 5000
        # default looks like a milliseconds value - confirm the unit.
        self.server = server
        self.port = port
        self.ssl = ssl
        self.base_uri = base_uri
        self.timeout = timeout
        self.auth = None
        if auth:
            # Pre-compute the HTTP Basic authentication header value.
            self.auth = 'Basic ' + base64.encodestring(auth).strip()
        debug_params = {'server': self.server,
                        'port': self.port,
                        'ssl': self.ssl}
        LOG.debug(_('vDirectRESTClient:init server=%(server)s, '
                    'port=%(port)d, '
                    'ssl=%(ssl)r'), debug_params)

    @call_log.log
    def call(self, action, resource, data, headers, binary=False):
        """Issue a single HTTP(S) request to vDirect.

        :param action: HTTP method ('GET', 'POST', 'DELETE', ...)
        :param resource: absolute URL, or a path appended to base_uri
        :param data: request body - an object to JSON-encode, or raw
            bytes when binary=True
        :param headers: dict of extra headers, or None
        :param binary: when True, send data as-is without JSON encoding
        :returns: tuple (status, reason, body string, parsed JSON or raw
            body); (0, None, None, None) on connection failure
        """
        if resource.startswith('http'):
            uri = resource
        else:
            uri = self.base_uri + resource
        if binary:
            body = data
        else:
            body = json.dumps(data)

        # NOTE(review): debug_data is computed but never logged here -
        # possibly leftover, or intended for the @call_log decorator.
        debug_data = 'binary' if binary else body
        debug_data = debug_data if debug_data else 'EMPTY'
        if not headers:
            headers = {}
        conn = None
        if self.ssl:
            conn = httplib.HTTPSConnection(
                self.server, self.port, timeout=self.timeout)
            # NOTE(review): the constructor raises rather than returning
            # None, so this branch looks unreachable - confirm.
            if conn is None:
                LOG.error(_('vdirectRESTClient: Could not establish HTTPS '
                            'connection'))
                return 0, None, None, None
        else:
            conn = httplib.HTTPConnection(
                self.server, self.port, timeout=self.timeout)
            if conn is None:
                LOG.error(_('vdirectRESTClient: Could not establish HTTP '
                            'connection'))
                return 0, None, None, None

        try:
            conn.request(action, uri, body, headers)
            response = conn.getresponse()
            respstr = response.read()
            respdata = respstr
            try:
                # Parse the body as JSON when possible; fall back to the
                # raw string otherwise.
                respdata = json.loads(respstr)
            except ValueError:
                # response was not JSON, ignore the exception
                pass
            ret = (response.status, response.reason, respstr, respdata)
        except (socket.timeout, socket.error) as e:
            log_dict = {'action': action, 'e': e}
            LOG.error(_('vdirectRESTClient: %(action)s failure, %(e)r'),
                      log_dict)
            ret = 0, None, None, None
        conn.close()
        return ret
class OperationAttributes:

    """Container for the attributes of a pending vDirect operation.

    Instances are queued by the driver and consumed by the completion
    handler, which polls operation_url until the operation finishes.
    """

    def __init__(self,
                 operation_url,
                 object_graph,
                 lbaas_entity=None,
                 entity_id=None,
                 delete=False):
        # Where to poll vDirect for this operation's completion status.
        self.operation_url = operation_url
        # The ids dict of the vip graph the operation acts upon.
        self.object_graph = object_graph
        self.delete = delete
        self.lbaas_entity = lbaas_entity
        self.entity_id = entity_id
        # Recorded so completion time can be reported in the logs.
        self.creation_time = time.time()

    def __repr__(self):
        attrs = ', '.join(
            '%s = %r' % pair for pair in self.__dict__.items())
        return '<%s: {%s}>' % (type(self).__name__, attrs)
class OperationCompletionHander(threading.Thread):

    """Update DB with operation status or delete the entity from DB.

    A daemon thread that consumes OperationAttributes from the driver's
    queue, polls vDirect for completion, and reflects the outcome in the
    Neutron DB under an admin context.
    """

    def __init__(self, queue, rest_client, plugin):
        threading.Thread.__init__(self)
        # Work items (OperationAttributes) arrive here from the driver.
        self.queue = queue
        self.rest_client = rest_client
        self.admin_ctx = qcontext.get_admin_context()
        self.plugin = plugin
        # Set by join() to ask the run() loop to exit.
        self.stoprequest = threading.Event()

    def _get_db_status(self, operation, success, messages=None):
        """Get the db_status based on the status of the vdirect operation."""
        if not success:
            # we have a failure - log it and set the return ERROR as DB state
            msg = ', '.join(messages) if messages else "unknown"
            error_params = {"operation": operation, "msg": msg}
            LOG.error(_('Operation %(operation)s failed. Reason: %(msg)s'),
                      error_params)
            return constants.ERROR
        if operation.delete:
            # A successful delete has no status to set - the caller will
            # remove the entity from the DB instead.
            return None
        else:
            return constants.ACTIVE

    def join(self, timeout=None):
        # Signal the worker loop to stop before waiting for the thread.
        self.stoprequest.set()
        super(OperationCompletionHander, self).join(timeout)

    def run(self):
        """Poll queued operations; update or purge DB entities when done."""
        oper = None
        while not self.stoprequest.isSet():
            try:
                # Block for at most 1 second so stoprequest is honored.
                oper = self.queue.get(timeout=1)
                LOG.debug('Operation consumed from the queue: ' +
                          str(oper))
                # check the status - if oper is done: update the db ,
                # else push the oper again to the queue
                result = self.rest_client.call('GET',
                                               oper.operation_url,
                                               None,
                                               None)
                completed = result[RESP_DATA]['complete']
                if completed:
                    # operation is done - update the DB with the status
                    # or delete the entire graph from DB
                    success = result[RESP_DATA]['success']
                    sec_to_completion = time.time() - oper.creation_time
                    debug_data = {'oper': oper,
                                  'sec_to_completion': sec_to_completion,
                                  'success': success}
                    LOG.debug(_('Operation %(oper)s is completed after '
                                '%(sec_to_completion)d sec '
                                'with success status: %(success)s :'),
                              debug_data)
                    db_status = self._get_db_status(oper, success)
                    if db_status:
                        _update_vip_graph_status(
                            self.plugin, self.admin_ctx,
                            oper, db_status)
                    else:
                        _remove_object_from_db(
                            self.plugin, self.admin_ctx, oper)
                else:
                    # not completed - push to the queue again
                    LOG.debug(_('Operation %s is not completed yet..') % oper)
                    # queue is empty - lets take a short rest
                    if self.queue.empty():
                        time.sleep(1)
                    self.queue.put_nowait(oper)
                # send a signal to the queue that the job is done
                self.queue.task_done()
            except Queue.Empty:
                continue
            except Exception:
                # Keep the worker thread alive no matter what a single
                # operation does; log and move on to the next item.
                m = _("Exception was thrown inside OperationCompletionHander")
                LOG.exception(m)
def _rest_wrapper(response, success_codes=(202,)):
    """Wrap a REST call and make sure a valid status is returned.

    :param response: (status, reason, body, data) tuple as returned by
        vDirectRESTClient.call()
    :param success_codes: HTTP statuses accepted as success
    :returns: the parsed response data on success
    :raises q_exc.NeutronException: when the status is not a success code
    """
    # NOTE: a tuple default avoids the mutable-default-argument pitfall
    # of the original list; membership semantics are unchanged.
    if response[RESP_STATUS] not in success_codes:
        # %s-format each field so a None reason/body (connection failure
        # tuple) cannot itself crash the error path.
        raise q_exc.NeutronException(
            '%s:%s. Error description: %s' %
            (response[RESP_STATUS], response[RESP_REASON],
             response[RESP_STR]))
    return response[RESP_DATA]
def _update_vip_graph_status(plugin, context, oper, status):
    """Set *status* on the whole Vip object graph or one entity of it.

    A pool/health-monitor association uses the dedicated plugin API; an
    operation targeting a single entity updates just that entity; any
    other operation propagates the status over every node of the graph.
    """
    LOG.debug(_('_update: %s '), oper)
    if oper.lbaas_entity == lb_db.PoolMonitorAssociation:
        plugin.update_pool_health_monitor(context,
                                          oper.entity_id,
                                          oper.object_graph['pool'],
                                          status)
        return
    if oper.entity_id:
        plugin.update_status(context,
                             oper.lbaas_entity,
                             oper.entity_id,
                             status)
        return
    # No specific entity - update the whole vip graph status.
    graph = oper.object_graph
    plugin.update_status(context, lb_db.Vip, graph['vip'], status)
    plugin.update_status(context, lb_db.Pool, graph['pool'], status)
    for member_id in graph['members']:
        plugin.update_status(context, lb_db.Member, member_id, status)
    for hm_id in graph['health_monitors']:
        plugin.update_pool_health_monitor(context,
                                          hm_id,
                                          graph['pool'],
                                          status)
def _remove_object_from_db(plugin, context, oper):
    """Remove the entity targeted by a completed delete operation from db.

    :raises q_exc.NeutronException: for an unsupported lbaas entity type
    """
    LOG.debug(_('_remove_object_from_db %s'), str(oper))
    if oper.lbaas_entity == lb_db.PoolMonitorAssociation:
        # object_graph is the ids dict built by get_ids(); the pool id
        # is stored under 'pool' - there is no 'pool_id' key, so the
        # original lookup raised KeyError.
        plugin._delete_db_pool_health_monitor(context,
                                              oper.entity_id,
                                              oper.object_graph['pool'])
    elif oper.lbaas_entity == lb_db.Member:
        plugin._delete_db_member(context, oper.entity_id)
    elif oper.lbaas_entity == lb_db.Vip:
        plugin._delete_db_vip(context, oper.entity_id)
    elif oper.lbaas_entity == lb_db.Pool:
        plugin._delete_db_pool(context, oper.entity_id)
    else:
        # Format the message before raising - the exception class does
        # not interpolate extra positional arguments.
        raise q_exc.NeutronException(
            _('Tried to remove unsupported lbaas entity %s!') %
            str(oper.lbaas_entity))
# Defaults substituted for properties missing from the vip graph.
TRANSLATION_DEFAULTS = {'session_persistence_type': 'SOURCE_IP',
                        'session_persistence_cookie_name': 'none',
                        'url_path': '/',
                        'http_method': 'GET',
                        'expected_codes': '200'
                        }
# Properties of each entity that are forwarded to the workflow.
VIP_PROPERTIES = ['address', 'protocol_port', 'protocol', 'connection_limit',
                  'admin_state_up', 'session_persistence_type',
                  'session_persistence_cookie_name']
POOL_PROPERTIES = ['protocol', 'lb_method', 'admin_state_up']
MEMBER_PROPERTIES = ['address', 'protocol_port', 'weight', 'admin_state_up']
HEALTH_MONITOR_PROPERTIES = ['type', 'delay', 'timeout', 'max_retries',
                             'admin_state_up', 'url_path', 'http_method',
                             'expected_codes', 'id']


def _translate_vip_object_graph(extended_vip):
    """Flatten the extended vip graph into workflow parameters.

    Scalar vip/pool properties become 'vip_*' / 'pool_*' keys; member
    and health-monitor properties become parallel '*_array' lists. The
    graph's entity ids are attached under the '__ids__' key.
    """
    def _create_key(prefix, property_name):
        return '%s_%s_array' % (prefix, property_name)

    def _trans_prop_name(prop_name):
        # The workflow templates call the id property 'uuid'.
        return 'uuid' if prop_name == 'id' else prop_name

    def get_ids(extended_vip):
        return {
            'vip': extended_vip['id'],
            'pool': extended_vip['pool']['id'],
            'members': [m['id'] for m in extended_vip['members']],
            'health_monitors': [
                hm['id'] for hm in extended_vip['health_monitors']
            ],
        }

    trans_vip = {}
    LOG.debug('Vip graph to be translated: ' + str(extended_vip))
    for prop in VIP_PROPERTIES:
        trans_vip['vip_' + prop] = extended_vip.get(
            prop, TRANSLATION_DEFAULTS.get(prop))
    for prop in POOL_PROPERTIES:
        trans_vip['pool_' + prop] = extended_vip['pool'][prop]
    for prop in MEMBER_PROPERTIES:
        trans_vip[_create_key('member', prop)] = [
            member.get(prop, TRANSLATION_DEFAULTS.get(prop))
            for member in extended_vip['members']
        ]
    for prop in HEALTH_MONITOR_PROPERTIES:
        trans_vip[_create_key('hm', _trans_prop_name(prop))] = [
            hm.get(prop, TRANSLATION_DEFAULTS.get(prop))
            for hm in extended_vip['health_monitors']
        ]
    trans_vip['__ids__'] = get_ids(extended_vip)
    LOG.debug('Translated Vip graph: ' + str(trans_vip))
    return trans_vip
def _drop_pending_delete_elements(extended_vip):
    """Traverse the Vip object graph and drop PENDING_DELETE nodes."""
    # What if the pool is pending_delete?
    def _alive(node):
        return node['status'] != constants.PENDING_DELETE

    extended_vip['health_monitors'] = [
        hm for hm in extended_vip['health_monitors'] if _alive(hm)
    ]
    extended_vip['members'] = [
        member for member in extended_vip['members'] if _alive(member)
    ]
    return extended_vip

View File

@ -0,0 +1,51 @@
import com.radware.alteon.beans.adc.*;
import com.radware.alteon.api.*;
import com.radware.alteon.sdk.*
import com.radware.alteon.sdk.rpm.*
import com.radware.alteon.api.impl.AlteonCliUtils;
import com.radware.alteon.cli.CliSession;
// Provision the ADC service and then block until its SSH (CLI) endpoint
// answers, since subsequent workflow steps require a working CLI session.
service.provision()
//
// temp patch until provision will make sure SSH is active
// poll up to COUNTER_MAX * SLEEP_TIME ms (300 * 2000 ms = 10 minutes)
counter = 0
logger.info("Start waiting for SSH connection.")
COUNTER_MAX = 300
SLEEP_TIME = 2000
while (counter < COUNTER_MAX) {
    try {
        validateAdcCLIConnection(service.getPrimary());
        logger.info("Validated primary (" + counter + ")")
        if (service.request.ha) {
            // HA pair - the secondary must be reachable as well.
            validateAdcCLIConnection(service.getSecondary());
            logger.info("Validated secondary (" + counter + ")")
        }
        break
    } catch (Exception e) {
        // Not ready yet - wait and retry.
        counter++
        sleep(SLEEP_TIME)
    }
}
if(counter >= COUNTER_MAX) {
    throw new Exception("Could not validate SSH connection after " + (COUNTER_MAX * SLEEP_TIME) / 1000 + " seconds.")
}
logger.info("Validated SSH connection..")

// Open and immediately close a CLI session to prove the ADC accepts SSH
// connections; any failure is wrapped in AdcConnectionException.
def validateAdcCLIConnection(AdcCLIConnection connection) {
    CliSession s = new CliSession(AlteonCliUtils.convertConnection(connection));
    try {
        s.connect();
        s.close();
    } catch (Exception e) {
        throw new AdcConnectionException("IOException while validating the connection. Please check the connection settings.",e);
    }
}

View File

@ -0,0 +1,168 @@
import groovy.transform.ToString
import groovy.transform.EqualsAndHashCode
import com.radware.alteon.beans.adc.*;
import com.radware.alteon.api.*;
import com.radware.alteon.sdk.*
import com.radware.alteon.sdk.rpm.*
import com.radware.alteon.api.impl.AlteonCliUtils;
import com.radware.alteon.cli.CliSession;
// Value holder for a single subnet configured on a port; equality is by
// id only (gateway/mask/ips are excluded from equals/hashCode).
@ToString(includeNames=true)
@EqualsAndHashCode(excludes=["gateway","mask","ips"])
class SubnetInfo {
    String id
    String gateway
    String mask
    String ips
}

// A port and the subnets configured on it, keyed by subnet id; equality
// is by name only.
@ToString(includeNames=true)
@EqualsAndHashCode(excludes=["subnets"])
class PortInfo {
    String name
    def subnets = [:]
}
// Split an advanced-configuration key of the form
// "portX.<subnet_id>.<property>" into its parts; returns null for keys
// that do not describe a port.
def tokenize_key(map_key) {
    def ret_arr = map_key.tokenize(".")
    if (ret_arr.size > 0 && ret_arr[0].startsWith("port")) {
        return ret_arr
    }
    else
        return null;
}

// Build a map of port name -> PortInfo from the ADC advanced
// configuration properties.
def parse(advanced_props) {
    def ports = [:]
    advanced_props.each {
        key, value ->
        def parsed_key = tokenize_key(key)
        if (parsed_key) {
            def port_name = parsed_key[0]
            def subnet_id = parsed_key[1]
            def property = parsed_key[2]
            def port_info = ports.get(port_name)
            if (port_info) {
                // Known port: update or add the subnet entry.
                def subnet_info = port_info.subnets.get(subnet_id)
                if (subnet_info) {
                    subnet_info[property] = value
                }
                else {
                    subnet_info = new SubnetInfo(id:subnet_id)
                    subnet_info[property] = value
                    port_info.subnets.put(subnet_id, subnet_info)
                }
            }
            else {
                // First time this port is seen.
                port_info = new PortInfo(name:port_name)
                subnet_info = new SubnetInfo(id:subnet_id)
                subnet_info[property] = value
                port_info.subnets.put(subnet_id, subnet_info)
                ports.put(port_name, port_info)
            }
        }
    }
    return ports
}
// Return the first value of the comma-separated property
// 'property_name' on the first subnet of 'port_name', or null when the
// port is unknown.
// NOTE(review): when the port exists but the property is missing, the
// method falls through without an explicit return (Groovy then yields
// the last evaluated expression) - confirm callers expect null here.
def get_property_per_port (ports, port_name, property_name) {
    port_info = ports[port_name]
    if (port_info) {
        port_subnet = port_info.subnets
        if (port_subnet && !port_subnet.isEmpty()) {
            port_subnet_item = port_subnet.values().iterator().next()
            port_subnet_property = port_subnet_item[property_name]
            if (port_subnet_property) {
                val_array = port_subnet_property.tokenize(",")
                if (!val_array.isEmpty())
                    return val_array[0]
            }
        }
    }
    else {
        return null
    }
}
// Convert a CIDR string ("x.x.x.x/nn") to a numeric netmask.
// Throws NumberFormatException on malformed input or an out-of-range
// prefix length (valid range is 1..32).
def cidr_to_mask(cidr) throws NumberFormatException {
    String[] st = cidr.split("\\/");
    if (st.length != 2) {
        throw new NumberFormatException("Invalid CIDR format '"
            + cidr + "', should be: xx.xx.xx.xx/xx");
    }
    String symbolicIP = st[0];
    String symbolicCIDR = st[1];
    Integer numericCIDR = new Integer(symbolicCIDR);
    if (numericCIDR > 32) {
        throw new NumberFormatException("CIDR can not be greater than 32");
    }
    //Get IP
    st = symbolicIP.split("\\.");
    if (st.length != 4) {
        throw new NumberFormatException("Invalid IP address: " + symbolicIP);
    }
    int i = 24;
    baseIPnumeric = 0;
    // Pack the four octets into one 32-bit value; this only validates
    // the address - the packed value itself is not returned.
    for (int n = 0; n < st.length; n++) {
        int value = Integer.parseInt(st[n]);
        if (value != (value & 0xff)) {
            throw new NumberFormatException("Invalid IP address: " + symbolicIP);
        }
        baseIPnumeric += value << i;
        i -= 8;
    }
    //Get netmask
    if (numericCIDR < 1)
        throw new NumberFormatException("Netmask CIDR can not be less than 1");
    netmaskNumeric = 0xffffffff;
    netmaskNumeric = netmaskNumeric << (32 - numericCIDR);
    return netmaskNumeric
}
// Render a numeric IPv4 address (or netmask) in dotted-decimal form.
def String convert_numeric_ip_to_symbolic(ip) {
    StringBuffer sb = new StringBuffer(15);
    for (int shift = 24; shift > 0; shift -= 8) {
        // process 3 bytes, from high order byte down.
        def tmp = (ip >>> shift) & 0xff
        sb.append(tmp)
        sb.append('.');
    }
    sb.append(ip & 0xff);
    return sb.toString();
}
// Derive the data/HA addressing outputs from the primary (and, for HA
// pairs, the secondary) ADC advanced configuration.
primary_adc = sdk.read(service.getPrimaryId())
primary_config = primary_adc.adcInfo.advancedConfiguration
primary_ports = parse(primary_config)
data_ip_address = get_property_per_port(primary_ports, "port1", "ips")
data_ip_mask = convert_numeric_ip_to_symbolic(cidr_to_mask(get_property_per_port(primary_ports, "port1", "mask")))
gateway = get_property_per_port(primary_ports, "port1", "gateway")
if (service.request.ha) {
    // HA pair: port2 on each ADC carries the HA addresses; the VRRP
    // address is taken from the primary.
    secondary_adc = sdk.read(service.getSecondaryId())
    secondary_config = secondary_adc.adcInfo.advancedConfiguration
    secondary_ports = parse(secondary_config)
    ha_ip_address_1 = get_property_per_port(primary_ports, "port2", "ips")
    ha_ip_address_2 = get_property_per_port(secondary_ports, "port2", "ips")
    ha_vrrp_ip_address = ha_ip_address_1
    ha_ip_mask = convert_numeric_ip_to_symbolic(cidr_to_mask(get_property_per_port(primary_ports, "port2", "mask")))
}
else {
    // Standalone ADC: fill the HA outputs with placeholder values.
    secondary_adc = null
    secondary_config = null
    secondary_ports = null
    ha_ip_address_1 = "1.1.1.1"
    ha_ip_address_2 = "1.1.1.2"
    ha_vrrp_ip_address = "1.1.1.3"
    ha_ip_mask = "255.255.255.255"
    ha_group_vr_id = 2
}

View File

@ -0,0 +1,131 @@
#property('description', 'Configures VLANs and L3 interface for data and HA networks - v1')
## --- Input parameters describing data and HA networking ---
#param("data_port", "int", "in", "min=1", "max=2", "prompt=Data Port")
#param("data_ip_address", "ip", "in", "prompt=Data IP Address")
#param("data_ip_mask", "ip", "in", "prompt=Data IP Mask")
#param("gateway", "ip", "in", "prompt=Default Gateway IP Address")
#param("ha_enabled", "bool", "in", "prompt=HA Enabled?")
#param("ha_port", "int", "in", "min=1", "max=2", "prompt=HA Port")
#param("ha_ip_address", "ip", "in", "prompt=HA IP Address")
#param("ha_ip_mask", "ip", "in", "prompt=HA IP Mask")
#param("ha_vrrp_ip_address", "ip", "in", "prompt=HA VRRP IP Address")
#param("ha_group_vr_id", "int", "in", "min=2", "max=1024", "prompt=HA Group VR ID (1 is allocated to the interface VR)")
## --- Output parameters consumed by later workflow steps ---
#param("data_interface_id", "int", "out")
#param("gateway_id", "int", "out")
#param("ha_interface_id", "int", "out")
#param("data_vlan", "int", "out")
#param("ha_vlan", "int", "out")
## Data and HA traffic must ride on different physical ports.
#if($data_port == $ha_port)
#error("Data Port and HA Port must be on different Ports!!")
#end
## Resolve the VLAN (PVID) of the data port.
#set($port = $adc.newBean('AgPortNewCfgTableEntry'))
#set($port.Indx = $data_port)
#set($port = $adc.read($port))
#if ($adc.isNull($port))
##Port was not found. not too realistic but if so raise an error
#error("Port $data_port was not found!!")
#else
#set($data_vlan = $port.PVID)
#end
## Resolve the VLAN (PVID) of the HA port.
#set($port = $adc.newBean('AgPortNewCfgTableEntry'))
#set($port.Indx = $ha_port)
#set($port = $adc.read($port))
#if ($adc.isNull($port))
##Port was not found. not too realistic but if so raise an error
#error("Port $ha_port was not found!!")
#else
#set($ha_vlan = $port.PVID)
#end
## Allocate an interface id for the data IP and configure the interface.
#set($Integer = 0)
#set($data_interface_string = "#get_interface_id($data_ip_address, 1)")
#set($data_interface_id = $Integer.parseInt($data_interface_string.trim()))
#create_interface($data_ip_address, $data_ip_mask, $data_vlan, $data_interface_id)
## Fail if the gateway address already exists; otherwise take a free
## gateway index.
#set($gwb = $adc.newBean('/c/l3/gw'))
#set($gwb.addr = $gateway)
#set($gwb = $adc.findFirst($gwb))
#if ($adc.isNull($gwb))
#set($gateway_id = $adc.getFreeIndexWithDefault('/c/l3/gw', 1))
#else
#error("Gateway with address $gateway already exists on index $gwb.index")
#end
## Only low gateway indexes (1-4) may serve as a default gateway.
#if ($gateway_id < 5)
/c/l3/gw $gateway_id
addr $gateway
arp ena
ena
#else
## NOTE(review): '$gatewayId' in the message below does not match the
## '$gateway_id' variable set above and will render literally - confirm.
#log('error', "The available gateway index $gatewayId cannot be used for a default gateway!")
#error("No available index for a default gateway!")
#end
#if($ha_enabled)
#set($ha_interface_string = "#get_interface_id($ha_ip_address, $data_interface_id)")
#set($ha_interface_id = $Integer.parseInt($ha_interface_string.trim()))
#create_interface($ha_ip_address, $ha_ip_mask, $ha_vlan, $ha_interface_id)
/c/l3/vrrp/on
/c/l3/vrrp/hotstan enabled
/c/l3/vrrp/vr 1
ena
ipver v4
vrid 1
if $ha_interface_id
addr $ha_vrrp_ip_address
share dis
/c/l3/vrrp/group
ena
ipver v4
vrid $ha_group_vr_id
if $ha_interface_id
share dis
/c/slb/port $data_port
hotstan ena
/c/slb/port $ha_port
intersw ena
#else
#set($ha_interface_id = 0)
#end
/c/slb
on
/c/slb/port $data_port
client ena
server ena
proxy ena
## Look up a free L3 interface index for $address, starting the search at
## $default_index. The id is "returned" as the macro's rendered output, so
## callers capture it with #set($x = "#get_interface_id(...)") and trim it.
## Raises an error when an interface with this address already exists.
#macro(get_interface_id, $address, $default_index)
#set($interface = $adc.newBean('/c/l3/if'))
#set($interface.addr = $address)
#set($interface = $adc.findFirst($interface))
#if ($adc.isNull($interface))
## IP address not found
#set($interface_id = $adc.getFreeIndexWithDefault('/c/l3/if', $default_index))
$interface_id
#else
## Found existing interface with this address
#error("Found existing interface with address $address on index $interface.index!!")
#end
#end
## Emit the CLI commands that create and enable L3 interface $interface_id.
#macro(create_interface, $address, $mask, $vlan, $interface_id)
/c/l3/if $interface_id
addr $address
mask $mask
vlan $vlan
ena
#end

View File

@ -0,0 +1,45 @@
## Undo what setup_l2_l3.vm configured: disable VRRP hot-standby and delete
## the HA interface (when HA was enabled), turn SLB off, and remove the
## default gateway and the data interface.
#property('description', 'Cleanup VLANs and L3 interface for data and HA networks - v1')
#param("data_port", "int", "in", "min=1", "max=2", "prompt=Data Port")
#param("data_interface_id", "int", "in", "min=1", "max=256", "prompt=Data Interface ID")
#param("gateway_id", "int", "in", "min=1", "max=4", "prompt=Default Gateway ID")
#param("ha_enabled", "bool", "in", "prompt=HA Enabled?")
#param("ha_port", "int", "in", "min=1", "max=2", "prompt=HA Port")
#param("ha_interface_id", "int", "in", "min=1", "max=256", "prompt=HA Interface ID")
#if($ha_enabled)
/c/slb/port $data_port
hotstan dis
/c/slb/port $ha_port
intersw dis
/c/l3/vrrp/group
del
/c/l3/vrrp/vr 1
del
/c/l3/vrrp/hotstan dis
/c/l3/vrrp/off
#delete_interface($ha_interface_id)
#end
/c/slb
off
/c/l3/gw $gateway_id
del
#delete_interface($data_interface_id)
## Emit the CLI commands that delete L3 interface $interface_id.
#macro(delete_interface, $interface_id)
/c/l3/if $interface_id
del
#end

View File

@ -0,0 +1,166 @@
<?xml version="1.0" ?>
<workflow createAction="init" deleteAction="teardown_l2_l3" name="openstack_l2_l3" xmlns="http://www.radware.com/vdirect">
<description>Workflow to setup L2 and L3 for Alteon VA, Single or HA Pair, in Hot Standbye [2013-07-25 11:50:20.285000]</description>
<persist>
<!-- Declare the persistent parameters of the workflow -->
<parameters>
<parameter name="service" prompt="ADC service" type="adcService"/>
<parameter defaultValue="HA-Network" name="ha_network_name" prompt="HA Network Name" type="string"/>
<!-- Data Info-->
<parameter defaultValue="1" max="2" min="1" name="data_port" prompt="Data Port" type="int"/>
<parameter name="data_ip_address" prompt="Data IP Address" type="ip"/>
<parameter name="data_ip_mask" prompt="Data IP Mask" type="ip"/>
<parameter name="gateway" prompt="Default Gateway IP Address" type="ip"/>
<!-- HA Info -->
<parameter defaultValue="2" max="2" min="1" name="ha_port" prompt="HA Port" type="int"/>
<parameter name="ha_ip_address_1" prompt="HA IP Address for Primary ADC" type="ip"/>
<parameter name="ha_ip_address_2" prompt="HA IP Address for Secondary ADC" type="ip"/>
<parameter name="ha_vrrp_ip_address" prompt="HA VRRP IP Address" type="ip"/>
<parameter name="ha_ip_mask" prompt="Data IP Mask" type="ip"/>
<parameter max="1024" min="2" name="ha_group_vr_id" prompt="HA Group VR ID (1 is allocated to the interface VR)" type="int"/>
<parameter defaultValue="save" name="apply_type" prompt="Device apply type" type="string" values="none,apply,save"/>
<!-- Calculated -->
<parameter name="ha_enabled" type="bool"/>
<parameter name="ha_network" type="network"/>
<parameter name="ha_vrrp_pool" type="vrrpPool"/>
<parameter name="data_interface_id_1" type="int"/>
<parameter name="data_interface_id_2" type="int"/>
<parameter name="gateway_id_1" type="int"/>
<parameter name="gateway_id_2" type="int"/>
<parameter name="ha_interface_id_1" type="int"/>
<parameter name="ha_interface_id_2" type="int"/>
<parameter name="data_vlan_1" type="int"/>
<parameter name="data_vlan_2" type="int"/>
<parameter name="ha_vlan_1" type="int"/>
<parameter name="ha_vlan_2" type="int"/>
</parameters>
</persist>
<!-- Declare the states used by this workflow -->
<states>
<state name="initialized"/>
<state name="applied"/>
<state name="removed"/>
</states>
<!-- Declare the workflow actions -->
<actions>
<action fromState="none" name="init" toState="initialized">
<inputs>
<parameters>
<parameter name="service"/>
<parameter name="ha_network_name"/>
</parameters>
</inputs>
<sequence>
<log message="This may take time... making sure that the service is provisioned."/>
<script file="wait_for_service.groovy" name="wait_for_service"/>
<log message="Service is provisioned."/>
<set saveAs="$ha_enabled" value="$service.request.ha"/>
</sequence>
<sequence if="$ha_enabled">
<set saveAs="$ha_network" value="${service.containerResourcePool.getNetwork($ha_network_name)}"/>
<error if="$workflow.isNull($ha_network)" message="Requested to allocate HA IPs while HA Network $ha_network_name not found!"/>
</sequence>
<sequence>
<log message="Read HA and Data IP addresses from the instances"/>
<script file="read_ips_data_from_service.groovy" name="read_ips_data_from_service"/>
<!-- out: data_ip_address, data_ip_mask, gateway, ha_ip_address_1, ha_ip_address_2, ha_vrrp_ip_address, ha_ip_mask-->
</sequence>
<sequence if="$ha_enabled &amp;&amp; $workflow.isNotNull($ha_network)">
<set saveAs="$ha_vrrp_pool" value="${ha_network.getVrrpPool()}"/>
<error if="$workflow.isNull($ha_vrrp_pool)" message="Requested to allocate HA VRRP while VRRP Pool not found in HA network $ha_network_name!"/>
<acquireResource comment="HA VR for service $service.id" owner="$service.id" pool="$ha_vrrp_pool" saveAs="$ha_group_vr_id"/>
</sequence>
<onError>
<autoReleaseResource/>
</onError>
</action>
<!-- setup l2 and l3 on the two devices -->
<action fromState="initialized" name="setup_l2_l3" toState="applied">
<devices>
<device device="$service.primary" name="adc1"/>
<device device="$service.secondary" if="$ha_enabled" name="adc2"/>
</devices>
<sequence>
<error if="$workflow.isNull($ha_ip_address_1)" message="HA IP address for primary ADC is not set!"/>
<error if="$ha_enabled &amp;&amp; $workflow.isNull($ha_ip_address_2)" message="HA IP address for secondary ADC is not set!"/>
<error if="$ha_enabled &amp;&amp; $workflow.isNull($ha_vrrp_ip_address)" message="HA VRRP IP address is not set!"/>
<error if="$workflow.isNull($ha_ip_mask)" message="HA IP mask is not set!"/>
<error if="$workflow.isNull($ha_group_vr_id)" message="HA group VR id is not set!"/>
<error if="$workflow.isNull($data_ip_address)" message="Data IP address is not set!"/>
<error if="$workflow.isNull($data_ip_mask)" message="Data IP mask is not set!"/>
<error if="$workflow.isNull($gateway)" message="Gateway IP address is not set!"/>
<log message="Start 'setup l2 l3 adc1' step."/>
<configuration file="setup_l2_l3.vm" name="setup_l2_l3_adc1">
<parameterMapping>
<map from="$adc1" to="$adc"/>
<map from="$ha_ip_address_1" to="$ha_ip_address"/>
<!-- Outputs -->
<map from="$data_interface_id_1" to="$data_interface_id"/>
<map from="$gateway_id_1" to="$gateway_id"/>
<map from="$ha_interface_id_1" to="$ha_interface_id"/>
<map from="$data_vlan_1" to="$data_vlan"/>
<map from="$ha_vlan_1" to="$ha_vlan"/>
</parameterMapping>
</configuration>
<log message="Completed 'setup l2 l3 adc1' step."/>
<log if="$ha_enabled" message="Start 'setup l2 l3 adc2' step."/>
<configuration if="$ha_enabled" file="setup_l2_l3.vm" name="setup_l2_l3_adc2">
<parameterMapping>
<map from="$adc2" to="$adc"/>
<map from="$ha_ip_address_2" to="$ha_ip_address"/>
<!-- Outputs -->
<map from="$data_interface_id_2" to="$data_interface_id"/>
<map from="$gateway_id_2" to="$gateway_id"/>
<map from="$ha_interface_id_2" to="$ha_interface_id"/>
<map from="$data_vlan_2" to="$data_vlan"/>
<map from="$ha_vlan_2" to="$ha_vlan"/>
</parameterMapping>
</configuration>
<log if="$ha_enabled" message="Completed 'setup l2 l3 adc2' step."/>
<commit apply="$apply_type != 'none'" save="$apply_type == 'save'"/>
</sequence>
<onError>
<autoRevert/>
</onError>
</action>
<action name="teardown_l2_l3" toState="removed">
<!-- Called when workflow is destroyed. A placeholder for 'setup' operations -->
<devices>
<device device="$service.primary" name="adc1"/>
<device device="$service.secondary" if="$ha_enabled" name="adc2"/>
</devices>
<sequence ifState="applied">
<log message="Start 'teardown l2 l3 adc1' step."/>
<configuration file="teardown_l2_l3.vm" name="teardown_l2_l3_adc1">
<parameterMapping>
<map from="$adc1" to="$adc"/>
<map from="$data_interface_id_1" to="$data_interface_id"/>
<map from="$gateway_id_1" to="$gateway_id"/>
<map from="$ha_interface_id_1" to="$ha_interface_id"/>
</parameterMapping>
</configuration>
<log message="Completed 'teardown l2 l3 adc1' step."/>
<log if="$ha_enabled" message="Start 'teardown l2 l3 adc2' step."/>
<configuration if="$ha_enabled" file="teardown_l2_l3.vm" name="teardown_l2_l3_adc2">
<parameterMapping>
<map from="$adc2" to="$adc"/>
<map from="$data_interface_id_2" to="$data_interface_id"/>
<map from="$gateway_id_2" to="$gateway_id"/>
<map from="$ha_interface_id_2" to="$ha_interface_id"/>
</parameterMapping>
</configuration>
<log if="$ha_enabled" message="Completed 'teardown l2 l3 adc2' step."/>
<commit apply="$apply_type != 'none'" save="$apply_type == 'save'"/>
</sequence>
<sequence if="$ha_enabled">
<releaseResource pool="$ha_vrrp_pool" resource="$ha_group_vr_id"/>
<set saveAs="$ha_enabled" value="false"/>
</sequence>
<onError>
<autoRevert/>
<!-- The resource is released as the next step will be to delete the ADC service itself so it is better to have the resource back to its pool -->
<releaseResource if="$ha_enabled" pool="$ha_vrrp_pool" resource="$ha_group_vr_id"/>
</onError>
</action>
</actions>
</workflow>

View File

@ -0,0 +1,247 @@
#property("summary", "Openstack - common macros and constants.")
## --------------------
## Common "constants"
## --------------------
#set($NONE="none")
#set($DEFAULT_HEALTH_MONITOR_TYPE="arp")
## OpenStack -> Alteon name mappings for health checks and LB algorithms.
#set($HC_TYPE_CONVERSION={"PING":"icmp","TCP":"tcp","HTTP":"http","HTTPS":"http"})
#set($SERVICE_ALGO_CONVERSION={"ROUND_ROBIN":"roundrobin","LEAST_CONNECTIONS":"leastconns","SOURCE_IP":"phash"})
#set($HC_TYPE_TO_POJO_CONVERSION={"HTTP":"SlbNewAdvhcHttpEntry","HTTPS":"SlbNewAdvhcHttpEntry","PING":"SlbNewAdvhcIcmpEntry","TCP":"SlbNewAdvhcTcpEntry"})
## Device limits used for validation below.
#set($HC_MAX_HCS_PER_GROUP=8)
#set($HC_MAX_DELAY=600)
#set($HC_MAX_TIMEOUT=600)
#set($HC_HTTP_METHODS=['get','head','post'])
#set($SERVICE_TYPES=["http","https","ssl","dns","rtsp","wts","basic-slb"])
#set($HC_HTTP_MAX_RESPONSE_STRING_SIZE=47)
#set($HC_HTTP_MAX_RESPONSE_ELEMENT_COUNT=12)
#set($HC_MAX_ID_LENGTH=32)
#set($GROUP_NAME_MAX_LENGTH=31)
#set($NO_IP="0.0.0.0")
#set($SESSION_PERSISTENCE_COOKIE_SIZE=64)
#set($CREATE_MODE = "CREATE")
#set($DELETE_MODE = "DELETE")
#set($DOT = ".")
#set($IPV4="v4")
#set($IPV6="v6")
#set ($IP_FIELDS = {$IPV4 : "IpAddr" , $IPV6 : "Ipv6Addr"})
#set ($VIRT_IP_FIELDS = {$IPV4 : "IpAddress" , $IPV6 : "Ipv6Addr"})
## IP-matching regexes. Backslashes are spliced in as '\' character literals
## because a backslash cannot be escaped inside a VTL string.
#set($IPV4_REGEX = ""+'\'+"A(25[0-5]|2[0-4]" + '\' + "d|[0-1]?" + '\' + "d?" + '\' + "d)(" + '\' + ".(25[0-5]|2[0-4]" + '\' + "d|[0-1]?" + '\' + "d?" + '\' + "d)){3}" + '\' + "z")
## BUG FIX: removed a stray space before "::" which made this pattern
## require a literal blank inside the address and never match.
#set($IPV6_HEX4DECCOMPRESSED_REGEX = "" + '\' + "A((?:[0-9A-Fa-f]{1,4}(?::[0-9A-Fa-f]{1,4})*)?)::((?:[0-9A-Fa-f]{1,4}:)*)(25[0-5]|2[0-4]" + '\' + "d|[0-1]?" + '\' + "d?" + '\' + "d)(" + '\' + ".(25[0-5]|2[0-4]" + '\' + "d|[0-1]?" + '\' + "d?" + '\' + "d)){3}" + '\' + "z")
#set($IPV6_6HEX4DEC_REGEX = "" + '\' + "A((?:[0-9A-Fa-f]{1,4}:){6,6})(25[0-5]|2[0-4]" + '\' + "d|[0-1]?" + '\' + "d?" + '\' + "d)(" + '\' + ".(25[0-5]|2[0-4]" + '\' + "d|[0-1]?" + '\' + "d?" + '\' + "d)){3}" + '\' + "z")
#set($IPV6_HEXCOMPRESSED_REGEX = "" + '\' + "A((?:[0-9A-Fa-f]{1,4}(?::[0-9A-Fa-f]{1,4})*)?)::((?:[0-9A-Fa-f]{1,4}(?::[0-9A-Fa-f]{1,4})*)?)" + '\' + "z")
#set($IPV6_REGEX = "" + '\' + "A(?:[0-9a-fA-F]{1,4}:){7}[0-9a-fA-F]{1,4}" + '\' + "z")
## ------------------------------------ macros ------------------------------------------------------------------
## Emit the full configuration block for one health check: header, name
## (the 'name' field stores the reference count), retries, timing and --
## for HTTP(S) monitors -- method, path and expected response codes.
## NOTE(review): $hcHttpMethod is not a macro argument; it is read from the
## calling template's scope -- confirm it is always set by callers.
#macro(os_print_hc_config,$hcType, $hcUUID,$hcRefCount,$hcMxRetries,$hcDelay, $hcTimeout,$hcUrlPath,$hcExpectedCodes)
#os_print_health_check_header($hcType, $hcUUID)
dport none
name "$hcRefCount"
retry $hcMxRetries
#os_print_inter_and_timeout($hcDelay, $hcTimeout)
#if($hcType.startsWith("HTTP"))
#if($hcType=="HTTPS")
ssl enabled
#end
http
#os_print_http_method($hcHttpMethod, $hcUUID)
path "$hcUrlPath"
#os_print_http_response($hcExpectedCodes, $hcUUID)
#end
#end
## Emit "inter"/"timeout", clamped to the device maximums.
#macro(os_print_inter_and_timeout, $hcDelay, $hcTimeout)
##Alteon requires that the timeout will be smaller or equal to the interval.
#if($hcTimeout > $hcDelay)
#set($hcDelay=$hcTimeout)
#end
#if($hcDelay>$HC_MAX_DELAY)
#set($hcDelay=$HC_MAX_DELAY)
#end
inter $hcDelay
#if($hcTimeout>$HC_MAX_TIMEOUT)
#set($hcTimeout=$HC_MAX_TIMEOUT)
#end
timeout $hcTimeout
#end
## Read the current reference count of a health check from the device: 0
## when the check does not exist, otherwise the count stored in its 'Name'
## field. The value is returned via $refCountRetArray[0].
#macro(os_get_ref_count_for_hc, $hcUUID, $hcType, $refCountRetArray)
## find the vDirect POJO for the given $hcType
#set($hceName=$HC_TYPE_TO_POJO_CONVERSION.get($hcType))
#if($adc.isNull($hceName))
#error("Unknown Health Check type occured $hcType with id $hcUUID!")
#end
## strip the uuid
#set($stripped_uuid=[-1])
#os_strip_uuid($hcUUID,$stripped_uuid)
## allocate a bean and look for it by its ID (openstack uuid is used here)
#set($hce = $adc.newBean($hceName))
#set($hce.ID = $stripped_uuid[0])
#set($hce2 = $adc.read($hce))
#if ($hce2.isEmpty())
## hc was not found on the device - RefCount is zero
#set($refCountRetArray[0]=0)
#else
## hc was found on the device - we use the 'Name' field to store its reference count
#set($hcRefCountStr=$hce2.Name)
#set($hcRefCount=0)
#set($hcRefCount=$hcRefCount.parseInt($hcRefCountStr))
#set($refCountRetArray[0]=$hcRefCount)
#end
#end
## Emit the "/c/slb/advhc/health ..." header line for a health check.
## Passing $NONE as the type emits the header without a type (used when only
## renaming/deleting); an unknown type raises an error.
#macro(os_print_health_check_header, $hcType, $hcUUID)
#set($stripped_uuid=[-1])
#os_strip_uuid($hcUUID,$stripped_uuid)
#if($hcType==$NONE)
/c/slb/advhc/health $stripped_uuid[0]
#else
#set($calcHcType=$HC_TYPE_CONVERSION.get($hcType))
#if($adc.isNull($calcHcType))
#error("Unsupported Health Monitor type $hcType in id $hcUUID!")
#else
/c/slb/advhc/health $stripped_uuid[0] $calcHcType
#end
#end
#end
## Emit the HTTP "method" command; only get/head/post are supported.
#macro(os_print_http_method, $method, $hcUUID)
#set($lower_method=$method.toLowerCase())
#set($found=$HC_HTTP_METHODS.contains($lower_method))
#if(!$found)
#error("Unsupported HTTP method $method for id $hcUUID!")
#else
method $lower_method
#end
#end
## Split a "low-high" range string into two ints returned via $result[0..1].
## NOTE(review): relies on $dash_index being set by the caller
## (os_print_http_response) -- Velocity's macro scoping makes it visible.
#macro(os_convert_range_into_two_integers, $response,$result)
#set($left=$response.substring(0,$dash_index))
#set($offset=$dash_index + 1)
#set($right=$response.substring($offset,$response.length()))
#set($leftInt=0)
#set($leftInt=$leftInt.parseInt($left))
#set($rightInt=0)
#set($rightInt=$rightInt.parseInt($right))
#set($result[0]=$leftInt)
#set($result[1]=$rightInt)
#end
## Emit the health-check "response" command for HTTP(S) monitors.
## $response is either a single expected code/string or a "low-high" range;
## a range is expanded into a comma-separated list of codes.
#macro(os_print_http_response, $response, $hcUUID)
#set($dash_index=$response.indexOf('-'))
#if($dash_index != -1)
#set($result=[-1,-1])
#os_convert_range_into_two_integers($response,$result)
#set($range_size=$result[1] - $result[0])
#if($range_size > $HC_HTTP_MAX_RESPONSE_ELEMENT_COUNT)
#error("Too many response codes in the range $response ($range_size) for id $hcUUID! Up to $HC_HTTP_MAX_RESPONSE_ELEMENT_COUNT response codes are supported!")
#else
## Expand the range into "code1,code2,..." and drop the trailing comma.
#set($calc_response="")
#foreach($response_code in [$result[0]..$result[1]])
#set($calc_response=$calc_response + $response_code + ",")
#end
## BUG FIX: removed a stray ')' that followed this #set directive and was
## rendered verbatim into the device configuration.
#set($len=$calc_response.length() - 1)
#set($calc_response=$calc_response.substring(0,$len))
response $calc_response none ""
#end
#else
#if($response.length() > $HC_HTTP_MAX_RESPONSE_STRING_SIZE)
## BUG FIX: the old message described a range and referenced the undefined
## $range_size; this branch actually limits the plain string's length.
#error("Expected codes string $response is too long for id $hcUUID! Up to $HC_HTTP_MAX_RESPONSE_STRING_SIZE characters are supported!")
#else
response $response none ""
#end
#end
#end
## Remove the "-" from $uuid
## Make sure it is not too long
## Return value using $result
#macro(os_strip_uuid,$uuid,$result)
#set($strippedUUID=$uuid.replaceAll("-",""))
#if($strippedUUID.length() > $HC_MAX_ID_LENGTH)
#error("UUID $strippedUUID is too long and cant be used as Health Monitor ID. Maximum length is $HC_MAX_ID_LENGTH.")
#end
#set($result[0]=$strippedUUID)
#end
## --------------------------------------------------------------
## Get the IP version of the incoming argument
## Raise an exception if no match was found
## --------------------------------------------------------------
#macro(os_get_ip_version, $ip)
#if($ip.matches($IPV4_REGEX))
$IPV4
#elseif($ip.matches($IPV6_REGEX) || $ip.matches($IPV6_HEXCOMPRESSED_REGEX) || $ip.matches($IPV6_6HEX4DEC_REGEX) || $ip.matches($IPV6_HEX4DECCOMPRESSED_REGEX))
$IPV6
#else
#error("IP Address $ip is not a valid IP!")
#end
#end
## Emit the session-persistence ("pbind") command for a virtual service:
## APP_COOKIE -> passive cookie, HTTP_COOKIE -> inserted cookie,
## SOURCE_IP -> SSL session id for https services, client IP otherwise.
#macro(os_print_persist_command, $persistMethod, $cookieName, $serviceType)
#if ($persistMethod=="APP_COOKIE")
pbind cookie passive "$cookieName" 1 64 enable
#elseif ($persistMethod=="HTTP_COOKIE")
pbind cookie insert "$cookieName" secure
#elseif ($persistMethod=="SOURCE_IP")
#if($serviceType=="https")
pbind sslid
#else
pbind clientip norport
#end
#end
#end
## Map an OpenStack protocol name onto an Alteon service type; "tcp" maps
## to "basic-slb", unknown types raise an error.
## NOTE(review): the tcp branch renders the value WITH surrounding quotes,
## unlike the other branch -- confirm the CLI accepts "basic-slb" quoted.
#macro(os_get_service_type, $serviceType)
#set($serviceType=$serviceType.toLowerCase())
#if($SERVICE_TYPES.contains($serviceType))
$serviceType
#else
#if($serviceType=="tcp")
"basic-slb"
#else
#error("Unsupported service type $serviceType!")
#end
#end
#end
## Emit the group "metric" command for an OpenStack LB algorithm.
#macro(os_print_metric_command, $serviceAlgorithm)
#set($algo=$SERVICE_ALGO_CONVERSION.get($serviceAlgorithm))
#if($adc.isNull($algo))
#error("Unsupported group metric type $serviceAlgorithm!")
#else
metric $algo
#end
#end
##
## Verify that a field is not NULL/Empty and its length is < maximal length
##
#macro(verify_field_length,$field_name,$field_value,$max_len)
#if($adc.isNull($field_value) || $field_value.isEmpty())
#error("Field '$field_name' can not be NULL or empty.")
#end
#if($field_value.length() > $max_len)
#error("Field '$field_name' can not be longer than $max_len chars. Current length is $field_value.length()")
#end
#end
##
## Map boolean value to ena/dis
##
#macro(os_print_bool_to_ena_dis, $boolean_val)
#if($boolean_val)
ena
#else
dis
#end
#end

View File

@ -0,0 +1,38 @@
## Compute the Alteon indexes (virtual server and real-server group) for a
## given <VIP, service port> pair. If a virtual service already exists for
## the pair its indexes are reused, otherwise free indexes are allocated.
#property("summary", "Init Indexes - date: [2013-05-08]")
#param("vip", "ip", "in", "prompt = IP address for the virtual service")
#param("virtSvcPort", "int", "in", "prompt = Virtual service tcp port")
## The index of the real server group
#param("groupId", "int", "out")
## The index of the virtual server
#param("virtId", "int", "out")
## If values are new or were pre-existing
#param("newValues", "bool", "out")
#set($newValues=true)
#set($virt = $adc.newBean("/c/slb/virt"))
#set($virt.ipAddress = $vip)
#set($virts = $adc.findAll($virt))
## Scan every virtual server configured with this VIP, looking for one that
## already carries a service on the requested port.
#foreach($virt in $virts)
#set($groupId = 0)
#set($virtId = $virt.index)
#set($virse=$adc.newBean("/c/slb/virt/service"))
#set($virse.VirtualServerIndex=$virtId)
#set($virse.VirtPort=$virtSvcPort)
##consider to also set $virse.Index=0 and do $adc.Read
#set($service=$adc.findFirst($virse))
#if($adc.isNotNull($service))
#set($newValues = false)
#set($virtId = $service.VirtualServerIndex)
#set($groupId = $service.RealGroup)
#break
#end
#end
## NOTE(review): when a virt with this VIP exists but has no service on the
## requested port, fresh indexes are allocated below instead of reusing the
## found virt's index -- confirm this is the intended behavior.
#if($newValues)
#set($virtId = $adc.getFreeIndexWithDefault("/c/slb/virt", 1))
#set($groupId = $adc.getFreeIndexWithDefault("/c/slb/group", 1))
#end

View File

@ -0,0 +1,154 @@
#property("summary", "Manage Health Checks - date: [2013-05-13 4]")
## Reconcile the health monitors bound to an Alteon real-server group with
## the desired set coming from OpenStack: update monitors present in both
## sets, create new ones, delete (or de-reference) removed ones and rebuild
## the group's LOGEXP expression. All hm_* arrays are parallel.
#parse("openstack_l4/openstack_common.vm")
#param("group_id", "int", "in", "prompt=Group ID")
#param("curr_hm_uuid_array", "string[]", "in", "prompt=Current Health monitors UUID array")
#param("curr_hm_type_array", "string[]", "in", "prompt=Current Health monitors type array")
#param("hm_uuid_array", "string[]", "in", "prompt=Health monitors UUID array")
#param("hm_type_array", "string[]", "in", "prompt=Health monitors type array")
#param("hm_delay_array", "int[]", "in", "prompt=Health monitors delay array")
#param("hm_timeout_array", "int[]", "in", "prompt=Health monitors timeout array")
#param("hm_max_retries_array", "int[]", "in", "prompt=Health monitors max retries array")
#param("hm_admin_state_up_array", "bool[]", "in", "prompt=Health monitors admin state array")
#param("hm_url_path_array", "string[]", "in", "prompt=Health monitors url path array")
#param("hm_http_method_array", "string[]", "in", "prompt=Health monitors http method array")
#param("hm_expected_codes_array", "string[]", "in", "prompt=Health monitor expected codes_array")
#if($hm_uuid_array.size() > $HC_MAX_HCS_PER_GROUP)
## BUG FIX: the message used "$hm_uuid_array.length", which is not a valid
## property and was rendered literally; also fixed the "ammount" typo.
#error("Got $hm_uuid_array.size() health monitors which is more than the supported $HC_MAX_HCS_PER_GROUP health monitors per pool with alteon id $group_id on device $adc")
#end
## -----------------------------------------------------------------------------
## prepare a map of hcs connected to the group with their ref count
## -----------------------------------------------------------------------------
#set($currHCs={})
#set($counter=0)
#foreach ($hcUUID in $curr_hm_uuid_array)
#if($hcUUID != $NONE)
#set($hcType=$curr_hm_type_array[$counter])
#set($refCountRetArray=[-1])
#os_get_ref_count_for_hc($hcUUID, $hcType, $refCountRetArray)
##consider raising error if ref count is 0
#set($not_in_use=$currHCs.put($hcUUID, $refCountRetArray[0]))
#end
## BUG FIX: the index must advance on EVERY iteration because the uuid and
## type arrays are parallel; it previously advanced only for non-NONE uuids,
## so one NONE entry shifted every subsequent type lookup.
#set($counter=$counter+1)
#end
## ----------------------------------------------------
## prepare map of hcs to be connected to the group
## ----------------------------------------------------
## Maps hc uuid -> its index in the parallel hm_* input arrays.
#set($newHCs={})
#set($hcrIndex=0)
#foreach ($hcUUID in $hm_uuid_array)
#if($hcUUID!=$NONE)
#set($not_in_use=$newHCs.put($hcUUID, $hcrIndex))
#end
#set($hcrIndex=$hcrIndex+1)
#end
## ---------------------------------------
## handle the hcs that should be updated
## ---------------------------------------
## Update set = intersection of current and desired uuids.
#set($toUpdateHCs={})
#set($not_in_use=$toUpdateHCs.putAll($currHCs))
#set($not_in_use=$toUpdateHCs.keySet().retainAll($newHCs.keySet()))
#log("Health monitors to be updated: $toUpdateHCs")
#foreach ($entry in $toUpdateHCs.entrySet())
#set($hcRefCount=$entry.getValue())
#set($hcUUID=$entry.getKey())
#set($hcIndex=$newHCs.get($hcUUID))
#set($hcType=$hm_type_array[$hcIndex])
#set($hcDelay=$hm_delay_array[$hcIndex])
#set($hcTimeout=$hm_timeout_array[$hcIndex])
#set($hcMxRetries=$hm_max_retries_array[$hcIndex])
#set($hcAdminStateUp=$hm_admin_state_up_array[$hcIndex])
#set($hcUrlPath=$hm_url_path_array[$hcIndex])
#set($hcHttpMethod=$hm_http_method_array[$hcIndex])
#set($hcExpectedCodes=$hm_expected_codes_array[$hcIndex])
#os_print_hc_config($hcType, $hcUUID,$hcRefCount,$hcMxRetries,$hcDelay, $hcTimeout,$hcUrlPath,$hcExpectedCodes)
#end
## ---------------------------------------
## handle the hcs that should be created
## ---------------------------------------
## Create set = desired uuids minus current ones. The ref count written to
## the device is 1 plus whatever count the hc already has (it may be shared
## by other pools).
#set($toCreateHCs={})
#set($not_in_use=$toCreateHCs.putAll($newHCs))
#set($not_in_use=$toCreateHCs.keySet().removeAll($currHCs.keySet()))
#log("Health monitors to be created: $toCreateHCs")
#foreach ($entry in $toCreateHCs.entrySet())
#set($hcUUID=$entry.getKey())
#set($hcIndex=$newHCs.get($hcUUID))
#set($hcType=$hm_type_array[$hcIndex])
#set($hcDelay=$hm_delay_array[$hcIndex])
#set($hcTimeout=$hm_timeout_array[$hcIndex])
#set($hcMxRetries=$hm_max_retries_array[$hcIndex])
#set($hcAdminStateUp=$hm_admin_state_up_array[$hcIndex])
#set($hcUrlPath=$hm_url_path_array[$hcIndex])
#set($hcHttpMethod=$hm_http_method_array[$hcIndex])
#set($hcExpectedCodes=$hm_expected_codes_array[$hcIndex])
#set($hcRefCount=1)
#set($refCountRetArray=[-1])
## query the device and check how many references this hc has already
#os_get_ref_count_for_hc($hcUUID, $hcType, $refCountRetArray)
#set($hcRefCount=$hcRefCount+$refCountRetArray[0])
#os_print_hc_config($hcType, $hcUUID,$hcRefCount,$hcMxRetries,$hcDelay, $hcTimeout,$hcUrlPath,$hcExpectedCodes)
#end
## ---------------------------------------
## handle the hcs that should be deleted
## ---------------------------------------
## Delete set = current uuids minus desired ones. A health check still
## referenced by other pools only has its count (stored in 'name') lowered.
#set($toDelHCs={})
#set($not_in_use=$toDelHCs.putAll($currHCs))
#set($not_in_use=$toDelHCs.keySet().removeAll($newHCs.keySet()))
#log("Health monitors to be deleted: $toDelHCs")
#foreach ($entry in $toDelHCs.entrySet())
#set($hcUUID=$entry.getKey())
#set($hcRefCount=$entry.getValue())
#set($hcRefCount=$hcRefCount - 1)
#os_print_health_check_header($NONE, $hcUUID)
## if we still have positive ref count - keep the hc, else - remove it
#if($hcRefCount > 0)
name "$hcRefCount"
#else
del
#end
#end
## ------------------------------
## handle the logexp config block
## ------------------------------
## Build the LOGEXP "<id1>&<id2>..." expression from the admin-up monitors;
## fall back to the default (arp) when none are enabled.
#set($logExp="")
#set($counter=0)
#if($hm_uuid_array.size() > 0)
#foreach($hcUUID in $hm_uuid_array)
#if($hm_admin_state_up_array[$counter] && $hcUUID != $NONE)
#set($stripped_uuid=[-1])
#os_strip_uuid($hcUUID,$stripped_uuid)
#set($_log_exp_uuid=$stripped_uuid[0])
#set($logExp=$logExp + $_log_exp_uuid + "&")
#end
#set($counter=$counter+1)
#end
#if($logExp.length() > 0)
#set($len=$logExp.length() - 1)
#set($logExp=$logExp.substring(0,$len))
#else
#set($logExp=$DEFAULT_HEALTH_MONITOR_TYPE)
#end
#else
#set($logExp=$DEFAULT_HEALTH_MONITOR_TYPE)
#end
/c/slb/advhc/health HC_Group_$group_id LOGEXP
logexp $logExp
/c/slb/group $group_id
health HC_Group_$group_id

View File

@ -0,0 +1,62 @@
#parse("openstack_l4/openstack_common.vm")
#property("summary", "Manage Configuration of L4 HA service date: [2013-05-08]")
## Configure (or reconfigure) one L4 virtual service: the real-server group
## metric, the virtual server address and its service, session persistence
## and proxy IP (PIP) settings.
#param("groupId", "int", "in", "prompt = Group id")
#param("virtId", "int", "in", "prompt = Virt id")
## BUG FIX: a comma was missing between "in" and the prompt argument.
#param("virtServerEnabled", "bool", "in", "prompt = Is VIP enabled?")
#param("vip", "ip", "in", "prompt = IP address for the virtual service")
#param("virtSvcPort", "int", "in", "prompt = Virtual Service Port (0 means no value)")
#param("virtSvcType", "string", "in", "prompt = Virtual Service Type", "values=HTTP,HTTPS,TCP")
#param("svcPortAlgorithm", "string", "in", "prompt = Memeber Selection Algorithm", "values=ROUND_ROBIN,LEAST_CONNECTIONS,SOURCE_IP")
## BUG FIX: a comma was missing between "in" and the prompt argument.
#param("groupEnabled", "bool", "in", "prompt = Is Group enabled?")
#param("virtSvcPersistMethod", "string", "in", "prompt = Virtual Service Persistence Method", "values=SOURCE_IP,HTTP_COOKIE,APP_COOKIE")
#param("virtSvcCookieName", "string", "in", "prompt = Virtual Service Cookie Name")
##setup global slb flags
/c/slb/adv/direct ena
#set($vipIpVer="#os_get_ip_version($vip)")
#set($vipIpVer=$vipIpVer.trim())
## name is maximum 31 characters
/c/slb/group $groupId
#verify_field_length("Group name","$groupId",$GROUP_NAME_MAX_LENGTH)
name "$groupId"
#os_print_metric_command($svcPortAlgorithm)
## The OpenStack admin state exists on the vip, the pool and the members.
## As a member can only be assigned to one pool, disabling the pool has the
## same effect as disabling all of its members; currently Alteon has no way
## to disable the pool itself, hence the commented-out block below.
## #if($groupEnabled)
## ena
## #else
## dis
## #end
##clean the virt and virs before redoing the definition
/c/slb/virt $virtId
del
## vname is maximum 32 characters
/c/slb/virt $virtId
ipver $vipIpVer
vip $vip
#os_print_bool_to_ena_dis($virtServerEnabled)
#set($serviceType="#os_get_service_type($virtSvcType)")
#set($serviceType=$serviceType.trim())
/c/slb/virt $virtId/service $virtSvcPort $serviceType
group $groupId
rport 0
/c/slb/virt $virtId/service $virtSvcPort $serviceType
#os_print_persist_command($virtSvcPersistMethod, $virtSvcCookieName, $serviceType)
/c/slb/virt $virtId/service $virtSvcPort $serviceType/pip
mode address
#if($vipIpVer==$IPV4)
addr v4 $vip 255.255.255.255 v6 none persist disable
#else
addr v4 none v6 $vip 128 persist disable
#end

View File

@ -0,0 +1,156 @@
#parse("openstack_l4/openstack_common.vm")
#property("summary", "Manage Real IPs - date: [2013-05-13]")
##I am currently using the needed function from this file so not to handle the path structure.
##parse("common_lib.vm")
## Manage RIPs
## Layer 4 part
## Reconcile the real servers attached to group $groupId with the requested
## member arrays (all parallel): update weight/state of members present in
## both sets, create and attach new ones, detach and delete removed ones.
## Existing and requested servers are matched by an "<ip>-<port>" key.
#param("groupId", "int", "in", "prompt=Group ID")
#param("curRealServerIds", "int[]", "in", "prompt=Current Real Server IDs")
#param("memberIps", "ip[]", "in", "prompt=Updated Real Server IPs (0.0.0.0 means no value)")
#param("memberWeights", "int[]", "in", "prompt=Real Server Weights")
#param("memberPorts", "int[]", "in", "prompt=Real Server Ports")
#param("memberAdminStates", "bool[]", "in", "prompt=Real Server Admin States")
#param("realServerIds", "int[]", "out")
## implementation
#set($currRealServers={})
## calculate the current list of servers connected to the group
#foreach ($serverId in $curRealServerIds)
#if($serverId>0)
#set($rse = $adc.newBean("/c/slb/real"))
#set($rse.Index = $serverId)
#set($rse2 = $adc.read($rse))
#if ($adc.isNull($rse2))
#error ("Server $serverId was not found")
#end
#set($key="#generate_key($rse2)")
#set($key=$key.trim())
#set($addStatus=$currRealServers.put($key, $serverId))
#end
#end
#set($newRealServers={})
#set($memberIndex=0)
## calculate the new list of servers connected to the group
#foreach ($memberIp in $memberIps)
#if($memberIp!=$NO_IP)
#set($memberPort=$memberPorts[$memberIndex])
#set($key="#generate_key2($memberIp, $memberPort)")
#set($key=$key.trim())
#set($addStatus=$newRealServers.put($key, $memberIndex))
#end
#set($memberIndex=$memberIndex+1)
#end
## Update set = servers present in both the current and requested lists.
#set($toUpdateRealServers={})
#set($addStatus=$toUpdateRealServers.putAll($currRealServers))
#set($addStatus=$toUpdateRealServers.keySet().retainAll($newRealServers.keySet()))
#log("Real servers to be updated: $toUpdateRealServers")
#foreach ($entry in $toUpdateRealServers.entrySet())
#set($updateId=$entry.getValue())
#set($updateKey=$entry.getKey())
#set($memberIndex=$newRealServers.get($updateKey))
#set($memberWeight=$memberWeights[$memberIndex])
#set($memberAdminState=$memberAdminStates[$memberIndex])
/c/slb/real $updateId
#print_weight($memberWeight)
#os_print_bool_to_ena_dis($memberAdminState)
#end
## Create set = requested members with no matching existing server.
#set($createId=0)
#set($toCreateRealServers={})
#set($addStatus=$toCreateRealServers.putAll($newRealServers))
#set($addStatus=$toCreateRealServers.keySet().removeAll($currRealServers.keySet()))
#log("Real servers to be created: $toCreateRealServers")
#foreach ($entry in $toCreateRealServers.entrySet())
#set($createId=$adc.getFreeIndex("/c/slb/real", $createId))
#set($memberIndex=$entry.getValue())
#set($memberWeight=$memberWeights[$memberIndex])
#set($memberPort=$memberPorts[$memberIndex])
#set($memberIp=$memberIps[$memberIndex])
#set($memberIpVer="#os_get_ip_version($memberIp)")
#set($memberIpVer=$memberIpVer.trim())
#set($memberAdminState=$memberAdminStates[$memberIndex])
/c/slb/real $createId
ipver $memberIpVer
rip $memberIp
#print_weight($memberWeight)
addport $memberPort
#os_print_bool_to_ena_dis($memberAdminState)
/c/slb/group $groupId
ipver $memberIpVer
add $createId
#end
## Delete set = existing servers that are no longer requested.
#set($toDelRealServers={})
#set($addStatus=$toDelRealServers.putAll($currRealServers))
#set($addStatus=$toDelRealServers.keySet().removeAll($newRealServers.keySet()))
#log("Real servers to be deleted: $toDelRealServers")
#foreach ($delId in $toDelRealServers.values())
/c/slb/group $groupId
rem $delId
/c/slb/real $delId
del
#end
## Re-read the group from the device and output the final member id list.
#set($realServerIds = [])
#set($group1 = $adc.newBean("/c/slb/group"))
#set($group1.Index = $groupId)
#set($group2 = $adc.read($group1))
#set($realServerIds=$adc.readNumbersFromBitmapPlusOne($group2.RealServers))
## Build the identity key "<ip>-<rport>[-<rport>...]" for an existing
## real-server bean. Callers trim the surrounding template whitespace.
#macro(generate_key, $rsBean)
#set($ret_key="")
#if ($rsBean.IpVer == "IPV4")
#set($ret_key=$ret_key+$rsBean.IpAddr)
#else
#set($ret_key=$ret_key+$rsBean.Ipv6Addr)
#end
#set($rports=[])
#set($dummy="#get_real_server_rports($rsBean.Index, $rports)")
#foreach($rport in $rports)
#set($ret_key=$ret_key+"-"+$rport)
#end
$ret_key
#end
## Build the identity key "<ip>-<rport>" for a desired member; must produce
## the same format as #generate_key so map keys are comparable.
#macro(generate_key2, $ip_address, $rport)
#set($ret_key="")
#set($ret_key=$ret_key + $ip_address + "-" + $rport)
$ret_key
#end
## Collect the real ports configured for real server $rs_id into $rports.
## NOTE(review): only port entry Index=1 is read, so at most one port (or 0
## when none is configured) is returned - confirm this is intentional.
#macro(get_real_server_rports, $rs_id, $rports)
#set($pe = $adc.newBean('SlbNewCfgRealServPortEntry'))
#set($pe.Index=1)
#set($pe.RealServIndex= $rs_id)
#set($pei = $adc.read($pe))
#if($adc.isNotNull($pei))
#set($dummy=$rports.add($pei.RealPort))
#else
#set($dummy=$rports.add(0))
#end
#end
## Emit the device "weight" command, scaling the neutron member weight down
## via 1 + weight/5 (integer division).
#macro(print_weight, $weight)
#set($weight=1+$weight/5)
weight $weight
#end

View File

@ -0,0 +1,35 @@
#parse("openstack_l4/openstack_common.vm")
#property("summary", "Delete L4 service and Real Servers - date: [2013-05-12]")
## Tear down the entire L4 configuration created by this workflow: virtual
## server, server group, health checks and all real servers.
## Layer 4 part
#param("virtId", "int", "in", "prompt=Virt ID")
#param("groupId", "int", "in", "prompt=Group ID")
#param("curRealServerIds", "int[]", "in", "prompt=Real Server IDs")
#param("curr_hm_uuid_array", "string[]", "in", "prompt=Current Health monitors UUID array")
## L4 implementation
## delete the virtual server
/c/slb/virt $virtId
del
## delete the server group
/c/slb/group $groupId
del
## delete the aggregate (HC_Group) health check that was attached to the group
/c/slb/advhc/health HC_Group_$groupId
del
## delete every individual health monitor, addressed by its stripped uuid
#foreach ($uuid in $curr_hm_uuid_array)
#set($stripped_uuid=[-1])
#os_strip_uuid($uuid,$stripped_uuid)
/c/slb/advhc/health $stripped_uuid[0]
del
#end
## finally delete all real servers that belonged to the group
#foreach ($serverId in $curRealServerIds)
/c/slb/real $serverId
del
#end

View File

@ -0,0 +1,258 @@
<?xml version="1.0" ?>
<workflow createAction="init" deleteAction="teardown" name="openstack_l4" xmlns="http://www.radware.com/vdirect">
<description>L4 Workflow for OpenStack LBaaS [2013-07-25 11:50:20.501000]</description>
<persist>
<parameters>
<!-- The vDirect Service -->
<parameter name="service" prompt="Radware vDirect ADC Service" type="adcService"/>
<!-- Vip -->
<parameter defaultValue="1.1.1.1" name="vip_address" prompt="VIP address" type="ip"/>
<parameter defaultValue="80" name="vip_protocol_port" prompt="VIP port" type="int"/>
<parameter defaultValue="HTTP" name="vip_protocol" prompt="VIP protocol" type="string" values="HTTP,HTTPS,TCP"/>
<parameter defaultValue="10" name="vip_connection_limit" prompt="VIP connection limit" type="int"/>
<parameter defaultValue="true" name="vip_admin_state_up" prompt="Is VIP enabled?" type="bool"/>
<parameter defaultValue="SOURCE_IP" name="vip_session_persistence_type" prompt="VIP session persistence type" type="string" values="SOURCE_IP,HTTP_COOKIE,APP_COOKIE"/>
<parameter defaultValue="none" name="vip_session_persistence_cookie_name" prompt="VIP session persistence cookie name" type="string"/>
<!-- Pool -->
<!-- pool_protocol is currently not used and expected to be the same as the vip_protocol-->
<parameter defaultValue="HTTP" name="pool_protocol" prompt="Pool protocol" type="string" values="HTTP,HTTPS,TCP"/>
<parameter defaultValue="ROUND_ROBIN" name="pool_lb_method" prompt="Pool LB method" type="string" values="ROUND_ROBIN,LEAST_CONNECTIONS,SOURCE_IP"/>
<parameter defaultValue="true" name="pool_admin_state_up" prompt="Is Pool enabled?" type="bool"/>
<!-- Members -->
<parameter defaultValue="1.1.1.2" name="member_address_array" prompt="Members address array" type="ip[]"/>
<parameter defaultValue="81" name="member_protocol_port_array" prompt="Members protocol port array" type="int[]"/>
<parameter defaultValue="30" name="member_weight_array" prompt="Members weight array" type="int[]"/>
<parameter defaultValue="true" name="member_admin_state_up_array" prompt="Are members enabled?" type="bool[]"/>
<!-- Health Monitors -->
<parameter defaultValue="none" name="hm_uuid_array" prompt="Health monitors UUID array" type="string[]"/>
<parameter defaultValue="HTTP" name="hm_type_array" prompt="Health monitors type array" type="string[]" values="PING,TCP,HTTP,HTTPS"/>
<parameter defaultValue="10" name="hm_delay_array" prompt="Health monitors delay array" type="int[]"/>
<parameter defaultValue="20" name="hm_timeout_array" prompt="Health monitors timeout array" type="int[]"/>
<parameter defaultValue="3" name="hm_max_retries_array" prompt="Health monitors max retries array" type="int[]"/>
<parameter defaultValue="true" name="hm_admin_state_up_array" prompt="Health monitors admin state array" type="bool[]"/>
<parameter defaultValue="/" name="hm_url_path_array" prompt="Health monitors url path array" type="string[]"/>
<parameter defaultValue="GET" name="hm_http_method_array" prompt="Health monitors http method array" type="string[]"/>
<parameter defaultValue="200" name="hm_expected_codes_array" prompt="Health monitor expected codes_array" type="string[]"/>
<parameter defaultValue="save" name="apply_type" prompt="Device apply type" type="string" values="none,apply,save"/>
<!-- Calculated -->
<parameter name="ha_enabled" type="bool"/>
<parameter name="need_new_values" type="bool"/>
<parameter name="virt_id1" type="int"/>
<parameter name="group_id1" type="int"/>
<parameter name="new_values1" type="bool"/>
<parameter name="real_server_ids1" type="int[]"/>
<parameter name="virt_id2" type="int"/>
<parameter name="group_id2" type="int"/>
<parameter name="new_values2" type="bool"/>
<parameter name="real_server_ids2" type="int[]"/>
<parameter name="curr_hm_uuid_array" type="string[]"/>
<parameter name="curr_hm_type_array" type="string[]"/>
<parameter defaultValue="none" name="none_value" prompt="Constant for none" type="string" values="none"/>
<!-- Calculated -->
</parameters>
</persist>
<states>
<state name="initialized"/>
<state name="baseapplied"/>
<state name="removed"/>
</states>
<actions>
<action fromState="none" name="init" toState="initialized">
<inputs>
<parameters>
<parameter name="service"/>
</parameters>
</inputs>
<sequence>
<log message="This may take time... making sure that the service is provisioned."/>
<script file="wait_for_service.groovy" name="wait_for_service"/>
<log message="Service is provisioned."/>
<set saveAs="$ha_enabled" value="$service.request.ha"/>
</sequence>
<onError>
<log message="Service is not ready!!"/>
</onError>
</action>
<action fromState="initialized,baseapplied" name="BaseCreate" toState="baseapplied">
<inputs>
<parameters>
<parameter name="vip_address"/>
<parameter name="vip_protocol_port"/>
<parameter name="vip_protocol"/>
<parameter name="vip_connection_limit"/>
<parameter name="vip_admin_state_up"/>
<parameter name="vip_session_persistence_type"/>
<parameter name="vip_session_persistence_cookie_name"/>
<parameter name="pool_protocol"/>
<parameter name="pool_lb_method"/>
<parameter name="pool_admin_state_up"/>
<parameter name="member_address_array"/>
<parameter name="member_protocol_port_array"/>
<parameter name="member_weight_array"/>
<parameter name="member_admin_state_up_array"/>
<parameter name="hm_uuid_array"/>
<parameter name="hm_type_array"/>
<parameter name="hm_delay_array"/>
<parameter name="hm_timeout_array"/>
<parameter name="hm_max_retries_array"/>
<parameter name="hm_admin_state_up_array"/>
<parameter name="hm_url_path_array"/>
<parameter name="hm_http_method_array"/>
<parameter name="hm_expected_codes_array"/>
</parameters>
</inputs>
<devices>
<device device="$service.primary" name="adc1"/>
<device device="$service.secondary" if="$ha_enabled" name="adc2"/>
</devices>
<!--Handle the first time we run for all services-->
<sequence ifState="initialized">
<set saveAs="$need_new_values" value="true"/>
<set saveAs="$none_value" value="'none'"/>
<set saveAs="$real_server_ids1" value="[0]"/>
<set saveAs="$real_server_ids2" value="[0]"/>
<set saveAs="$curr_hm_uuid_array" value="['none']"/>
<set saveAs="$curr_hm_type_array" value="['none']"/>
</sequence>
<!--Handle the need to allocated indexes if not allocated-->
<sequence if="$need_new_values">
<configuration file="openstack_init_indexes.vm" name="init_indexes_adc1">
<parameterMapping>
<map from="$adc1" to="$adc"/>
<map from="$vip_address" to="$vip"/>
<map from="$vip_protocol_port" to="$virtSvcPort"/>
<!-- Outputs -->
<map from="$group_id1" to="$groupId"/>
<map from="$virt_id1" to="$virtId"/>
<map from="$new_values1" to="$newValues"/>
</parameterMapping>
</configuration>
<error if="!$new_values1" message="Service on vip $vip_address and port $vip_protocol_port already exists on device $adc1!"/>
<configuration file="openstack_init_indexes.vm" if="$ha_enabled" name="init_indexes_adc2">
<parameterMapping>
<map from="$adc2" to="$adc"/>
<map from="$vip_address" to="$vip"/>
<map from="$vip_protocol_port" to="$virtSvcPort"/>
<!-- Outputs -->
<map from="$group_id2" to="$groupId"/>
<map from="$virt_id2" to="$virtId"/>
<map from="$new_values2" to="$newValues"/>
</parameterMapping>
</configuration>
<error if="$ha_enabled &amp;&amp; !$new_values2" message="Service on vip $vip_address and port $vip_protocol_port already exists on device $adc2!"/>
<set saveAs="$need_new_values" value="false"/>
</sequence>
<sequence>
<configuration file="openstack_manage_l4.vm" name="manage_l4_adc1">
<parameterMapping>
<map from="$adc1" to="$adc"/>
<map from="$group_id1" to="$groupId"/>
<map from="$virt_id1" to="$virtId"/>
<map from="$vip_admin_state_up" to="$virtServerEnabled"/>
<map from="$vip_address" to="$vip"/>
<map from="$vip_protocol_port" to="$virtSvcPort"/>
<map from="$vip_protocol" to="$virtSvcType"/>
<map from="$pool_lb_method" to="$svcPortAlgorithm"/>
<map from="$pool_admin_state_up" to="$groupEnabled"/>
<map from="$vip_session_persistence_type" to="$virtSvcPersistMethod"/>
<map from="$vip_session_persistence_cookie_name" to="$virtSvcCookieName"/>
</parameterMapping>
</configuration>
<configuration file="openstack_manage_l4.vm" if="$ha_enabled" name="manage_l4_adc2">
<parameterMapping>
<map from="$adc2" to="$adc"/>
<map from="$group_id2" to="$groupId"/>
<map from="$virt_id2" to="$virtId"/>
<map from="$vip_admin_state_up" to="$virtServerEnabled"/>
<map from="$vip_address" to="$vip"/>
<map from="$vip_protocol_port" to="$virtSvcPort"/>
<map from="$vip_protocol" to="$virtSvcType"/>
<map from="$pool_lb_method" to="$svcPortAlgorithm"/>
<map from="$pool_admin_state_up" to="$groupEnabled"/>
<map from="$vip_session_persistence_type" to="$virtSvcPersistMethod"/>
<map from="$vip_session_persistence_cookie_name" to="$virtSvcCookieName"/>
</parameterMapping>
</configuration>
<configuration file="openstack_manage_rips.vm" name="manage_rips_adc1">
<parameterMapping>
<map from="$adc1" to="$adc"/>
<map from="$group_id1" to="$groupId"/>
<map from="$real_server_ids1" to="$curRealServerIds"/>
<map from="$member_address_array" to="$memberIps"/>
<map from="$member_weight_array" to="$memberWeights"/>
<map from="$member_protocol_port_array" to="$memberPorts"/>
<map from="$member_admin_state_up_array" to="$memberAdminStates"/>
<!--output parameters-->
<map from="$real_server_ids1" to="$realServerIds"/>
</parameterMapping>
</configuration>
<configuration file="openstack_manage_rips.vm" if="$ha_enabled" name="manage_rips_adc2">
<parameterMapping>
<map from="$adc2" to="$adc"/>
<map from="$group_id2" to="$groupId"/>
<map from="$real_server_ids2" to="$curRealServerIds"/>
<map from="$member_address_array" to="$memberIps"/>
<map from="$member_weight_array" to="$memberWeights"/>
<map from="$member_protocol_port_array" to="$memberPorts"/>
<map from="$member_admin_state_up_array" to="$memberAdminStates"/>
<!--output parameters-->
<map from="$real_server_ids2" to="$realServerIds"/>
</parameterMapping>
</configuration>
<configuration file="openstack_manage_hcs.vm" name="manage_hcs_adc1">
<parameterMapping>
<map from="$adc1" to="$adc"/>
<map from="$group_id1" to="$group_id"/>
</parameterMapping>
</configuration>
<configuration file="openstack_manage_hcs.vm" if="$ha_enabled" name="manage_hcs_adc2">
<parameterMapping>
<map from="$adc2" to="$adc"/>
<map from="$group_id2" to="$group_id"/>
</parameterMapping>
</configuration>
</sequence>
<!--Now commit to the device-->
<sequence>
<commit apply="$apply_type != 'none'" save="$apply_type == 'save'"/>
<set saveAs="$curr_hm_uuid_array" value="$hm_uuid_array"/>
<set saveAs="$curr_hm_type_array" value="$hm_type_array"/>
</sequence>
<onError>
<autoRevert/>
</onError>
</action>
<action name="teardown" toState="removed">
<devices>
<device device="$service.primary" name="adc1"/>
<device device="$service.secondary" if="$ha_enabled" name="adc2"/>
</devices>
<!--handle when service was defined-->
<sequence if="!$need_new_values">
<configuration file="openstack_teardown_l4.vm" name="teardown_adc1">
<parameterMapping>
<map from="$adc1" to="$adc"/>
<map from="$virt_id1" to="$virtId"/>
<map from="$group_id1" to="$groupId"/>
<map from="$real_server_ids1" to="$curRealServerIds"/>
</parameterMapping>
</configuration>
<configuration file="openstack_teardown_l4.vm" if="$ha_enabled" name="teardown_adc2">
<parameterMapping>
<map from="$adc2" to="$adc"/>
<map from="$virt_id2" to="$virtId"/>
<map from="$group_id2" to="$groupId"/>
<map from="$real_server_ids2" to="$curRealServerIds"/>
</parameterMapping>
</configuration>
</sequence>
<!--Now commit to the device-->
<sequence>
<commit apply="$apply_type != 'none'" save="$apply_type == 'save'"/>
</sequence>
<onError>
<autoRevert/>
</onError>
</action>
</actions>
</workflow>

View File

@ -0,0 +1,11 @@
The Radware LBaaS driver uploads ADC workflows on demand into vDirect. The ADC workflows are composed of the files located under this workflows directory.
The workflows directory is part of the Radware LBaaS driver code included in OpenStack.
These ADC workflows are instantiated and run in the vDirect virtual machine.
Radware's OpenStack LBaaS driver uses the vDirect REST API to activate these workflows and to create, read, update and delete configuration on the Alteon device.
An ADC workflow is composed of:
1. A mandatory XML file, workflow.xml, which defines the different states and the transitions between them, and links each state to the actual code that is run in it.
2. ADC configuration template files (extension .vm), which use an extended Apache Velocity template engine syntax.
3. ADC configuration Groovy script files (extension .groovy).

View File

@ -0,0 +1,15 @@
# Copyright 2013 Radware LTD.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Avishay Balderman, Radware

View File

@ -0,0 +1,409 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 Radware LTD.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Avishay Balderman, Radware
import re
from eventlet import greenthread
import mock
from neutron import context
from neutron.extensions import loadbalancer
from neutron import manager
from neutron.openstack.common import jsonutils as json
from neutron.plugins.common import constants
from neutron.services.loadbalancer.drivers.radware import driver
from neutron.tests.unit.db.loadbalancer import test_db_loadbalancer
# Resources for which a mocked GET must answer with HTTP 200.
GET_200 = ('/api/workflow/', '/api/service/', '/api/workflowTemplate')


def rest_call_function_mock(action, resource, data, headers, binary=False):
    """Stand-in for the vDirect REST client call.

    Routes each HTTP verb to its canned handler; when the module-level
    RESPOND_WITH_ERROR attribute is set, every call fails with a 400.
    """
    if rest_call_function_mock.RESPOND_WITH_ERROR:
        return 400, 'error_status', 'error_reason', None
    if action == 'GET':
        return _get_handler(resource)
    if action == 'DELETE':
        return _delete_handler(resource)
    if action == 'POST':
        return _post_handler(resource, binary)
    return 0, None, None, None


def _get_handler(resource):
    """Mock a GET: template list, plain 200, or async 202 status body."""
    if resource == GET_200[2]:
        return 200, '', '', json.loads('[{"name":"a"},{"name":"b"}]')
    if resource in GET_200:
        return 200, '', '', ''
    return 202, '', '', json.loads('{"complete":"True", "success": "True"}')


def _delete_handler(resource):
    """Mock a DELETE: always an accepted 'Not Found' response."""
    return 202, '', '', {'message': 'Not Found'}


def _post_handler(resource, binary):
    """Mock a POST: workflow action, service creation, or binary upload."""
    if re.search(r'/api/workflow/.+/action/.+', resource):
        return 202, '', '', json.loads('{"uri":"some_uri"}')
    if re.search(r'/api/service\?name=.+', resource):
        return 201, '', '', json.loads(
            '{"links":{"actions":{"provision":"someuri"}}}')
    if binary:
        return 201, '', '', ''
    return 202, '', '', ''
# Service-provider configuration string that registers the Radware driver
# as the default LOADBALANCER provider for these tests.
RADWARE_PROVIDER = ('LOADBALANCER:radware:neutron.services.'
                    'loadbalancer.drivers.radware.driver.'
                    'LoadBalancerDriver:default')
class TestLoadBalancerPluginBase(
    test_db_loadbalancer.LoadBalancerPluginDbTestCase):
    """Base test case loading the LBaaS plugin with the Radware provider."""

    def setUp(self):
        super(TestLoadBalancerPluginBase, self).setUp(
            lbaas_provider=RADWARE_PROVIDER)

        # Cache the loaded LBaaS service plugin for use by subclasses.
        loaded_plugins = manager.NeutronManager().get_service_plugins()
        self.plugin_instance = loaded_plugins[constants.LOADBALANCER]
class TestLoadBalancerPlugin(TestLoadBalancerPluginBase):
    """Exercise the Radware LBaaS driver through the plugin API.

    The vDirect REST client is replaced by a mock whose side effect is
    rest_call_function_mock, so the REST traffic the driver generates can
    be asserted without a real vDirect server.
    """

    def setUp(self):
        super(TestLoadBalancerPlugin, self).setUp()

        # Default to successful mocked REST responses.
        rest_call_function_mock.__dict__.update(
            {'RESPOND_WITH_ERROR': False})

        self.rest_call_mock = mock.Mock(name='rest_call_mock',
                                        side_effect=rest_call_function_mock,
                                        spec=self.plugin_instance.
                                        drivers['radware'].
                                        rest_client.call)
        radware_driver = self.plugin_instance.drivers['radware']
        radware_driver.rest_client.call = self.rest_call_mock
        self.ctx = context.get_admin_context()

        # Let the driver's completion greenthread finish before the mocks
        # are undone.
        self.addCleanup(radware_driver.completion_handler.join)
        self.addCleanup(mock.patch.stopall)

    def _vip_data(self, subnet, pool):
        """Return a vip request body referencing *subnet* and *pool*."""
        return {
            'name': 'vip1',
            'subnet_id': subnet['subnet']['id'],
            'pool_id': pool['pool']['id'],
            'description': '',
            'protocol_port': 80,
            'protocol': 'HTTP',
            'connection_limit': -1,
            'admin_state_up': True,
            'status': 'PENDING_CREATE',
            'tenant_id': self._tenant_id,
            'session_persistence': ''
        }

    def test_create_vip_failure(self):
        """Test the rest call failure handling by Exception raising."""
        self.rest_call_mock.reset_mock()
        with self.subnet() as subnet:
            with self.pool(provider='radware') as pool:
                vip_data = self._vip_data(subnet, pool)

                rest_call_function_mock.__dict__.update(
                    {'RESPOND_WITH_ERROR': True})

                # Pass the arguments positionally: wrapping them in a tuple
                # called create_vip with a single argument and the resulting
                # TypeError (a StandardError subclass) made the assertion
                # pass for the wrong reason.
                self.assertRaises(StandardError,
                                  self.plugin_instance.create_vip,
                                  self.ctx, {'vip': vip_data})

    def test_create_vip(self):
        self.rest_call_mock.reset_mock()
        with self.subnet() as subnet:
            with self.pool(provider='radware') as pool:
                vip_data = self._vip_data(subnet, pool)

                vip = self.plugin_instance.create_vip(
                    self.ctx, {'vip': vip_data})

                # Test creation REST calls
                calls = [
                    mock.call('GET', u'/api/service/srv_' +
                              subnet['subnet']['network_id'], None, None),
                    mock.call('POST', u'/api/service?name=srv_' +
                              subnet['subnet']['network_id'], mock.ANY,
                              driver.CREATE_SERVICE_HEADER),
                    mock.call('GET', u'/api/workflow/l2_l3_' +
                              subnet['subnet']['network_id'], None, None),
                    mock.call('POST', '/api/workflow/l2_l3_' +
                              subnet['subnet']['network_id'] +
                              '/action/setup_l2_l3',
                              mock.ANY, driver.TEMPLATE_HEADER),
                    mock.call('POST', 'someuri',
                              None, driver.PROVISION_HEADER),
                    mock.call('POST', '/api/workflowTemplate/' +
                              driver.L4_WORKFLOW_TEMPLATE_NAME +
                              '?name=' + pool['pool']['id'],
                              mock.ANY,
                              driver.TEMPLATE_HEADER),
                    mock.call('POST', '/api/workflowTemplate/' +
                              driver.L2_L3_WORKFLOW_TEMPLATE_NAME +
                              '?name=l2_l3_' + subnet['subnet']['network_id'],
                              mock.ANY,
                              driver.TEMPLATE_HEADER),
                    mock.call('POST', '/api/workflow/' + pool['pool']['id'] +
                              '/action/' + driver.L4_ACTION_NAME,
                              mock.ANY, driver.TEMPLATE_HEADER),
                    mock.call('GET', '/api/workflow/' +
                              pool['pool']['id'], None, None)
                ]
                self.rest_call_mock.assert_has_calls(calls, any_order=True)

                # sleep to wait for the operation completion
                greenthread.sleep(1)

                # Test DB: the completion handler should have activated
                # the vip.
                new_vip = self.plugin_instance.get_vip(self.ctx, vip['id'])
                self.assertEqual(new_vip['status'], 'ACTIVE')

                # Delete VIP
                self.plugin_instance.delete_vip(self.ctx, vip['id'])

                # Test deletion REST calls
                calls = [
                    mock.call('DELETE', u'/api/workflow/' + pool['pool']['id'],
                              None, None)
                ]
                self.rest_call_mock.assert_has_calls(calls, any_order=True)

    def test_update_vip(self):
        self.rest_call_mock.reset_mock()
        with self.subnet() as subnet:
            with self.pool(provider='radware', no_delete=True) as pool:
                vip_data = self._vip_data(subnet, pool)

                vip = self.plugin_instance.create_vip(
                    self.ctx, {'vip': vip_data})

                vip_data['status'] = 'PENDING_UPDATE'
                self.plugin_instance.update_vip(self.ctx, vip['id'],
                                                {'vip': vip_data})

                # Test REST calls
                calls = [
                    mock.call('POST', '/api/workflow/' + pool['pool']['id'] +
                              '/action/' + driver.L4_ACTION_NAME,
                              mock.ANY, driver.TEMPLATE_HEADER),
                ]
                self.rest_call_mock.assert_has_calls(calls, any_order=True)

                # Status stays PENDING_UPDATE until the async operation
                # completes.
                updated_vip = self.plugin_instance.get_vip(self.ctx, vip['id'])
                self.assertEqual(updated_vip['status'], 'PENDING_UPDATE')

                # sleep to wait for the operation completion
                greenthread.sleep(1)
                updated_vip = self.plugin_instance.get_vip(self.ctx, vip['id'])
                self.assertEqual(updated_vip['status'], 'ACTIVE')

                # delete VIP
                self.plugin_instance.delete_vip(self.ctx, vip['id'])

    def test_delete_vip(self):
        self.rest_call_mock.reset_mock()
        with self.subnet() as subnet:
            with self.pool(provider='radware', no_delete=True) as pool:
                vip_data = self._vip_data(subnet, pool)

                vip = self.plugin_instance.create_vip(
                    self.ctx, {'vip': vip_data})

                self.plugin_instance.delete_vip(self.ctx, vip['id'])

                calls = [
                    mock.call('DELETE', '/api/workflow/' + pool['pool']['id'],
                              None, None)
                ]
                self.rest_call_mock.assert_has_calls(calls, any_order=True)

                self.assertRaises(loadbalancer.VipNotFound,
                                  self.plugin_instance.get_vip,
                                  self.ctx, vip['id'])
                # TODO(review): add test checking all vip graph objects
                # were removed from DB.

    def test_delete_pool_with_vip(self):
        self.rest_call_mock.reset_mock()
        with self.subnet() as subnet:
            with self.pool(provider='radware', no_delete=True) as pool:
                with self.vip(pool=pool, subnet=subnet):
                    # A pool referenced by a vip must not be deletable.
                    self.assertRaises(loadbalancer.PoolInUse,
                                      self.plugin_instance.delete_pool,
                                      self.ctx, pool['pool']['id'])

    def test_create_member_with_vip(self):
        self.rest_call_mock.reset_mock()
        with self.subnet() as subnet:
            with self.pool(provider='radware') as p:
                with self.vip(pool=p, subnet=subnet):
                    with self.member(pool_id=p['pool']['id']):
                        # Both the vip creation and the member creation must
                        # trigger the L4 workflow action.
                        calls = [
                            mock.call(
                                'POST', '/api/workflow/' + p['pool']['id'] +
                                '/action/' + driver.L4_ACTION_NAME,
                                mock.ANY, driver.TEMPLATE_HEADER
                            ),
                            mock.call(
                                'POST', '/api/workflow/' + p['pool']['id'] +
                                '/action/' + driver.L4_ACTION_NAME,
                                mock.ANY, driver.TEMPLATE_HEADER
                            )
                        ]
                        self.rest_call_mock.assert_has_calls(calls,
                                                             any_order=True)

    def test_update_member_with_vip(self):
        self.rest_call_mock.reset_mock()
        with self.subnet() as subnet:
            with self.pool(provider='radware') as p:
                with self.member(pool_id=p['pool']['id']) as member:
                    with self.vip(pool=p, subnet=subnet):
                        self.plugin_instance.update_member(
                            self.ctx, member['member']['id'], member
                        )
                        calls = [
                            mock.call(
                                'POST', '/api/workflow/' + p['pool']['id'] +
                                '/action/' + driver.L4_ACTION_NAME,
                                mock.ANY, driver.TEMPLATE_HEADER
                            ),
                            mock.call(
                                'POST', '/api/workflow/' + p['pool']['id'] +
                                '/action/' + driver.L4_ACTION_NAME,
                                mock.ANY, driver.TEMPLATE_HEADER
                            )
                        ]
                        self.rest_call_mock.assert_has_calls(calls,
                                                             any_order=True)

                        # sleep to wait for the operation completion
                        greenthread.sleep(1)
                        updated_member = self.plugin_instance.get_member(
                            self.ctx, member['member']['id']
                        )
                        self.assertEqual(updated_member['status'], 'ACTIVE')

    def test_update_member_without_vip(self):
        self.rest_call_mock.reset_mock()
        with self.subnet():
            with self.pool(provider='radware') as pool:
                with self.member(pool_id=pool['pool']['id']) as member:
                    member['member']['status'] = 'PENDING_UPDATE'
                    updated_member = self.plugin_instance.update_member(
                        self.ctx, member['member']['id'], member
                    )
                    # Without a vip no workflow runs, so the status is not
                    # moved to ACTIVE.
                    self.assertEqual(updated_member['status'],
                                     'PENDING_UPDATE')

    def test_delete_member_with_vip(self):
        self.rest_call_mock.reset_mock()
        with self.subnet() as subnet:
            with self.pool(provider='radware') as p:
                with self.member(pool_id=p['pool']['id'],
                                 no_delete=True) as m:
                    with self.vip(pool=p, subnet=subnet):
                        self.plugin_instance.delete_member(self.ctx,
                                                           m['member']['id'])
                        calls = [
                            mock.call(
                                'POST', '/api/workflow/' + p['pool']['id'] +
                                '/action/' + driver.L4_ACTION_NAME,
                                mock.ANY, driver.TEMPLATE_HEADER
                            ),
                            mock.call(
                                'POST', '/api/workflow/' + p['pool']['id'] +
                                '/action/' + driver.L4_ACTION_NAME,
                                mock.ANY, driver.TEMPLATE_HEADER
                            )
                        ]
                        self.rest_call_mock.assert_has_calls(calls,
                                                             any_order=True)

                        # sleep to wait for the operation completion
                        greenthread.sleep(1)
                        self.assertRaises(loadbalancer.MemberNotFound,
                                          self.plugin_instance.get_member,
                                          self.ctx, m['member']['id'])

    def test_delete_member_without_vip(self):
        self.rest_call_mock.reset_mock()
        with self.subnet():
            with self.pool(provider='radware') as p:
                with self.member(pool_id=p['pool']['id'], no_delete=True) as m:
                    self.plugin_instance.delete_member(
                        self.ctx, m['member']['id']
                    )
                    self.assertRaises(loadbalancer.MemberNotFound,
                                      self.plugin_instance.get_member,
                                      self.ctx, m['member']['id'])

View File

@ -21,6 +21,7 @@ from neutron.api import extensions
from neutron.api.v2 import attributes
from neutron.common import constants
from neutron import context
from neutron.db import servicetype_db as sdb
from neutron.extensions import agent
from neutron.extensions import lbaas_agentscheduler
from neutron import manager
@ -78,6 +79,9 @@ class LBaaSAgentSchedulerTestCase(test_agent_ext_plugin.AgentDBTestMixIn,
'HaproxyOnHostPluginDriver:default')],
'service_providers')
#force service type manager to reload configuration:
sdb.ServiceTypeManager._instance = None
super(LBaaSAgentSchedulerTestCase, self).setUp(
self.plugin_str, service_plugins=service_plugins)
ext_mgr = extensions.PluginAwareExtensionManager.get_instance()