Remove unnecessary Neutron files under neutron/services/.

Change-Id: I795afc55b72306507ec6717838150fc2fed7a15d
This commit is contained in:
Isaku Yamahata 2014-06-26 16:11:04 +09:00
parent b5607354c0
commit 872cab4426
80 changed files with 0 additions and 11333 deletions

View File

@ -1,16 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

View File

@ -1,16 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

View File

@ -1,85 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Sumit Naiksatam, sumitnaiksatam@gmail.com, Big Switch Networks, Inc.
# @author: Sridar Kandaswamy, skandasw@cisco.com, Cisco Systems, Inc.
# @author: Dan Florea, dflorea@cisco.com, Cisco Systems, Inc.
from oslo.config import cfg

from neutron.common import rpc_compat
from neutron.openstack.common import log as logging

LOG = logging.getLogger(__name__)

# Agent-side FWaaS configuration, grouped under the [fwaas] section of the
# agent config file.
FWaaSOpts = [
    cfg.StrOpt(
        'driver',
        # Empty default: a concrete driver class path must be configured
        # before enabling FWaaS.
        default='',
        help=_("Name of the FWaaS Driver")),
    cfg.BoolOpt(
        'enabled',
        # Disabled by default; the L3 agent skips all FWaaS work unless
        # this is turned on.
        default=False,
        help=_("Enable FWaaS")),
]
# Register at import time so any consumer of this module sees cfg.CONF.fwaas.
cfg.CONF.register_opts(FWaaSOpts, 'fwaas')
class FWaaSPluginApiMixin(rpc_compat.RpcProxy):
    """Agent-side proxy for the FWaaS agent -> FWaaS plugin RPC API.

    Each method issues a synchronous RPC call to the plugin on the topic
    supplied at construction time, always reporting the agent host.
    """

    RPC_API_VERSION = '1.0'

    def __init__(self, topic, host):
        super(FWaaSPluginApiMixin, self).__init__(
            topic=topic, default_version=self.RPC_API_VERSION)
        self.host = host

    def set_firewall_status(self, context, firewall_id, status):
        """Make a RPC to set the status of a firewall."""
        msg = self.make_msg('set_firewall_status', host=self.host,
                            firewall_id=firewall_id, status=status)
        return self.call(context, msg, topic=self.topic)

    def firewall_deleted(self, context, firewall_id):
        """Make a RPC to indicate that the firewall resources are deleted."""
        msg = self.make_msg('firewall_deleted', host=self.host,
                            firewall_id=firewall_id)
        return self.call(context, msg, topic=self.topic)
class FWaaSAgentRpcCallbackMixin(object):
    """Mixin defining the plugin -> agent FWaaS RPC callback interface.

    Concrete agent implementations override the firewall lifecycle hooks
    below; the defaults are deliberate no-ops.
    """

    def __init__(self, host):
        # Cooperative __init__: forwards the host to the next class in the
        # MRO (this mixin is never used stand-alone on top of object).
        super(FWaaSAgentRpcCallbackMixin, self).__init__(host)

    def create_firewall(self, context, firewall, host):
        """Handle RPC cast from plugin to create a firewall."""

    def update_firewall(self, context, firewall, host):
        """Handle RPC cast from plugin to update a firewall."""

    def delete_firewall(self, context, firewall, host):
        """Handle RPC cast from plugin to delete a firewall."""

View File

@ -1,16 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

View File

@ -1,295 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (c) 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Sumit Naiksatam, sumitnaiksatam@gmail.com, Big Switch Networks, Inc.
# @author: Sridar Kandaswamy, skandasw@cisco.com, Cisco Systems, Inc.
# @author: Dan Florea, dflorea@cisco.com, Cisco Systems, Inc.
from oslo.config import cfg
from neutron.agent.common import config
from neutron.agent.linux import ip_lib
from neutron.common import topics
from neutron import context
from neutron.extensions import firewall as fw_ext
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants
from neutron.services.firewall.agents import firewall_agent_api as api
LOG = logging.getLogger(__name__)
class FWaaSL3PluginApi(api.FWaaSPluginApiMixin):
    """L3-agent-side proxy for the FWaaS agent -> FWaaS plugin RPC API."""

    def __init__(self, topic, host):
        super(FWaaSL3PluginApi, self).__init__(topic, host)

    def get_firewalls_for_tenant(self, context, **kwargs):
        """Get the Firewalls with rules from the Plugin to send to driver."""
        LOG.debug(_("Retrieve Firewall with rules from Plugin"))
        msg = self.make_msg('get_firewalls_for_tenant', host=self.host)
        return self.call(context, msg, topic=self.topic)

    def get_tenants_with_firewalls(self, context, **kwargs):
        """Get all Tenants that have Firewalls configured from plugin."""
        LOG.debug(_("Retrieve Tenants with Firewalls configured from Plugin"))
        msg = self.make_msg('get_tenants_with_firewalls', host=self.host)
        return self.call(context, msg, topic=self.topic)
class FWaaSL3AgentRpcCallback(api.FWaaSAgentRpcCallbackMixin):
    """FWaaS Agent support to be used by Neutron L3 agent.

    Mixed into the L3 agent; relies on attributes the L3 agent provides
    (self.router_info, self.plugin_rpc) -- not defined here.
    """

    def __init__(self, conf):
        LOG.debug(_("Initializing firewall agent"))
        self.conf = conf
        fwaas_driver_class_path = cfg.CONF.fwaas.driver
        self.fwaas_enabled = cfg.CONF.fwaas.enabled
        if self.fwaas_enabled:
            try:
                # Dynamically load the configured device driver; a bad
                # class path is fatal for the agent.
                self.fwaas_driver = importutils.import_object(
                    fwaas_driver_class_path)
                LOG.debug(_("FWaaS Driver Loaded: '%s'"),
                          fwaas_driver_class_path)
            except ImportError:
                msg = _('Error importing FWaaS device driver: %s')
                raise ImportError(msg % fwaas_driver_class_path)
        # When True, process_services_sync() will re-sync state with the
        # plugin on the next periodic pass (set on any RPC/driver failure).
        self.services_sync = False
        self.root_helper = config.get_root_helper(conf)
        # setup RPC to msg fwaas plugin
        self.fwplugin_rpc = FWaaSL3PluginApi(topics.FIREWALL_PLUGIN,
                                             conf.host)
        super(FWaaSL3AgentRpcCallback, self).__init__(host=conf.host)

    def _get_router_info_list_for_tenant(self, routers, tenant_id):
        """Returns the list of router info objects on which to apply the fw."""
        root_ip = ip_lib.IPWrapper(self.root_helper)
        # Get the routers for the tenant
        router_ids = [
            router['id']
            for router in routers
            if router['tenant_id'] == tenant_id]
        # Only namespaces that exist locally on this host are candidates.
        local_ns_list = root_ip.get_namespaces(
            self.root_helper) if self.conf.use_namespaces else []
        router_info_list = []
        # Pick up namespaces for Tenant Routers
        for rid in router_ids:
            # for routers without an interface - get_routers returns
            # the router - but this is not yet populated in router_info
            if rid not in self.router_info:
                continue
            if self.router_info[rid].use_namespaces:
                router_ns = self.router_info[rid].ns_name
                if router_ns in local_ns_list:
                    router_info_list.append(self.router_info[rid])
            else:
                router_info_list.append(self.router_info[rid])
        return router_info_list

    def _invoke_driver_for_plugin_api(self, context, fw, func_name):
        """Invoke driver method for plugin API and provide status back.

        func_name is one of 'create_firewall'/'update_firewall'/
        'delete_firewall'; the resulting status (or delete ack) is
        reported back to the plugin over RPC.
        """
        LOG.debug(_("%(func_name)s from agent for fw: %(fwid)s"),
                  {'func_name': func_name, 'fwid': fw['id']})
        try:
            routers = self.plugin_rpc.get_routers(context)
            router_info_list = self._get_router_info_list_for_tenant(
                routers,
                fw['tenant_id'])
            if not router_info_list:
                LOG.debug(_('No Routers on tenant: %s'), fw['tenant_id'])
                # fw was created before any routers were added, and if a
                # delete is sent then we need to ack so that plugin can
                # cleanup.
                if func_name == 'delete_firewall':
                    self.fwplugin_rpc.firewall_deleted(context, fw['id'])
                return
            LOG.debug(_("Apply fw on Router List: '%s'"),
                      [ri.router['id'] for ri in router_info_list])
            # call into the driver
            try:
                self.fwaas_driver.__getattribute__(func_name)(
                    router_info_list,
                    fw)
                if fw['admin_state_up']:
                    status = constants.ACTIVE
                else:
                    status = constants.DOWN
            except fw_ext.FirewallInternalDriverError:
                LOG.error(_("Firewall Driver Error for %(func_name)s "
                            "for fw: %(fwid)s"),
                          {'func_name': func_name, 'fwid': fw['id']})
                status = constants.ERROR
            # delete needs different handling
            if func_name == 'delete_firewall':
                # Only ack the delete on driver success; on ERROR the
                # plugin keeps the firewall so it can be retried.
                if status in [constants.ACTIVE, constants.DOWN]:
                    self.fwplugin_rpc.firewall_deleted(context, fw['id'])
            else:
                self.fwplugin_rpc.set_firewall_status(
                    context,
                    fw['id'],
                    status)
        except Exception:
            LOG.exception(
                _("FWaaS RPC failure in %(func_name)s for fw: %(fwid)s"),
                {'func_name': func_name, 'fwid': fw['id']})
            # Flag a full re-sync on the next periodic pass.
            self.services_sync = True
        return

    def _invoke_driver_for_sync_from_plugin(self, ctx, router_info_list, fw):
        """Invoke the delete driver method for status of PENDING_DELETE and
        update method for all other status to (re)apply on driver which is
        Idempotent.
        """
        if fw['status'] == constants.PENDING_DELETE:
            try:
                self.fwaas_driver.delete_firewall(router_info_list, fw)
                self.fwplugin_rpc.firewall_deleted(
                    ctx,
                    fw['id'])
            except fw_ext.FirewallInternalDriverError:
                LOG.error(_("Firewall Driver Error on fw state %(fwmsg)s "
                            "for fw: %(fwid)s"),
                          {'fwmsg': fw['status'], 'fwid': fw['id']})
                self.fwplugin_rpc.set_firewall_status(
                    ctx,
                    fw['id'],
                    constants.ERROR)
        else:
            # PENDING_UPDATE, PENDING_CREATE, ...
            try:
                self.fwaas_driver.update_firewall(router_info_list, fw)
                if fw['admin_state_up']:
                    status = constants.ACTIVE
                else:
                    status = constants.DOWN
            except fw_ext.FirewallInternalDriverError:
                LOG.error(_("Firewall Driver Error on fw state %(fwmsg)s "
                            "for fw: %(fwid)s"),
                          {'fwmsg': fw['status'], 'fwid': fw['id']})
                status = constants.ERROR
            self.fwplugin_rpc.set_firewall_status(
                ctx,
                fw['id'],
                status)

    def _process_router_add(self, ri):
        """On router add, get fw with rules from plugin and update driver."""
        LOG.debug(_("Process router add, router_id: '%s'"), ri.router['id'])
        routers = []
        routers.append(ri.router)
        router_info_list = self._get_router_info_list_for_tenant(
            routers,
            ri.router['tenant_id'])
        if router_info_list:
            # Get the firewall with rules
            # for the tenant the router is on.
            ctx = context.Context('', ri.router['tenant_id'])
            fw_list = self.fwplugin_rpc.get_firewalls_for_tenant(ctx)
            LOG.debug(_("Process router add, fw_list: '%s'"),
                      [fw['id'] for fw in fw_list])
            for fw in fw_list:
                self._invoke_driver_for_sync_from_plugin(
                    ctx,
                    router_info_list,
                    fw)

    def process_router_add(self, ri):
        """On router add, get fw with rules from plugin and update driver."""
        # avoid msg to plugin when fwaas is not configured
        if not self.fwaas_enabled:
            return
        try:
            self._process_router_add(ri)
        except Exception:
            LOG.exception(
                _("FWaaS RPC info call failed for '%s'."),
                ri.router['id'])
            self.services_sync = True

    def process_services_sync(self, ctx):
        """On RPC issues sync with plugin and apply the sync data."""
        # avoid msg to plugin when fwaas is not configured
        if not self.fwaas_enabled:
            return
        try:
            # get all routers
            routers = self.plugin_rpc.get_routers(ctx)
            # get the list of tenants with firewalls configured
            # from the plugin
            tenant_ids = self.fwplugin_rpc.get_tenants_with_firewalls(ctx)
            LOG.debug(_("Tenants with Firewalls: '%s'"), tenant_ids)
            for tenant_id in tenant_ids:
                ctx = context.Context('', tenant_id)
                fw_list = self.fwplugin_rpc.get_firewalls_for_tenant(ctx)
                if fw_list:
                    # if fw present on tenant
                    router_info_list = self._get_router_info_list_for_tenant(
                        routers,
                        tenant_id)
                    if router_info_list:
                        LOG.debug(_("Router List: '%s'"),
                                  [ri.router['id'] for ri in router_info_list])
                        LOG.debug(_("fw_list: '%s'"),
                                  [fw['id'] for fw in fw_list])
                        # apply sync data on fw for this tenant
                        for fw in fw_list:
                            # fw, routers present on this host for tenant
                            # install
                            LOG.debug(_("Apply fw on Router List: '%s'"),
                                      [ri.router['id']
                                       for ri in router_info_list])
                            # no need to apply sync data for ACTIVE fw
                            if fw['status'] != constants.ACTIVE:
                                self._invoke_driver_for_sync_from_plugin(
                                    ctx,
                                    router_info_list,
                                    fw)
            self.services_sync = False
        except Exception:
            LOG.exception(_("Failed fwaas process services sync"))
            self.services_sync = True

    def create_firewall(self, context, firewall, host):
        """Handle Rpc from plugin to create a firewall."""
        return self._invoke_driver_for_plugin_api(
            context,
            firewall,
            'create_firewall')

    def update_firewall(self, context, firewall, host):
        """Handle Rpc from plugin to update a firewall."""
        return self._invoke_driver_for_plugin_api(
            context,
            firewall,
            'update_firewall')

    def delete_firewall(self, context, firewall, host):
        """Handle Rpc from plugin to delete a firewall."""
        return self._invoke_driver_for_plugin_api(
            context,
            firewall,
            'delete_firewall')

View File

@ -1,16 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

View File

@ -1,147 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 vArmour Networks Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Gary Duan, gduan@varmour.com, vArmour Networks
import base64
import httplib2
from oslo.config import cfg
from neutron.openstack.common import jsonutils as json
from neutron.openstack.common import log as logging
from neutron.services.firewall.agents.varmour import varmour_utils as va_utils
# Connection settings for the vArmour director appliance, grouped under the
# [vArmour] section of the agent config file.
OPTS = [
    cfg.StrOpt('director', default='localhost',
               help=_("vArmour director ip")),
    cfg.StrOpt('director_port', default='443',
               help=_("vArmour director port")),
    cfg.StrOpt('username', default='varmour',
               help=_("vArmour director username")),
    cfg.StrOpt('password', default='varmour', secret=True,
               help=_("vArmour director password")), ]

cfg.CONF.register_opts(OPTS, "vArmour")

LOG = logging.getLogger(__name__)

# All REST calls are made against this API version prefix.
REST_URL_PREFIX = '/api/v1.0'
class vArmourAPIException(Exception):
    """Base exception for vArmour REST API errors.

    Subclasses set ``message`` (a %-format template); keyword arguments
    passed at construction fill the template. If formatting fails for any
    reason, the raw template is used as the error text.
    """

    message = _("An unknown exception.")

    def __init__(self, **kwargs):
        try:
            formatted = self.message % kwargs
        except Exception:
            # Bad/missing format keys must never mask the original error.
            formatted = self.message
        self.err = formatted

    def __str__(self):
        return self.err
class AuthenticationFailure(vArmourAPIException):
    """Raised when the director rejects the configured login credentials."""
    message = _("Invalid login credential.")
class vArmourRestAPI(object):
    """Thin REST client for the vArmour director appliance.

    Authentication is HTTP Basic: username:password for the initial auth
    call, then username:session-key for subsequent requests.
    """

    def __init__(self):
        LOG.debug(_('vArmourRestAPI: started'))
        self.user = cfg.CONF.vArmour.username
        self.passwd = cfg.CONF.vArmour.password
        self.server = cfg.CONF.vArmour.director
        self.port = cfg.CONF.vArmour.director_port
        self.timeout = 3
        # Session key; empty until auth() succeeds.
        self.key = ''

    def auth(self):
        """Log in to the director and cache the session key.

        Raises AuthenticationFailure if the director does not return 200.
        """
        headers = {}
        enc = base64.b64encode(self.user + ':' + self.passwd)
        headers['Authorization'] = 'Basic ' + enc
        resp = self.rest_api('POST', va_utils.REST_URL_AUTH, None, headers)
        if resp and resp['status'] == 200:
            self.key = resp['body']['auth']
            return True
        else:
            raise AuthenticationFailure()

    def commit(self):
        # Configuration changes take effect only after an explicit commit.
        self.rest_api('POST', va_utils.REST_URL_COMMIT)

    def rest_api(self, method, url, body=None, headers=None):
        """Issue one HTTPS request to the director.

        Returns a dict with 'status', 'reason' and parsed JSON 'body' on
        HTTP 200; returns None on any other status or connection error
        (errors are logged, not raised).
        """
        url = REST_URL_PREFIX + url
        if body:
            body_data = json.dumps(body)
        else:
            body_data = ''
        if not headers:
            # Default auth header uses the cached session key from auth().
            headers = {}
            enc = base64.b64encode('%s:%s' % (self.user, self.key))
            headers['Authorization'] = 'Basic ' + enc

        LOG.debug(_("vArmourRestAPI: %(server)s %(port)s"),
                  {'server': self.server, 'port': self.port})

        try:
            action = "https://" + self.server + ":" + self.port + url

            LOG.debug(_("vArmourRestAPI Sending: "
                        "%(method)s %(action)s %(headers)s %(body_data)s"),
                      {'method': method, 'action': action,
                       'headers': headers, 'body_data': body_data})

            # NOTE(review): certificate validation is disabled — the
            # director's cert is not verified.
            h = httplib2.Http(timeout=3,
                              disable_ssl_certificate_validation=True)
            resp, resp_str = h.request(action, method,
                                       body=body_data,
                                       headers=headers)

            LOG.debug(_("vArmourRestAPI Response: %(status)s %(resp_str)s"),
                      {'status': resp.status, 'resp_str': resp_str})

            if resp.status == 200:
                return {'status': resp.status,
                        'reason': resp.reason,
                        'body': json.loads(resp_str)}
        except Exception:
            LOG.error(_('vArmourRestAPI: Could not establish HTTP connection'))

    def del_cfg_objs(self, url, prefix):
        """Delete every config object under *url* whose name starts with
        *prefix*, then commit (only if the listing succeeded)."""
        resp = self.rest_api('GET', url)
        if resp and resp['status'] == 200:
            olist = resp['body']['response']
            if not olist:
                return

            for o in olist:
                if o.startswith(prefix):
                    self.rest_api('DELETE', url + '/"name:%s"' % o)
            self.commit()

    def count_cfg_objs(self, url, prefix):
        """Return how many config objects under *url* have names starting
        with *prefix* (0 if the listing fails)."""
        count = 0
        resp = self.rest_api('GET', url)
        if resp and resp['status'] == 200:
            for o in resp['body']['response']:
                if o.startswith(prefix):
                    count += 1

        return count

View File

@ -1,351 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 vArmour Networks Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Gary Duan, vArmour Networks Inc.
#
import sys
import eventlet
eventlet.monkey_patch()
import netaddr
from oslo.config import cfg
from neutron.agent.common import config
from neutron.agent import l3_agent
from neutron.agent.linux import external_process
from neutron.agent.linux import interface
from neutron.agent.linux import ip_lib
from neutron.common import config as common_config
from neutron.common import constants as l3_constants
from neutron.common import topics
from neutron.openstack.common import log as logging
from neutron.openstack.common import service
from neutron import service as neutron_service
from neutron.services.firewall.agents.l3reference import firewall_l3_agent
from neutron.services.firewall.agents.varmour import varmour_api
from neutron.services.firewall.agents.varmour import varmour_utils as va_utils
LOG = logging.getLogger(__name__)
class vArmourL3NATAgent(l3_agent.L3NATAgent,
                        firewall_l3_agent.FWaaSL3AgentRpcCallback):
    """L3 NAT agent that programs a vArmour director instead of the local
    Linux stack.

    Namespace/metadata-proxy hooks from the base L3 agent are overridden
    to no-ops because routing/NAT happens on the appliance.
    """

    def __init__(self, host, conf=None):
        LOG.debug(_('vArmourL3NATAgent: __init__'))
        self.rest = varmour_api.vArmourRestAPI()
        super(vArmourL3NATAgent, self).__init__(host, conf)

    def _destroy_router_namespaces(self, only_router_id=None):
        # No local namespaces are used; nothing to destroy.
        return

    def _destroy_router_namespace(self, namespace):
        return

    def _create_router_namespace(self, ri):
        return

    def _router_added(self, router_id, router):
        """Track the new router and push tenant firewalls onto it."""
        LOG.debug(_("_router_added: %s"), router_id)
        ri = l3_agent.RouterInfo(router_id, self.root_helper,
                                 self.conf.use_namespaces, router)
        self.router_info[router_id] = ri
        super(vArmourL3NATAgent, self).process_router_add(ri)

    def _router_removed(self, router_id):
        """Strip the router's config (NAT rules, zones) off the director."""
        LOG.debug(_("_router_removed: %s"), router_id)

        ri = self.router_info[router_id]
        if ri:
            # Blank out ports/floating IPs first so process_router removes
            # the derived config before we delete the zones and NAT rules.
            ri.router['gw_port'] = None
            ri.router[l3_constants.INTERFACE_KEY] = []
            ri.router[l3_constants.FLOATINGIP_KEY] = []
            self.process_router(ri)

            name = va_utils.get_snat_rule_name(ri)
            self.rest.del_cfg_objs(va_utils.REST_URL_CONF_NAT_RULE, name)

            name = va_utils.get_dnat_rule_name(ri)
            self.rest.del_cfg_objs(va_utils.REST_URL_CONF_NAT_RULE, name)

            name = va_utils.get_trusted_zone_name(ri)
            self._va_unset_zone_interfaces(name, True)

            name = va_utils.get_untrusted_zone_name(ri)
            self._va_unset_zone_interfaces(name, True)

            del self.router_info[router_id]

    def _spawn_metadata_proxy(self, router_id, ns_name):
        # Metadata proxy is not run locally for appliance-backed routers.
        return

    def _destroy_metadata_proxy(self, router_id, ns_name):
        return

    def _set_subnet_info(self, port):
        """Attach an 'ip_cidr' key to *port* from its first fixed IP."""
        ips = port['fixed_ips']
        if not ips:
            raise Exception(_("Router port %s has no IP address") % port['id'])
            # NOTE(review): unreachable — the raise above always fires;
            # looks like leftover dead code, confirm against history.
            return
        if len(ips) > 1:
            LOG.warn(_("Ignoring multiple IPs on router port %s"), port['id'])
        prefixlen = netaddr.IPNetwork(port['subnet']['cidr']).prefixlen
        port['ip_cidr'] = "%s/%s" % (ips[0]['ip_address'], prefixlen)

    def _va_unset_zone_interfaces(self, zone_name, remove_zone=False):
        # return True if zone exists; otherwise, return False
        LOG.debug(_("_va_unset_zone_interfaces: %s"), zone_name)
        resp = self.rest.rest_api('GET', va_utils.REST_URL_CONF_ZONE)
        if resp and resp['status'] == 200:
            zlist = resp['body']['response']
            for zn in zlist:
                if zn == zone_name:
                    commit = False

                    if 'interface' in zlist[zn]:
                        for intf in zlist[zn]['interface']:
                            self.rest.rest_api('DELETE',
                                               va_utils.REST_URL_CONF +
                                               va_utils.REST_ZONE_NAME % zn +
                                               va_utils.REST_INTF_NAME % intf)
                            commit = True
                    if remove_zone:
                        self.rest.rest_api('DELETE',
                                           va_utils.REST_URL_CONF +
                                           va_utils.REST_ZONE_NAME % zn)
                        commit = True

                    if commit:
                        self.rest.commit()

                    return True

        return False

    def _va_pif_2_lif(self, pif):
        # Map a physical interface name to its logical sub-interface (.0).
        return pif + '.0'

    def _va_set_interface_ip(self, pif, cidr):
        """Assign *cidr* to the logical interface of physical port *pif*."""
        LOG.debug(_("_va_set_interface_ip: %(pif)s %(cidr)s"),
                  {'pif': pif, 'cidr': cidr})

        lif = self._va_pif_2_lif(pif)
        obj = va_utils.REST_INTF_NAME % pif + va_utils.REST_LOGIC_NAME % lif
        body = {
            'name': lif,
            'family': 'ipv4',
            'address': cidr
        }
        self.rest.rest_api('PUT', va_utils.REST_URL_CONF + obj, body)

    def _va_get_port_name(self, port_list, name):
        # Translate a Neutron device name into the director's port name
        # via the interface-mapping table ('VM name' -> 'name').
        if name:
            for p in port_list:
                if p['VM name'] == name:
                    return p['name']

    def _va_config_trusted_zone(self, ri, plist):
        """(Re)build the trusted zone from the router's internal ports."""
        zone = va_utils.get_trusted_zone_name(ri)
        LOG.debug(_("_va_config_trusted_zone: %s"), zone)

        body = {
            'name': zone,
            'type': 'L3',
            'interface': []
        }

        if not self._va_unset_zone_interfaces(zone):
            # if zone doesn't exist, create it
            self.rest.rest_api('POST', va_utils.REST_URL_CONF_ZONE, body)
            self.rest.commit()

        # add new internal ports to trusted zone
        for p in ri.internal_ports:
            if p['admin_state_up']:
                dev = self.get_internal_device_name(p['id'])
                pif = self._va_get_port_name(plist, dev)
                if pif:
                    lif = self._va_pif_2_lif(pif)
                    if lif not in body['interface']:
                        body['interface'].append(lif)

                    self._va_set_interface_ip(pif, p['ip_cidr'])

        if body['interface']:
            self.rest.rest_api('PUT', va_utils.REST_URL_CONF_ZONE, body)
            self.rest.commit()

    def _va_config_untrusted_zone(self, ri, plist):
        """(Re)build the untrusted zone from the router's gateway port."""
        zone = va_utils.get_untrusted_zone_name(ri)
        LOG.debug(_("_va_config_untrusted_zone: %s"), zone)

        body = {
            'name': zone,
            'type': 'L3',
            'interface': []
        }

        if not self._va_unset_zone_interfaces(zone):
            # if zone doesn't exist, create it
            self.rest.rest_api('POST', va_utils.REST_URL_CONF_ZONE, body)
            self.rest.commit()

        # add new gateway ports to untrusted zone
        if ri.ex_gw_port:
            LOG.debug(_("_va_config_untrusted_zone: gw=%r"), ri.ex_gw_port)
            dev = self.get_external_device_name(ri.ex_gw_port['id'])
            pif = self._va_get_port_name(plist, dev)
            if pif:
                lif = self._va_pif_2_lif(pif)

                self._va_set_interface_ip(pif, ri.ex_gw_port['ip_cidr'])

                body['interface'].append(lif)
                self.rest.rest_api('PUT', va_utils.REST_URL_CONF_ZONE, body)
                self.rest.commit()

    def _va_config_router_snat_rules(self, ri, plist):
        """Rewrite source-NAT rules for every internal port of the router."""
        LOG.debug(_('_va_config_router_snat_rules: %s'), ri.router['id'])

        prefix = va_utils.get_snat_rule_name(ri)
        self.rest.del_cfg_objs(va_utils.REST_URL_CONF_NAT_RULE, prefix)

        if not ri.enable_snat:
            return

        for idx, p in enumerate(ri.internal_ports):
            if p['admin_state_up']:
                dev = self.get_internal_device_name(p['id'])
                pif = self._va_get_port_name(plist, dev)
                if pif:
                    net = netaddr.IPNetwork(p['ip_cidr'])
                    body = {
                        'name': '%s_%d' % (prefix, idx),
                        'ingress-context-type': 'interface',
                        'ingress-index': self._va_pif_2_lif(pif),
                        # NOTE(review): range skips network addr, the
                        # router's own IP (.first + 2) and broadcast
                        # (.last - 1) — confirm intended addressing.
                        'source-address': [
                            [str(netaddr.IPAddress(net.first + 2)),
                             str(netaddr.IPAddress(net.last - 1))]
                        ],
                        'flag': 'interface translate-source'
                    }
                    self.rest.rest_api('POST',
                                       va_utils.REST_URL_CONF_NAT_RULE,
                                       body)

        if ri.internal_ports:
            self.rest.commit()

    def _va_config_floating_ips(self, ri):
        """Rewrite destination-NAT rules for the router's floating IPs."""
        LOG.debug(_('_va_config_floating_ips: %s'), ri.router['id'])

        prefix = va_utils.get_dnat_rule_name(ri)
        self.rest.del_cfg_objs(va_utils.REST_URL_CONF_NAT_RULE, prefix)

        # add new dnat rules
        for idx, fip in enumerate(ri.floating_ips):
            body = {
                'name': '%s_%d' % (prefix, idx),
                'ingress-context-type': 'zone',
                'ingress-index': va_utils.get_untrusted_zone_name(ri),
                'destination-address': [[fip['floating_ip_address'],
                                         fip['floating_ip_address']]],
                'static': [fip['fixed_ip_address'], fip['fixed_ip_address']],
                'flag': 'translate-destination'
            }
            self.rest.rest_api('POST', va_utils.REST_URL_CONF_NAT_RULE, body)

        if ri.floating_ips:
            self.rest.commit()

    def process_router(self, ri):
        """Push the router's full zone/NAT configuration to the director."""
        LOG.debug(_("process_router: %s"), ri.router['id'])
        super(vArmourL3NATAgent, self).process_router(ri)

        self.rest.auth()

        # read internal port name and configuration port name map
        resp = self.rest.rest_api('GET', va_utils.REST_URL_INTF_MAP)
        if resp and resp['status'] == 200:
            try:
                plist = resp['body']['response']
            except ValueError:
                LOG.warn(_("Unable to parse interface mapping."))
                return
        else:
            LOG.warn(_("Unable to read interface mapping."))
            return

        if ri.ex_gw_port:
            self._set_subnet_info(ri.ex_gw_port)
        self._va_config_trusted_zone(ri, plist)
        self._va_config_untrusted_zone(ri, plist)
        self._va_config_router_snat_rules(ri, plist)
        self._va_config_floating_ips(ri)

    def _handle_router_snat_rules(self, ri, ex_gw_port, internal_cidrs,
                                  interface_name, action):
        # iptables SNAT is handled on the appliance, not locally.
        return

    def _send_gratuitous_arp_packet(self, ri, interface_name, ip_address):
        return

    def external_gateway_added(self, ri, ex_gw_port,
                               interface_name, internal_cidrs):
        """Plug the external gateway port locally (still needed so the
        appliance can reach the external network bridge)."""
        LOG.debug(_("external_gateway_added: %s"), ri.router['id'])

        if not ip_lib.device_exists(interface_name,
                                    root_helper=self.root_helper,
                                    namespace=ri.ns_name):
            self.driver.plug(ex_gw_port['network_id'],
                             ex_gw_port['id'], interface_name,
                             ex_gw_port['mac_address'],
                             bridge=self.conf.external_network_bridge,
                             namespace=ri.ns_name,
                             prefix=l3_agent.EXTERNAL_DEV_PREFIX)
        self.driver.init_l3(interface_name, [ex_gw_port['ip_cidr']],
                            namespace=ri.ns_name)

    def _update_routing_table(self, ri, operation, route):
        # Static route programming is not supported on the appliance path.
        return
class vArmourL3NATAgentWithStateReport(vArmourL3NATAgent,
                                       l3_agent.L3NATAgentWithStateReport):
    """vArmour L3 NAT agent with periodic state reporting mixed in."""
    pass
def main():
    """Entry point: register options, parse CLI/config and launch the
    vArmour L3 agent service."""
    conf = cfg.CONF
    # NOTE(review): OPTS is not defined on vArmourL3NATAgent in this file —
    # presumably inherited from l3_agent.L3NATAgent; confirm.
    conf.register_opts(vArmourL3NATAgent.OPTS)
    config.register_interface_driver_opts_helper(conf)
    config.register_use_namespaces_opts_helper(conf)
    config.register_agent_state_opts_helper(conf)
    config.register_root_helper(conf)
    conf.register_opts(interface.OPTS)
    conf.register_opts(external_process.OPTS)
    common_config.init(sys.argv[1:])
    config.setup_logging(conf)
    server = neutron_service.Service.create(
        binary='neutron-l3-agent',
        topic=topics.L3_AGENT,
        report_interval=cfg.CONF.AGENT.report_interval,
        manager='neutron.services.firewall.agents.varmour.varmour_router.'
                'vArmourL3NATAgentWithStateReport')
    # Blocks until the service is stopped.
    service.launch(server).wait()

View File

@ -1,74 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 vArmour Networks Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Gary Duan, gduan@varmour.com, vArmour Networks
# Naming helpers for vArmour director configuration objects. Every object
# created for a router is prefixed with a short, router-unique tag so it can
# later be found and deleted by prefix match.

ROUTER_OBJ_PREFIX = 'r-'
# Number of leading UUID characters used to keep object names short.
OBJ_PREFIX_LEN = 8
TRUST_ZONE = '_z_trust'
UNTRUST_ZONE = '_z_untrust'
SNAT_RULE = '_snat'
DNAT_RULE = '_dnat'
ROUTER_POLICY = '_p'

# REST endpoint paths on the director.
REST_URL_CONF = '/config'
REST_URL_AUTH = '/auth'
REST_URL_COMMIT = '/commit'
REST_URL_INTF_MAP = '/operation/interface/mapping'
REST_URL_CONF_NAT_RULE = REST_URL_CONF + '/nat/rule'
REST_URL_CONF_ZONE = REST_URL_CONF + '/zone'
REST_URL_CONF_POLICY = REST_URL_CONF + '/policy'
REST_URL_CONF_ADDR = REST_URL_CONF + '/address'
REST_URL_CONF_SERVICE = REST_URL_CONF + '/service'

# Path fragments for addressing named sub-objects.
REST_ZONE_NAME = '/zone/"name:%s"'
REST_INTF_NAME = '/interface/"name:%s"'
REST_LOGIC_NAME = '/logical/"name:%s"'
REST_SERVICE_NAME = '/service/"name:%s"/rule'


def get_router_object_prefix(ri):
    """Router-unique object prefix: 'r-' + first 8 chars of the router id."""
    return '%s%s' % (ROUTER_OBJ_PREFIX, ri.router['id'][:OBJ_PREFIX_LEN])


def get_firewall_object_prefix(ri, fw):
    """Firewall-unique prefix: router prefix + '-' + truncated fw id."""
    return '%s-%s' % (get_router_object_prefix(ri), fw['id'][:OBJ_PREFIX_LEN])


def get_trusted_zone_name(ri):
    """Name of the router's trusted (internal) zone."""
    return get_router_object_prefix(ri) + TRUST_ZONE


def get_untrusted_zone_name(ri):
    """Name of the router's untrusted (external) zone."""
    return get_router_object_prefix(ri) + UNTRUST_ZONE


def get_snat_rule_name(ri):
    """Prefix for the router's source-NAT rules."""
    return get_router_object_prefix(ri) + SNAT_RULE


def get_dnat_rule_name(ri):
    """Prefix for the router's destination-NAT (floating IP) rules."""
    return get_router_object_prefix(ri) + DNAT_RULE


def get_router_policy_name(ri):
    """Name of the router's default policy object."""
    return get_router_object_prefix(ri) + ROUTER_POLICY


def get_firewall_policy_name(ri, fw, rule):
    """Name of the policy object for one firewall rule."""
    return get_firewall_object_prefix(ri, fw) + rule['id'][:OBJ_PREFIX_LEN]

View File

@ -1,16 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

View File

@ -1,100 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 Dell Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Rajesh Mohan, Rajesh_Mohan3@Dell.com, DELL Inc.
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class FwaasDriverBase(object):
    """Abstract base class for Firewall-as-a-Service drivers.

    A driver implements an L3 perimeter firewall that co-exists with the
    L3 agent.  One instance is created per tenant, and (as of the Havana
    release) one firewall policy is associated with each tenant.

    The firewall is modelled with two zones (in Havana): trusted and
    untrusted.  All 'internal' interfaces of a Neutron router are treated
    as trusted; the interface connected to the 'external network' is
    treated as untrusted.  Policy is applied to traffic ingressing or
    egressing interfaces on the trusted zone, i.e. to traffic passing

    - trusted to untrusted zones
    - untrusted to trusted zones
    - trusted to trusted zones

    Policy is NOT applied to untrusted-to-untrusted traffic; that is not
    a problem in Havana since only one interface connects to the external
    network.

    Because policy is applied on the internal interfaces, outbound
    traffic has not yet been NATed to the floating IP, while inbound
    traffic has already been NATed to the internal address before it
    reaches the firewall rules.  Rules that match on floating IPs
    therefore need care.

    Rule addition/deletion/insertion/update are driven by the management
    console: the complete policy is handed to the driver and must be
    applied atomically, never rule-by-rule, to avoid out-of-order
    notifications or partially applied rule sets.
    """

    @abc.abstractmethod
    def create_firewall(self, apply_list, firewall):
        """Create the firewall with the default (drop all) policy.

        The default policy is applied on all interfaces of the trusted
        zone.
        """
        pass

    @abc.abstractmethod
    def delete_firewall(self, apply_list, firewall):
        """Delete the firewall.

        Removes all policies created by this instance and frees up all
        the resources.
        """
        pass

    @abc.abstractmethod
    def update_firewall(self, apply_list, firewall):
        """Apply the policy on all trusted interfaces.

        Removes the previous policy and applies the new policy on all
        trusted interfaces.
        """
        pass

    @abc.abstractmethod
    def apply_default_policy(self, apply_list, firewall):
        """Apply the default policy on all trusted interfaces.

        Removes the current policy and applies the default policy on all
        trusted interfaces.
        """
        pass

View File

@ -1,16 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

View File

@ -1,275 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 Dell Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Rajesh Mohan, Rajesh_Mohan3@Dell.com, DELL Inc.
from neutron.agent.linux import iptables_manager
from neutron.extensions import firewall as fw_ext
from neutron.openstack.common import log as logging
from neutron.services.firewall.drivers import fwaas_base
LOG = logging.getLogger(__name__)
FWAAS_DRIVER_NAME = 'Fwaas iptables driver'
FWAAS_DEFAULT_CHAIN = 'fwaas-default-policy'
INGRESS_DIRECTION = 'ingress'
EGRESS_DIRECTION = 'egress'
CHAIN_NAME_PREFIX = {INGRESS_DIRECTION: 'i',
EGRESS_DIRECTION: 'o'}
""" Firewall rules are applied on internal-interfaces of Neutron router.
The packets ingressing tenant's network will be on the output
direction on internal-interfaces.
"""
IPTABLES_DIR = {INGRESS_DIRECTION: '-o',
EGRESS_DIRECTION: '-i'}
IPV4 = 'ipv4'
IPV6 = 'ipv6'
IP_VER_TAG = {IPV4: 'v4',
IPV6: 'v6'}
class IptablesFwaasDriver(fwaas_base.FwaasDriverBase):
    """IPTables driver for Firewall As A Service.

    Renders a firewall's rule list into per-router, per-IP-version
    iptables chains attached to the router's internal (qr-) interfaces
    via the FORWARD chain, backed by a default drop-all chain.
    """

    def __init__(self):
        LOG.debug(_("Initializing fwaas iptables driver"))

    def create_firewall(self, apply_list, firewall):
        """Install the firewall on every router in apply_list.

        When the firewall is administratively down, only the default
        drop-all policy is installed.

        :raises fw_ext.FirewallInternalDriverError: on iptables failures.
        """
        LOG.debug(_('Creating firewall %(fw_id)s for tenant %(tid)s)'),
                  {'fw_id': firewall['id'], 'tid': firewall['tenant_id']})
        try:
            if firewall['admin_state_up']:
                self._setup_firewall(apply_list, firewall)
            else:
                self.apply_default_policy(apply_list, firewall)
        except (LookupError, RuntimeError):
            # catch known library exceptions and raise Fwaas generic exception
            LOG.exception(_("Failed to create firewall: %s"), firewall['id'])
            raise fw_ext.FirewallInternalDriverError(driver=FWAAS_DRIVER_NAME)

    def delete_firewall(self, apply_list, firewall):
        """Remove the firewall's chains and the default chains everywhere.

        :raises fw_ext.FirewallInternalDriverError: on iptables failures.
        """
        LOG.debug(_('Deleting firewall %(fw_id)s for tenant %(tid)s)'),
                  {'fw_id': firewall['id'], 'tid': firewall['tenant_id']})
        fwid = firewall['id']
        try:
            for router_info in apply_list:
                ipt_mgr = router_info.iptables_manager
                self._remove_chains(fwid, ipt_mgr)
                self._remove_default_chains(ipt_mgr)
                # apply the changes immediately (no defer in firewall path)
                ipt_mgr.defer_apply_off()
        except (LookupError, RuntimeError):
            # catch known library exceptions and raise Fwaas generic exception
            LOG.exception(_("Failed to delete firewall: %s"), fwid)
            raise fw_ext.FirewallInternalDriverError(driver=FWAAS_DRIVER_NAME)

    def update_firewall(self, apply_list, firewall):
        """Re-render the firewall; honors admin_state_up like create.

        :raises fw_ext.FirewallInternalDriverError: on iptables failures.
        """
        LOG.debug(_('Updating firewall %(fw_id)s for tenant %(tid)s)'),
                  {'fw_id': firewall['id'], 'tid': firewall['tenant_id']})
        try:
            if firewall['admin_state_up']:
                self._setup_firewall(apply_list, firewall)
            else:
                self.apply_default_policy(apply_list, firewall)
        except (LookupError, RuntimeError):
            # catch known library exceptions and raise Fwaas generic exception
            LOG.exception(_("Failed to update firewall: %s"), firewall['id'])
            raise fw_ext.FirewallInternalDriverError(driver=FWAAS_DRIVER_NAME)

    def apply_default_policy(self, apply_list, firewall):
        """Replace the firewall's chains with only the default DROP chain.

        :raises fw_ext.FirewallInternalDriverError: on iptables failures.
        """
        LOG.debug(_('Applying firewall %(fw_id)s for tenant %(tid)s)'),
                  {'fw_id': firewall['id'], 'tid': firewall['tenant_id']})
        fwid = firewall['id']
        try:
            for router_info in apply_list:
                ipt_mgr = router_info.iptables_manager

                # the following only updates local memory; no hole in FW
                self._remove_chains(fwid, ipt_mgr)
                self._remove_default_chains(ipt_mgr)

                # create default 'DROP ALL' policy chain
                self._add_default_policy_chain_v4v6(ipt_mgr)
                self._enable_policy_chain(fwid, ipt_mgr)

                # apply the changes immediately (no defer in firewall path)
                ipt_mgr.defer_apply_off()
        except (LookupError, RuntimeError):
            # catch known library exceptions and raise Fwaas generic exception
            LOG.exception(_("Failed to apply default policy on firewall: %s"),
                          fwid)
            raise fw_ext.FirewallInternalDriverError(driver=FWAAS_DRIVER_NAME)

    def _setup_firewall(self, apply_list, firewall):
        # Rebuild the default and per-policy chains on each router, then
        # apply immediately.
        fwid = firewall['id']
        for router_info in apply_list:
            ipt_mgr = router_info.iptables_manager

            # the following only updates local memory; no hole in FW
            self._remove_chains(fwid, ipt_mgr)
            self._remove_default_chains(ipt_mgr)

            # create default 'DROP ALL' policy chain
            self._add_default_policy_chain_v4v6(ipt_mgr)
            # create chain based on configured policy
            self._setup_chains(firewall, ipt_mgr)

            # apply the changes immediately (no defer in firewall path)
            ipt_mgr.defer_apply_off()

    def _get_chain_name(self, fwid, ver, direction):
        # e.g. 'iv4<fwid>' or 'ov6<fwid>'
        return '%s%s%s' % (CHAIN_NAME_PREFIX[direction],
                           IP_VER_TAG[ver],
                           fwid)

    def _setup_chains(self, firewall, ipt_mgr):
        """Create the FWaaS chains from the rules in the policy."""
        fw_rules_list = firewall['firewall_rule_list']
        fwid = firewall['id']

        # default rules for invalid packets and established sessions
        invalid_rule = self._drop_invalid_packets_rule()
        est_rule = self._allow_established_rule()

        for ver in [IPV4, IPV6]:
            if ver == IPV4:
                table = ipt_mgr.ipv4['filter']
            else:
                table = ipt_mgr.ipv6['filter']
            ichain_name = self._get_chain_name(fwid, ver, INGRESS_DIRECTION)
            ochain_name = self._get_chain_name(fwid, ver, EGRESS_DIRECTION)
            for name in [ichain_name, ochain_name]:
                table.add_chain(name)
                table.add_rule(name, invalid_rule)
                table.add_rule(name, est_rule)

        for rule in fw_rules_list:
            if not rule['enabled']:
                continue
            iptbl_rule = self._convert_fwaas_to_iptables_rule(rule)
            if rule['ip_version'] == 4:
                ver = IPV4
                table = ipt_mgr.ipv4['filter']
            else:
                ver = IPV6
                table = ipt_mgr.ipv6['filter']
            ichain_name = self._get_chain_name(fwid, ver, INGRESS_DIRECTION)
            ochain_name = self._get_chain_name(fwid, ver, EGRESS_DIRECTION)
            # the same rule is enforced in both directions
            table.add_rule(ichain_name, iptbl_rule)
            table.add_rule(ochain_name, iptbl_rule)
        self._enable_policy_chain(fwid, ipt_mgr)

    def _remove_default_chains(self, ipt_mgr):
        """Remove the fwaas default policy chains (v4 and v6)."""
        # NOTE: parameter was previously (mis)named 'nsid'; it has always
        # received an iptables manager.
        self._remove_chain_by_name(IPV4, FWAAS_DEFAULT_CHAIN, ipt_mgr)
        self._remove_chain_by_name(IPV6, FWAAS_DEFAULT_CHAIN, ipt_mgr)

    def _remove_chains(self, fwid, ipt_mgr):
        """Remove the firewall's policy chains for all versions/directions."""
        for ver in [IPV4, IPV6]:
            for direction in [INGRESS_DIRECTION, EGRESS_DIRECTION]:
                chain_name = self._get_chain_name(fwid, ver, direction)
                self._remove_chain_by_name(ver, chain_name, ipt_mgr)

    def _add_default_policy_chain_v4v6(self, ipt_mgr):
        # Default chains drop everything that reaches them.
        ipt_mgr.ipv4['filter'].add_chain(FWAAS_DEFAULT_CHAIN)
        ipt_mgr.ipv4['filter'].add_rule(FWAAS_DEFAULT_CHAIN, '-j DROP')
        ipt_mgr.ipv6['filter'].add_chain(FWAAS_DEFAULT_CHAIN)
        ipt_mgr.ipv6['filter'].add_rule(FWAAS_DEFAULT_CHAIN, '-j DROP')

    def _remove_chain_by_name(self, ver, chain_name, ipt_mgr):
        if ver == IPV4:
            ipt_mgr.ipv4['filter'].ensure_remove_chain(chain_name)
        else:
            ipt_mgr.ipv6['filter'].ensure_remove_chain(chain_name)

    def _add_rules_to_chain(self, ipt_mgr, ver, chain_name, rules):
        if ver == IPV4:
            table = ipt_mgr.ipv4['filter']
        else:
            table = ipt_mgr.ipv6['filter']
        for rule in rules:
            table.add_rule(chain_name, rule)

    def _enable_policy_chain(self, fwid, ipt_mgr):
        """Jump FORWARD traffic on qr- interfaces into the policy chains."""
        bname = iptables_manager.binary_name

        for (ver, tbl) in [(IPV4, ipt_mgr.ipv4['filter']),
                           (IPV6, ipt_mgr.ipv6['filter'])]:
            for direction in [INGRESS_DIRECTION, EGRESS_DIRECTION]:
                chain_name = self._get_chain_name(fwid, ver, direction)
                chain_name = iptables_manager.get_chain_name(chain_name)
                if chain_name in tbl.chains:
                    jump_rule = ['%s qr-+ -j %s-%s' % (IPTABLES_DIR[direction],
                                 bname, chain_name)]
                    self._add_rules_to_chain(ipt_mgr, ver, 'FORWARD',
                                             jump_rule)

        # jump to DROP_ALL policy for whatever did not match above, in
        # both directions (previously two copy-pasted blocks)
        chain_name = iptables_manager.get_chain_name(FWAAS_DEFAULT_CHAIN)
        for if_match in ('-o', '-i'):
            jump_rule = ['%s qr-+ -j %s-%s' % (if_match, bname, chain_name)]
            self._add_rules_to_chain(ipt_mgr, IPV4, 'FORWARD', jump_rule)
            self._add_rules_to_chain(ipt_mgr, IPV6, 'FORWARD', jump_rule)

    def _convert_fwaas_to_iptables_rule(self, rule):
        """Translate one FWaaS rule dict into an iptables argument string."""
        # conditional expression replaces the fragile 'and/or' idiom
        action = 'ACCEPT' if rule.get('action') == 'allow' else 'DROP'
        args = [self._protocol_arg(rule.get('protocol')),
                self._port_arg('dport',
                               rule.get('protocol'),
                               rule.get('destination_port')),
                self._port_arg('sport',
                               rule.get('protocol'),
                               rule.get('source_port')),
                self._ip_prefix_arg('s', rule.get('source_ip_address')),
                self._ip_prefix_arg('d', rule.get('destination_ip_address')),
                self._action_arg(action)]

        iptables_rule = ' '.join(args)
        return iptables_rule

    def _drop_invalid_packets_rule(self):
        return '-m state --state INVALID -j DROP'

    def _allow_established_rule(self):
        return '-m state --state ESTABLISHED,RELATED -j ACCEPT'

    def _action_arg(self, action):
        if action:
            return '-j %s' % action
        return ''

    def _protocol_arg(self, protocol):
        if protocol:
            return '-p %s' % protocol
        return ''

    def _port_arg(self, direction, protocol, port):
        # ports only make sense for TCP/UDP
        if not (protocol in ['udp', 'tcp'] and port):
            return ''
        return '--%s %s' % (direction, port)

    def _ip_prefix_arg(self, direction, ip_prefix):
        if ip_prefix:
            return '-%s %s' % (direction, ip_prefix)
        return ''

View File

@ -1,16 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

View File

@ -1,207 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 vArmour Networks Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Gary Duan, gduan@varmour.com, vArmour Networks
from neutron.openstack.common import log as logging
from neutron.services.firewall.agents.varmour import varmour_api
from neutron.services.firewall.agents.varmour import varmour_utils as va_utils
from neutron.services.firewall.drivers import fwaas_base
LOG = logging.getLogger(__name__)
class vArmourFwaasDriver(fwaas_base.FwaasDriverBase):
    # FWaaS driver that renders firewall policy onto a vArmour appliance
    # through its REST API (varmour_api.vArmourRestAPI).

    def __init__(self):
        LOG.debug(_("Initializing fwaas vArmour driver"))

        self.rest = varmour_api.vArmourRestAPI()

    def create_firewall(self, apply_list, firewall):
        """Create the firewall; implemented as a full update."""
        LOG.debug(_('create_firewall (%s)'), firewall['id'])

        return self.update_firewall(apply_list, firewall)

    def update_firewall(self, apply_list, firewall):
        """Re-apply policy, or revert to default when admin-down."""
        LOG.debug(_("update_firewall (%s)"), firewall['id'])

        if firewall['admin_state_up']:
            return self._update_firewall(apply_list, firewall)
        else:
            return self.apply_default_policy(apply_list, firewall)

    def delete_firewall(self, apply_list, firewall):
        """Delete the firewall by reverting to the default policy."""
        LOG.debug(_("delete_firewall (%s)"), firewall['id'])

        return self.apply_default_policy(apply_list, firewall)

    def apply_default_policy(self, apply_list, firewall):
        """Clear this firewall's objects from every router in apply_list."""
        LOG.debug(_("apply_default_policy (%s)"), firewall['id'])

        self.rest.auth()

        for ri in apply_list:
            self._clear_policy(ri, firewall)

        return True

    def _update_firewall(self, apply_list, firewall):
        # Clear then re-create the policy objects on each router.
        LOG.debug(_("Updating firewall (%s)"), firewall['id'])

        self.rest.auth()

        for ri in apply_list:
            self._clear_policy(ri, firewall)
            self._setup_policy(ri, firewall)

        return True

    def _setup_policy(self, ri, fw):
        # create zones no matter if they exist. Interfaces are added by router
        body = {
            'type': 'L3',
            'interface': []
        }

        body['name'] = va_utils.get_trusted_zone_name(ri)
        self.rest.rest_api('POST', va_utils.REST_URL_CONF_ZONE, body)
        body['name'] = va_utils.get_untrusted_zone_name(ri)
        self.rest.rest_api('POST', va_utils.REST_URL_CONF_ZONE, body)
        self.rest.commit()

        # service/address objects are de-duplicated across rules via
        # these key->index maps
        servs = dict()
        addrs = dict()
        for rule in fw['firewall_rule_list']:
            if not rule['enabled']:
                continue

            if rule['ip_version'] == 4:
                service = self._make_service(ri, fw, rule, servs)
                s_addr = self._make_address(ri, fw, rule, addrs, True)
                d_addr = self._make_address(ri, fw, rule, addrs, False)

                policy = va_utils.get_firewall_policy_name(ri, fw, rule)
                z0 = va_utils.get_trusted_zone_name(ri)
                z1 = va_utils.get_untrusted_zone_name(ri)
                # one policy object per zone pair: trusted->trusted,
                # trusted->untrusted, untrusted->trusted
                body = self._make_policy(policy + '_0', rule,
                                         z0, z0, s_addr, d_addr, service)
                self.rest.rest_api('POST', va_utils.REST_URL_CONF_POLICY, body)
                body = self._make_policy(policy + '_1', rule,
                                         z0, z1, s_addr, d_addr, service)
                self.rest.rest_api('POST', va_utils.REST_URL_CONF_POLICY, body)
                body = self._make_policy(policy + '_2', rule,
                                         z1, z0, s_addr, d_addr, service)
                self.rest.rest_api('POST', va_utils.REST_URL_CONF_POLICY, body)

                self.rest.commit()
            else:
                LOG.warn(_("Unsupported IP version rule."))

    def _clear_policy(self, ri, fw):
        # Delete every policy/address/service object created for this
        # firewall, identified by its shared name prefix.
        prefix = va_utils.get_firewall_object_prefix(ri, fw)
        self.rest.del_cfg_objs(va_utils.REST_URL_CONF_POLICY, prefix)
        self.rest.del_cfg_objs(va_utils.REST_URL_CONF_ADDR, prefix)
        self.rest.del_cfg_objs(va_utils.REST_URL_CONF_SERVICE, prefix)

    def _make_service(self, ri, fw, rule, servs):
        # Create (or reuse) a service object for the rule's protocol and
        # ports; returns its name, or None when the rule has no protocol.
        prefix = va_utils.get_firewall_object_prefix(ri, fw)

        if rule.get('protocol'):
            key = rule.get('protocol')
            if rule.get('source_port'):
                key += '-' + rule.get('source_port')
            if rule.get('destination_port'):
                key += '-' + rule.get('destination_port')
        else:
            # no protocol: caller falls back to the 'Any' service match
            return

        if key in servs:
            name = '%s_%d' % (prefix, servs[key])
        else:
            # create new service object with index
            idx = len(servs)
            servs[key] = idx

            name = '%s_%d' % (prefix, idx)
            body = {'name': name}
            self.rest.rest_api('POST',
                               va_utils.REST_URL_CONF_SERVICE,
                               body)
            body = self._make_service_rule(rule)
            self.rest.rest_api('POST',
                               va_utils.REST_URL_CONF +
                               va_utils.REST_SERVICE_NAME % name,
                               body)
            self.rest.commit()

        return name

    def _make_service_rule(self, rule):
        # NOTE(review): these check key *presence*, not truthiness (unlike
        # _make_service above); a key present with value None would still
        # be copied into the body — confirm against callers' data.
        body = {
            'name': '1',
            'protocol': rule.get('protocol')
        }
        if 'source_port' in rule:
            body['source-start'] = rule['source_port']
            body['source-end'] = rule['source_port']
        if 'destination_port' in rule:
            body['dest-start'] = rule['destination_port']
            body['dest-end'] = rule['destination_port']

        return body

    def _make_address(self, ri, fw, rule, addrs, is_src):
        # Create (or reuse) an IPv4 address object for the rule's source
        # or destination prefix; returns its name, or None when unset.
        prefix = va_utils.get_firewall_object_prefix(ri, fw)

        if is_src:
            key = rule.get('source_ip_address')
        else:
            key = rule.get('destination_ip_address')

        if not key:
            return

        if key in addrs:
            name = '%s_%d' % (prefix, addrs[key])
        else:
            # create new address object with idx
            idx = len(addrs)
            addrs[key] = idx

            name = '%s_%d' % (prefix, idx)
            body = {
                'name': name,
                'type': 'ipv4',
                'ipv4': key
            }
            self.rest.rest_api('POST', va_utils.REST_URL_CONF_ADDR, body)
            self.rest.commit()

        return name

    def _make_policy(self, name, rule, zone0, zone1, s_addr, d_addr, service):
        # Build the REST body for one zone0->zone1 policy; 'Any'
        # wildcards any unspecified address/service match.
        body = {
            'name': name,
            'action': 'permit' if rule.get('action') == 'allow' else 'deny',
            'from': zone0,
            'to': zone1,
            'match-source-address': [s_addr or 'Any'],
            'match-dest-address': [d_addr or 'Any'],
            'match-service': [service or 'Any']
        }

        return body

View File

@ -1,299 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 Big Switch Networks, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Sumit Naiksatam, sumitnaiksatam@gmail.com, Big Switch Networks, Inc.
from oslo.config import cfg
from neutron.common import exceptions as n_exception
from neutron.common import rpc_compat
from neutron.common import topics
from neutron import context as neutron_context
from neutron.db import api as qdbapi
from neutron.db.firewall import firewall_db
from neutron.extensions import firewall as fw_ext
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants as const
LOG = logging.getLogger(__name__)
class FirewallCallbacks(rpc_compat.RpcCallback):
    """Server-side RPC endpoints invoked by the FWaaS agent."""

    RPC_API_VERSION = '1.0'

    def __init__(self, plugin):
        super(FirewallCallbacks, self).__init__()
        self.plugin = plugin

    def set_firewall_status(self, context, firewall_id, status, **kwargs):
        """Agent uses this to set a firewall's status.

        Returns True when the status was applied, False when it was
        rejected (firewall pending delete, or unrecognized status).
        """
        LOG.debug(_("set_firewall_status() called"))
        with context.session.begin(subtransactions=True):
            fw_db = self.plugin._get_firewall(context, firewall_id)
            # ignore changing status if firewall expects to be deleted
            # That case means that while some pending operation has been
            # performed on the backend, neutron server received delete request
            # and changed firewall status to const.PENDING_DELETE
            if fw_db.status == const.PENDING_DELETE:
                LOG.debug(_("Firewall %(fw_id)s in PENDING_DELETE state, "
                            "not changing to %(status)s"),
                          {'fw_id': firewall_id, 'status': status})
                return False
            #TODO(xuhanp): Remove INACTIVE status and use DOWN to
            # be consistent with other network resources
            if status in (const.ACTIVE, const.INACTIVE, const.DOWN):
                fw_db.status = status
                return True
            else:
                fw_db.status = const.ERROR
                return False

    def firewall_deleted(self, context, firewall_id, **kwargs):
        """Agent uses this to indicate firewall is deleted.

        Returns True when the DB row was removed, False when the agent's
        deletion was unexpected (firewall moved to ERROR instead).
        """
        LOG.debug(_("firewall_deleted() called"))
        with context.session.begin(subtransactions=True):
            fw_db = self.plugin._get_firewall(context, firewall_id)
            # allow to delete firewalls in ERROR state
            if fw_db.status in (const.PENDING_DELETE, const.ERROR):
                self.plugin.delete_db_firewall_object(context, firewall_id)
                return True
            else:
                LOG.warn(_('Firewall %(fw)s unexpectedly deleted by agent, '
                           'status was %(status)s'),
                         {'fw': firewall_id, 'status': fw_db.status})
                fw_db.status = const.ERROR
                return False

    def get_firewalls_for_tenant(self, context, **kwargs):
        """Agent uses this to get all firewalls and rules for a tenant."""
        LOG.debug(_("get_firewalls_for_tenant() called"))
        return [
            self.plugin._make_firewall_dict_with_rules(context, fw['id'])
            for fw in self.plugin.get_firewalls(context)
        ]

    def get_firewalls_for_tenant_without_rules(self, context, **kwargs):
        """Agent uses this to get all firewalls for a tenant."""
        LOG.debug(_("get_firewalls_for_tenant_without_rules() called"))
        # list() replaces a pass-through comprehension while keeping the
        # defensive copy of the plugin's result
        return list(self.plugin.get_firewalls(context))

    def get_tenants_with_firewalls(self, context, **kwargs):
        """Agent uses this to get all tenants that have firewalls."""
        LOG.debug(_("get_tenants_with_firewalls() called"))
        ctx = neutron_context.get_admin_context()
        fw_list = self.plugin.get_firewalls(ctx)
        # de-duplicate tenant ids across firewalls
        return list(set(fw['tenant_id'] for fw in fw_list))
class FirewallAgentApi(rpc_compat.RpcProxy):
    """Plugin side of plugin to agent RPC API."""

    API_VERSION = '1.0'

    def __init__(self, topic, host):
        super(FirewallAgentApi, self).__init__(topic, self.API_VERSION)
        self.host = host

    def _fanout(self, context, method, firewall):
        # All three notifications share the same fanout shape; only the
        # RPC method name differs.
        return self.fanout_cast(
            context,
            self.make_msg(method, firewall=firewall, host=self.host),
            topic=self.topic
        )

    def create_firewall(self, context, firewall):
        """Fan out a create_firewall notification to the agents."""
        return self._fanout(context, 'create_firewall', firewall)

    def update_firewall(self, context, firewall):
        """Fan out an update_firewall notification to the agents."""
        return self._fanout(context, 'update_firewall', firewall)

    def delete_firewall(self, context, firewall):
        """Fan out a delete_firewall notification to the agents."""
        return self._fanout(context, 'delete_firewall', firewall)
class FirewallCountExceeded(n_exception.Conflict):
    """Raised when a tenant tries to create more than one firewall.

    The reference implementation supports exactly one firewall per
    tenant, so a create request from a tenant that already owns a
    firewall is a conflict.
    """
    message = _("Exceeded allowed count of firewalls for tenant "
                "%(tenant_id)s. Only one firewall is supported per tenant.")
class FirewallPlugin(firewall_db.Firewall_db_mixin):
    """Implementation of the Neutron Firewall Service Plugin.

    This class manages the workflow of FWaaS request/response.
    Most DB related works are implemented in class
    firewall_db.Firewall_db_mixin; methods here drive the PENDING_*
    status transitions and notify the agents over RPC.
    """
    supported_extension_aliases = ["fwaas"]

    def __init__(self):
        """Do the initialization for the firewall service plugin here."""
        qdbapi.register_models()

        self.endpoints = [FirewallCallbacks(self)]

        self.conn = rpc_compat.create_connection(new=True)
        self.conn.create_consumer(
            topics.FIREWALL_PLUGIN, self.endpoints, fanout=False)
        self.conn.consume_in_threads()

        self.agent_rpc = FirewallAgentApi(
            topics.L3_AGENT,
            cfg.CONF.host
        )

    def _make_firewall_dict_with_rules(self, context, firewall_id):
        """Return the firewall dict with its policy's rules inlined."""
        firewall = self.get_firewall(context, firewall_id)
        fw_policy_id = firewall['firewall_policy_id']
        if fw_policy_id:
            fw_policy = self.get_firewall_policy(context, fw_policy_id)
            fw_rules_list = [self.get_firewall_rule(
                context, rule_id) for rule_id in fw_policy['firewall_rules']]
            firewall['firewall_rule_list'] = fw_rules_list
        else:
            firewall['firewall_rule_list'] = []
        # FIXME(Sumit): If the size of the firewall object we are creating
        # here exceeds the largest message size supported by rabbit/qpid
        # then we will have a problem.
        return firewall

    def _rpc_update_firewall(self, context, firewall_id):
        # Mark the firewall PENDING_UPDATE and push it (with rules) to
        # the agents.
        status_update = {"firewall": {"status": const.PENDING_UPDATE}}
        fw = super(FirewallPlugin, self).update_firewall(context, firewall_id,
                                                         status_update)
        if fw:
            fw_with_rules = (
                self._make_firewall_dict_with_rules(context,
                                                    firewall_id))
            self.agent_rpc.update_firewall(context, fw_with_rules)

    def _rpc_update_firewall_policy(self, context, firewall_policy_id):
        # Push an update for every firewall that uses this policy.
        firewall_policy = self.get_firewall_policy(context, firewall_policy_id)
        if firewall_policy:
            for firewall_id in firewall_policy['firewall_list']:
                self._rpc_update_firewall(context, firewall_id)

    def _ensure_update_firewall(self, context, firewall_id):
        # Reject updates while the firewall is in any PENDING_* state.
        fwall = self.get_firewall(context, firewall_id)
        if fwall['status'] in [const.PENDING_CREATE,
                               const.PENDING_UPDATE,
                               const.PENDING_DELETE]:
            raise fw_ext.FirewallInPendingState(firewall_id=firewall_id,
                                                pending_state=fwall['status'])

    def _ensure_update_firewall_policy(self, context, firewall_policy_id):
        # Reject a policy update if any associated firewall is pending.
        firewall_policy = self.get_firewall_policy(context, firewall_policy_id)
        if firewall_policy and 'firewall_list' in firewall_policy:
            for firewall_id in firewall_policy['firewall_list']:
                self._ensure_update_firewall(context, firewall_id)

    def _ensure_update_firewall_rule(self, context, firewall_rule_id):
        # Reject a rule update if the rule's policy has pending firewalls.
        fw_rule = self.get_firewall_rule(context, firewall_rule_id)
        # get() collapses the original "key present and truthy" test
        if fw_rule.get('firewall_policy_id'):
            self._ensure_update_firewall_policy(context,
                                                fw_rule['firewall_policy_id'])

    def create_firewall(self, context, firewall):
        """Create a firewall (at most one per tenant) and notify agents.

        :raises FirewallCountExceeded: if the tenant already has one.
        """
        LOG.debug(_("create_firewall() called"))
        tenant_id = self._get_tenant_id_for_create(context,
                                                   firewall['firewall'])
        fw_count = self.get_firewalls_count(context,
                                            filters={'tenant_id': [tenant_id]})
        if fw_count:
            raise FirewallCountExceeded(tenant_id=tenant_id)
        firewall['firewall']['status'] = const.PENDING_CREATE
        fw = super(FirewallPlugin, self).create_firewall(context, firewall)
        fw_with_rules = (
            self._make_firewall_dict_with_rules(context, fw['id']))
        self.agent_rpc.create_firewall(context, fw_with_rules)
        return fw

    def update_firewall(self, context, id, firewall):
        """Update a firewall and notify agents (rejected while pending)."""
        LOG.debug(_("update_firewall() called"))
        self._ensure_update_firewall(context, id)
        firewall['firewall']['status'] = const.PENDING_UPDATE
        fw = super(FirewallPlugin, self).update_firewall(context, id, firewall)
        fw_with_rules = (
            self._make_firewall_dict_with_rules(context, fw['id']))
        self.agent_rpc.update_firewall(context, fw_with_rules)
        return fw

    def delete_db_firewall_object(self, context, id):
        """Remove the DB row once the agent has confirmed deletion."""
        firewall = self.get_firewall(context, id)
        if firewall['status'] == const.PENDING_DELETE:
            super(FirewallPlugin, self).delete_firewall(context, id)

    def delete_firewall(self, context, id):
        """Mark the firewall PENDING_DELETE and ask agents to remove it.

        The DB row is removed later via delete_db_firewall_object() when
        the agent reports back.
        """
        LOG.debug(_("delete_firewall() called"))
        status_update = {"firewall": {"status": const.PENDING_DELETE}}
        fw = super(FirewallPlugin, self).update_firewall(context, id,
                                                         status_update)
        fw_with_rules = (
            self._make_firewall_dict_with_rules(context, fw['id']))
        self.agent_rpc.delete_firewall(context, fw_with_rules)

    def update_firewall_policy(self, context, id, firewall_policy):
        """Update a policy and push the change to all of its firewalls."""
        LOG.debug(_("update_firewall_policy() called"))
        self._ensure_update_firewall_policy(context, id)
        fwp = super(FirewallPlugin,
                    self).update_firewall_policy(context, id, firewall_policy)
        self._rpc_update_firewall_policy(context, id)
        return fwp

    def update_firewall_rule(self, context, id, firewall_rule):
        """Update a rule and push the change via its policy, if any."""
        LOG.debug(_("update_firewall_rule() called"))
        self._ensure_update_firewall_rule(context, id)
        fwr = super(FirewallPlugin,
                    self).update_firewall_rule(context, id, firewall_rule)
        firewall_policy_id = fwr['firewall_policy_id']
        if firewall_policy_id:
            self._rpc_update_firewall_policy(context, firewall_policy_id)
        return fwr

    def insert_rule(self, context, id, rule_info):
        """Insert a rule into a policy and push the updated policy."""
        LOG.debug(_("insert_rule() called"))
        self._ensure_update_firewall_policy(context, id)
        fwp = super(FirewallPlugin,
                    self).insert_rule(context, id, rule_info)
        self._rpc_update_firewall_policy(context, id)
        return fwp

    def remove_rule(self, context, id, rule_info):
        """Remove a rule from a policy and push the updated policy."""
        LOG.debug(_("remove_rule() called"))
        self._ensure_update_firewall_policy(context, id)
        fwp = super(FirewallPlugin,
                    self).remove_rule(context, id, rule_info)
        self._rpc_update_firewall_policy(context, id)
        return fwp

View File

@ -1,30 +0,0 @@
This service plugin implements the L3 routing functionality (the router
and floatingip resources) that, in releases before Havana, was provided by
core plugins (openvswitch, linuxbridge, etc.).
Core plugins can now choose not to implement L3 routing functionality and
instead delegate that to the L3 routing service plugin.
The required changes to a core plugin are in that case:
- Do not inherit 'l3_db.L3_NAT_db_mixin' (or its descendants like extraroute)
anymore.
- Remove "router" from 'supported_extension_aliases'.
- Modify any 'self' references to members in L3_NAT_db_mixin to instead use
'manager.NeutronManager.get_service_plugins().get(constants.L3_ROUTER_NAT)'
For example,
self.prevent_l3_port_deletion(...)
becomes something like
plugin = manager.NeutronManager.get_service_plugins().get(
constants.L3_ROUTER_NAT)
if plugin:
plugin.prevent_l3_port_deletion(...)
If the core plugin has relied on the L3Agent the following must also be changed:
- Do not inherit 'l3_rpc_base.L3RpcCallbackMixin' in any '*RpcCallbacks' class.
- Do not be a consumer of the topics.L3PLUGIN topic for RPC.
To use the L3 routing service plugin, add
'neutron.services.l3_router.l3_router_plugin.L3RouterPlugin'
to 'service_plugins' in '/etc/neutron/neutron.conf'.
That is,
service_plugins = neutron.services.l3_router.l3_router_plugin.L3RouterPlugin

View File

@ -1,16 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

View File

@ -1,135 +0,0 @@
# Copyright (c) 2014 Cisco Systems Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Arvind Somya (asomya@cisco.com), Cisco Systems Inc.
from neutron.db import api as qdbapi
from neutron.db import db_base_plugin_v2
from neutron.db import extraroute_db
from neutron.db import l3_gwmode_db
from neutron.db import model_base
from neutron.openstack.common import excutils
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants
from neutron.plugins.ml2.drivers.cisco.apic import apic_manager
LOG = logging.getLogger(__name__)
class ApicL3ServicePlugin(db_base_plugin_v2.NeutronDbPluginV2,
                          db_base_plugin_v2.CommonDbMixin,
                          extraroute_db.ExtraRoute_db_mixin,
                          l3_gwmode_db.L3_NAT_db_mixin):
    """Implementation of the APIC L3 Router Service Plugin.

    This class implements a L3 service plugin that provides
    internal gateway functionality for the Cisco APIC (Application
    Policy Infrastructure Controller).
    """
    supported_extension_aliases = ["router", "ext-gw-mode", "extraroute"]

    def __init__(self):
        super(ApicL3ServicePlugin, self).__init__()
        qdbapi.register_models(base=model_base.BASEV2)
        # Handle to the APIC fabric; all EPG/contract operations below go
        # through this manager.
        self.manager = apic_manager.APICManager()

    @staticmethod
    def get_plugin_type():
        # Advertise this plugin as the L3 routing/NAT service provider.
        return constants.L3_ROUTER_NAT

    @staticmethod
    def get_plugin_description():
        """Returns string description of the plugin."""
        return _("L3 Router Service Plugin for basic L3 using the APIC")

    def _add_epg_to_contract(self, tenant_id, epg, contract):
        """Add an End Point Group(EPG) to a contract as provider/consumer."""
        # The first EPG attached to the tenant contract becomes the provider;
        # every subsequent EPG is added as a consumer.
        if self.manager.db.get_provider_contract():
            # Set this network's EPG as a consumer
            self.manager.set_contract_for_epg(tenant_id, epg.epg_id,
                                              contract.contract_id)
        else:
            # Set this network's EPG as a provider
            self.manager.set_contract_for_epg(tenant_id, epg.epg_id,
                                              contract.contract_id,
                                              provider=True)

    def add_router_interface(self, context, router_id, interface_info):
        """Attach a subnet to a router.

        The APIC-side state (tenant contract + network EPG wiring) is set up
        first; if the subsequent Neutron DB update fails, the contract
        association is rolled back before the exception is re-raised.
        Note: only the 'subnet_id' form of interface_info is handled here.
        """
        tenant_id = context.tenant_id
        subnet_id = interface_info['subnet_id']
        LOG.debug("Attaching subnet %(subnet_id)s to "
                  "router %(router_id)s" % {'subnet_id': subnet_id,
                                            'router_id': router_id})
        # Get network for this subnet
        subnet = self.get_subnet(context, subnet_id)
        network_id = subnet['network_id']
        net_name = self.get_network(context, network_id)['name']
        # Setup tenant filters and contracts
        contract = self.manager.create_tenant_contract(tenant_id)
        # Check for a provider EPG
        epg = self.manager.ensure_epg_created_for_network(tenant_id,
                                                          network_id,
                                                          net_name)
        self._add_epg_to_contract(tenant_id, epg, contract)
        # Create DB port
        try:
            return super(ApicL3ServicePlugin, self).add_router_interface(
                context, router_id, interface_info)
        except Exception:
            LOG.error(_("Error attaching subnet %(subnet_id)s to "
                        "router %(router_id)s") % {'subnet_id': subnet_id,
                                                   'router_id': router_id})
            # Undo the APIC association so fabric state matches the DB.
            with excutils.save_and_reraise_exception():
                self.manager.delete_contract_for_epg(tenant_id, epg.epg_id,
                                                     contract.contract_id,
                                                     provider=epg.provider)

    def remove_router_interface(self, context, router_id, interface_info):
        """Detach a subnet from a router.

        Mirror image of add_router_interface: the APIC contract association
        is removed first; if the Neutron DB update then fails, the
        association is re-added before re-raising.
        """
        tenant_id = context.tenant_id
        subnet_id = interface_info['subnet_id']
        LOG.debug("Detaching subnet %(subnet_id)s from "
                  "router %(router_id)s" % {'subnet_id': subnet_id,
                                            'router_id': router_id})
        # Get network for this subnet
        subnet = self.get_subnet(context, subnet_id)
        network_id = subnet['network_id']
        network = self.get_network(context, network_id)
        contract = self.manager.create_tenant_contract(tenant_id)
        epg = self.manager.ensure_epg_created_for_network(tenant_id,
                                                          network_id,
                                                          network['name'])
        # Delete contract for this epg
        self.manager.delete_contract_for_epg(tenant_id, epg.epg_id,
                                             contract.contract_id,
                                             provider=epg.provider)
        try:
            return super(ApicL3ServicePlugin, self).remove_router_interface(
                context, router_id, interface_info)
        except Exception:
            LOG.error(_("Error detaching subnet %(subnet_id)s from "
                        "router %(router_id)s") % {'subnet_id': subnet_id,
                                                   'router_id': router_id})
            # Re-attach the EPG to the contract to restore fabric state.
            with excutils.save_and_reraise_exception():
                self._add_epg_to_contract(tenant_id, epg, contract)

View File

@ -1,98 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Bob Melander, Cisco Systems, Inc.
from oslo.config import cfg
from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api
from neutron.common import constants as q_const
from neutron.common import rpc_compat
from neutron.common import topics
from neutron.db import api as qdbapi
from neutron.db import db_base_plugin_v2
from neutron.db import extraroute_db
from neutron.db import l3_agentschedulers_db
from neutron.db import l3_gwmode_db
from neutron.db import l3_rpc_base
from neutron.db import model_base
from neutron.openstack.common import importutils
from neutron.plugins.common import constants
class L3RouterPluginRpcCallbacks(rpc_compat.RpcCallback,
                                 l3_rpc_base.L3RpcCallbackMixin):
    """Server-side RPC endpoint for L3 agents; logic lives in the mixin."""

    RPC_API_VERSION = '1.1'
class L3RouterPlugin(db_base_plugin_v2.CommonDbMixin,
                     extraroute_db.ExtraRoute_db_mixin,
                     l3_gwmode_db.L3_NAT_db_mixin,
                     l3_agentschedulers_db.L3AgentSchedulerDbMixin):
    """Implementation of the Neutron L3 Router Service Plugin.

    This class implements a L3 service plugin that provides
    router and floatingip resources and manages associated
    request/response.
    All DB related work is implemented in classes
    l3_db.L3_NAT_db_mixin and extraroute_db.ExtraRoute_db_mixin.
    """
    supported_extension_aliases = ["router", "ext-gw-mode",
                                   "extraroute", "l3_agent_scheduler"]

    def __init__(self):
        qdbapi.register_models(base=model_base.BASEV2)
        self.setup_rpc()
        # Scheduler assigning routers to L3 agents; driver class is
        # configurable via the 'router_scheduler_driver' option.
        self.router_scheduler = importutils.import_object(
            cfg.CONF.router_scheduler_driver)

    def setup_rpc(self):
        """Create the RPC consumer and register the L3 agent notifier."""
        # RPC support
        self.topic = topics.L3PLUGIN
        self.conn = rpc_compat.create_connection(new=True)
        self.agent_notifiers.update(
            {q_const.AGENT_TYPE_L3: l3_rpc_agent_api.L3AgentNotifyAPI()})
        self.endpoints = [L3RouterPluginRpcCallbacks()]
        self.conn.create_consumer(self.topic, self.endpoints,
                                  fanout=False)
        self.conn.consume_in_threads()

    def get_plugin_type(self):
        # Advertise this plugin as the L3 routing/NAT service provider.
        return constants.L3_ROUTER_NAT

    def get_plugin_description(self):
        """returns string description of the plugin."""
        return ("L3 Router Service Plugin for basic L3 forwarding"
                " between (L2) Neutron networks and access to external"
                " networks via a NAT gateway.")

    def create_floatingip(self, context, floatingip):
        """Create floating IP.

        :param context: Neutron request context
        :param floatingip: data of the floating IP being created
        :returns: A floating IP object on success

        As the l3 router plugin asynchronously creates floating IPs
        leveraging the l3 agent, the initial status for the floating
        IP object will be DOWN.
        """
        return super(L3RouterPlugin, self).create_floatingip(
            context, floatingip,
            initial_status=q_const.FLOATINGIP_STATUS_DOWN)

View File

@ -1,16 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

View File

@ -1,72 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mark McClain, DreamHost
import sys
import eventlet
eventlet.monkey_patch()
from oslo.config import cfg
from neutron.agent.common import config
from neutron.agent.linux import interface
from neutron.common import config as common_config
from neutron.common import rpc_compat
from neutron.common import topics
from neutron.openstack.common import service
from neutron.services.loadbalancer.agent import agent_manager as manager
OPTS = [
cfg.IntOpt(
'periodic_interval',
default=10,
help=_('Seconds between periodic task runs')
)
]
class LbaasAgentService(rpc_compat.Service):
    """RPC service that drives the LBaaS agent manager's periodic tasks."""

    def start(self):
        """Start the service, then schedule the manager's periodic tasks."""
        super(LbaasAgentService, self).start()
        interval = cfg.CONF.periodic_interval
        self.tg.add_timer(interval,
                          self.manager.run_periodic_tasks,
                          None,
                          None)
def main():
    """Entry point for the LBaaS agent: load config, build the manager,
    and run the agent service until it exits.
    """
    conf = cfg.CONF
    conf.register_opts(OPTS)
    conf.register_opts(manager.OPTS)
    # Import interface options just in case the driver uses namespaces.
    conf.register_opts(interface.OPTS)
    config.register_interface_driver_opts_helper(conf)
    config.register_agent_state_opts_helper(conf)
    config.register_root_helper(conf)
    common_config.init(sys.argv[1:])
    config.setup_logging(conf)
    agent_manager = manager.LbaasAgentManager(conf)
    agent_service = LbaasAgentService(host=conf.host,
                                      topic=topics.LOADBALANCER_AGENT,
                                      manager=agent_manager)
    service.launch(agent_service).wait()

View File

@ -1,100 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mark McClain, DreamHost
from neutron.common import rpc_compat
class LbaasAgentApi(rpc_compat.RpcProxy):
    """Agent side of the Agent to Plugin RPC API."""

    API_VERSION = '2.0'
    # history
    #   1.0 Initial version
    #   2.0 Generic API for agent based drivers
    #       - get_logical_device() handling changed on plugin side;
    #       - pool_deployed() and update_status() methods added;

    def __init__(self, topic, context, host):
        super(LbaasAgentApi, self).__init__(topic, self.API_VERSION)
        self.context = context
        self.host = host

    def _plugin_call(self, method, **kwargs):
        # Every public method below is a synchronous call on the plugin
        # topic; funnel them all through this helper.
        return self.call(
            self.context,
            self.make_msg(method, **kwargs),
            topic=self.topic
        )

    def get_ready_devices(self):
        return self._plugin_call('get_ready_devices', host=self.host)

    def pool_destroyed(self, pool_id):
        return self._plugin_call('pool_destroyed', pool_id=pool_id)

    def pool_deployed(self, pool_id):
        return self._plugin_call('pool_deployed', pool_id=pool_id)

    def get_logical_device(self, pool_id):
        return self._plugin_call('get_logical_device', pool_id=pool_id)

    def update_status(self, obj_type, obj_id, status):
        return self._plugin_call('update_status', obj_type=obj_type,
                                 obj_id=obj_id, status=status)

    def plug_vip_port(self, port_id):
        return self._plugin_call('plug_vip_port', port_id=port_id,
                                 host=self.host)

    def unplug_vip_port(self, port_id):
        return self._plugin_call('unplug_vip_port', port_id=port_id,
                                 host=self.host)

    def update_pool_stats(self, pool_id, stats):
        return self._plugin_call('update_pool_stats', pool_id=pool_id,
                                 stats=stats, host=self.host)

View File

@ -1,98 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 OpenStack Foundation. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class AgentDeviceDriver(object):
    """Abstract device driver that defines the API required by LBaaS agent."""

    @abc.abstractmethod
    def get_name(cls):
        """Returns unique name across all LBaaS device drivers."""
        # NOTE(review): declared with 'cls' but not decorated as a
        # classmethod -- presumably concrete drivers implement this as a
        # classmethod/staticmethod; confirm against implementations.
        pass

    @abc.abstractmethod
    def deploy_instance(self, logical_config):
        """Fully deploys a loadbalancer instance from a given config."""
        pass

    @abc.abstractmethod
    def undeploy_instance(self, pool_id):
        """Fully undeploys the loadbalancer instance."""
        pass

    @abc.abstractmethod
    def get_stats(self, pool_id):
        """Return statistics for the loadbalancer instance of the pool."""
        pass

    def remove_orphans(self, known_pool_ids):
        """Remove deployed instances not in known_pool_ids (optional)."""
        # Not all drivers will support this
        raise NotImplementedError()

    @abc.abstractmethod
    def create_vip(self, vip):
        """Create a VIP on the device."""
        pass

    @abc.abstractmethod
    def update_vip(self, old_vip, vip):
        """Update a VIP on the device."""
        pass

    @abc.abstractmethod
    def delete_vip(self, vip):
        """Delete a VIP from the device."""
        pass

    @abc.abstractmethod
    def create_pool(self, pool):
        """Create a pool on the device."""
        pass

    @abc.abstractmethod
    def update_pool(self, old_pool, pool):
        """Update a pool on the device."""
        pass

    @abc.abstractmethod
    def delete_pool(self, pool):
        """Delete a pool from the device."""
        pass

    @abc.abstractmethod
    def create_member(self, member):
        """Create a pool member on the device."""
        pass

    @abc.abstractmethod
    def update_member(self, old_member, member):
        """Update a pool member on the device."""
        pass

    @abc.abstractmethod
    def delete_member(self, member):
        """Delete a pool member from the device."""
        pass

    @abc.abstractmethod
    def create_pool_health_monitor(self, health_monitor, pool_id):
        """Associate a health monitor with a pool on the device."""
        pass

    @abc.abstractmethod
    def update_pool_health_monitor(self,
                                   old_health_monitor,
                                   health_monitor,
                                   pool_id):
        """Update a pool's health monitor association on the device."""
        pass

    @abc.abstractmethod
    def delete_pool_health_monitor(self, health_monitor, pool_id):
        """Remove a health monitor association from a pool on the device."""
        pass

View File

@ -1,338 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mark McClain, DreamHost
from oslo.config import cfg
from neutron.agent import rpc as agent_rpc
from neutron.common import constants as n_const
from neutron.common import exceptions as n_exc
from neutron.common import rpc_compat
from neutron.common import topics
from neutron import context
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import loopingcall
from neutron.openstack.common import periodic_task
from neutron.plugins.common import constants
from neutron.services.loadbalancer.agent import agent_api
LOG = logging.getLogger(__name__)
OPTS = [
cfg.MultiStrOpt(
'device_driver',
default=['neutron.services.loadbalancer.drivers'
'.haproxy.namespace_driver.HaproxyNSDriver'],
help=_('Drivers used to manage loadbalancing devices'),
),
]
class DeviceNotFoundOnAgent(n_exc.NotFound):
msg = _('Unknown device with pool_id %(pool_id)s')
class LbaasAgentManager(rpc_compat.RpcCallback, periodic_task.PeriodicTasks):
    """Manager for the LBaaS agent.

    Receives RPC calls from the LBaaS plugin, dispatches them to the
    configured device drivers, reports operational status back to the
    plugin, and periodically reconciles local state with the plugin.
    """

    RPC_API_VERSION = '2.0'
    # history
    #   1.0 Initial version
    #   1.1 Support agent_updated call
    #   2.0 Generic API for agent based drivers
    #       - modify/reload/destroy_pool methods were removed;
    #       - added methods to handle create/update/delete for every lbaas
    #         object individually;

    def __init__(self, conf):
        super(LbaasAgentManager, self).__init__()
        self.conf = conf
        self.context = context.get_admin_context_without_session()
        # RPC proxy back to the LBaaS plugin.
        self.plugin_rpc = agent_api.LbaasAgentApi(
            topics.LOADBALANCER_PLUGIN,
            self.context,
            self.conf.host
        )
        self._load_drivers()
        # State periodically reported to the plugin; 'start_flag' is popped
        # after the first successful report.
        self.agent_state = {
            'binary': 'neutron-lbaas-agent',
            'host': conf.host,
            'topic': topics.LOADBALANCER_AGENT,
            'configurations': {'device_drivers': self.device_drivers.keys()},
            'agent_type': n_const.AGENT_TYPE_LOADBALANCER,
            'start_flag': True}
        self.admin_state_up = True
        self._setup_state_rpc()
        self.needs_resync = False
        # pool_id->device_driver_name mapping used to store known instances
        self.instance_mapping = {}

    def _load_drivers(self):
        """Instantiate every configured device driver, keyed by its name.

        Exits the process on import failure or duplicate driver names.
        """
        self.device_drivers = {}
        for driver in self.conf.device_driver:
            try:
                driver_inst = importutils.import_object(
                    driver,
                    self.conf,
                    self.plugin_rpc
                )
            except ImportError:
                msg = _('Error importing loadbalancer device driver: %s')
                raise SystemExit(msg % driver)
            driver_name = driver_inst.get_name()
            if driver_name not in self.device_drivers:
                self.device_drivers[driver_name] = driver_inst
            else:
                msg = _('Multiple device drivers with the same name found: %s')
                raise SystemExit(msg % driver_name)

    def _setup_state_rpc(self):
        """Start the periodic agent-state heartbeat if an interval is set."""
        self.state_rpc = agent_rpc.PluginReportStateAPI(
            topics.LOADBALANCER_PLUGIN)
        report_interval = self.conf.AGENT.report_interval
        if report_interval:
            heartbeat = loopingcall.FixedIntervalLoopingCall(
                self._report_state)
            heartbeat.start(interval=report_interval)

    def _report_state(self):
        """Send the current agent state (incl. instance count) to the plugin."""
        try:
            instance_count = len(self.instance_mapping)
            self.agent_state['configurations']['instances'] = instance_count
            self.state_rpc.report_state(self.context,
                                        self.agent_state)
            # Only the very first report carries 'start_flag'.
            self.agent_state.pop('start_flag', None)
        except Exception:
            LOG.exception(_("Failed reporting state!"))

    def initialize_service_hook(self, started_by):
        """Service start hook: do an initial full state sync."""
        self.sync_state()

    @periodic_task.periodic_task
    def periodic_resync(self, context):
        # Re-sync once after any failure flagged needs_resync.
        if self.needs_resync:
            self.needs_resync = False
            self.sync_state()

    @periodic_task.periodic_task(spacing=6)
    def collect_stats(self, context):
        # Push per-pool statistics from each driver up to the plugin.
        for pool_id, driver_name in self.instance_mapping.items():
            driver = self.device_drivers[driver_name]
            try:
                stats = driver.get_stats(pool_id)
                if stats:
                    self.plugin_rpc.update_pool_stats(pool_id, stats)
            except Exception:
                LOG.exception(_('Error updating statistics on pool %s'),
                              pool_id)
                self.needs_resync = True

    def sync_state(self):
        """Reconcile locally deployed pools with the plugin's ready devices.

        Destroys instances the plugin no longer knows about, (re)deploys
        every ready one, then removes driver-level orphans.
        """
        known_instances = set(self.instance_mapping.keys())
        try:
            ready_instances = set(self.plugin_rpc.get_ready_devices())
            for deleted_id in known_instances - ready_instances:
                self._destroy_pool(deleted_id)
            for pool_id in ready_instances:
                self._reload_pool(pool_id)
        except Exception:
            LOG.exception(_('Unable to retrieve ready devices'))
            self.needs_resync = True
        self.remove_orphans()

    def _get_driver(self, pool_id):
        """Return the driver managing pool_id or raise DeviceNotFoundOnAgent."""
        if pool_id not in self.instance_mapping:
            raise DeviceNotFoundOnAgent(pool_id=pool_id)
        driver_name = self.instance_mapping[pool_id]
        return self.device_drivers[driver_name]

    def _reload_pool(self, pool_id):
        """Fetch the pool's logical config and (re)deploy it via its driver."""
        try:
            logical_config = self.plugin_rpc.get_logical_device(pool_id)
            driver_name = logical_config['driver']
            if driver_name not in self.device_drivers:
                LOG.error(_('No device driver '
                            'on agent: %s.'), driver_name)
                self.plugin_rpc.update_status(
                    'pool', pool_id, constants.ERROR)
                return
            self.device_drivers[driver_name].deploy_instance(logical_config)
            self.instance_mapping[pool_id] = driver_name
            self.plugin_rpc.pool_deployed(pool_id)
        except Exception:
            LOG.exception(_('Unable to deploy instance for pool: %s'), pool_id)
            self.needs_resync = True

    def _destroy_pool(self, pool_id):
        """Undeploy the pool's instance and tell the plugin it is gone."""
        driver = self._get_driver(pool_id)
        try:
            driver.undeploy_instance(pool_id)
            del self.instance_mapping[pool_id]
            self.plugin_rpc.pool_destroyed(pool_id)
        except Exception:
            LOG.exception(_('Unable to destroy device for pool: %s'), pool_id)
            self.needs_resync = True

    def remove_orphans(self):
        """Ask each driver to clean up instances not in instance_mapping."""
        for driver_name in self.device_drivers:
            pool_ids = [pool_id for pool_id in self.instance_mapping
                        if self.instance_mapping[pool_id] == driver_name]
            try:
                self.device_drivers[driver_name].remove_orphans(pool_ids)
            except NotImplementedError:
                pass  # Not all drivers will support this

    def _handle_failed_driver_call(self, operation, obj_type, obj_id, driver):
        """Log a failed driver call and mark the object ERROR on the plugin."""
        LOG.exception(_('%(operation)s %(obj)s %(id)s failed on device driver '
                        '%(driver)s'),
                      {'operation': operation.capitalize(), 'obj': obj_type,
                       'id': obj_id, 'driver': driver})
        self.plugin_rpc.update_status(obj_type, obj_id, constants.ERROR)

    def create_vip(self, context, vip):
        driver = self._get_driver(vip['pool_id'])
        try:
            driver.create_vip(vip)
        except Exception:
            self._handle_failed_driver_call('create', 'vip', vip['id'],
                                            driver.get_name())
        else:
            self.plugin_rpc.update_status('vip', vip['id'], constants.ACTIVE)

    def update_vip(self, context, old_vip, vip):
        driver = self._get_driver(vip['pool_id'])
        try:
            driver.update_vip(old_vip, vip)
        except Exception:
            self._handle_failed_driver_call('update', 'vip', vip['id'],
                                            driver.get_name())
        else:
            self.plugin_rpc.update_status('vip', vip['id'], constants.ACTIVE)

    def delete_vip(self, context, vip):
        # No status update on delete: the plugin removes the DB record.
        driver = self._get_driver(vip['pool_id'])
        driver.delete_vip(vip)

    def create_pool(self, context, pool, driver_name):
        # Unlike the other objects, pool creation names the driver explicitly
        # and registers the pool in instance_mapping on success.
        if driver_name not in self.device_drivers:
            LOG.error(_('No device driver on agent: %s.'), driver_name)
            self.plugin_rpc.update_status('pool', pool['id'], constants.ERROR)
            return
        driver = self.device_drivers[driver_name]
        try:
            driver.create_pool(pool)
        except Exception:
            self._handle_failed_driver_call('create', 'pool', pool['id'],
                                            driver.get_name())
        else:
            self.instance_mapping[pool['id']] = driver_name
            self.plugin_rpc.update_status('pool', pool['id'], constants.ACTIVE)

    def update_pool(self, context, old_pool, pool):
        driver = self._get_driver(pool['id'])
        try:
            driver.update_pool(old_pool, pool)
        except Exception:
            self._handle_failed_driver_call('update', 'pool', pool['id'],
                                            driver.get_name())
        else:
            self.plugin_rpc.update_status('pool', pool['id'], constants.ACTIVE)

    def delete_pool(self, context, pool):
        driver = self._get_driver(pool['id'])
        driver.delete_pool(pool)
        del self.instance_mapping[pool['id']]

    def create_member(self, context, member):
        driver = self._get_driver(member['pool_id'])
        try:
            driver.create_member(member)
        except Exception:
            self._handle_failed_driver_call('create', 'member', member['id'],
                                            driver.get_name())
        else:
            self.plugin_rpc.update_status('member', member['id'],
                                          constants.ACTIVE)

    def update_member(self, context, old_member, member):
        driver = self._get_driver(member['pool_id'])
        try:
            driver.update_member(old_member, member)
        except Exception:
            self._handle_failed_driver_call('update', 'member', member['id'],
                                            driver.get_name())
        else:
            self.plugin_rpc.update_status('member', member['id'],
                                          constants.ACTIVE)

    def delete_member(self, context, member):
        driver = self._get_driver(member['pool_id'])
        driver.delete_member(member)

    def create_pool_health_monitor(self, context, health_monitor, pool_id):
        driver = self._get_driver(pool_id)
        # Health monitor status is keyed by the (pool, monitor) association.
        assoc_id = {'pool_id': pool_id, 'monitor_id': health_monitor['id']}
        try:
            driver.create_pool_health_monitor(health_monitor, pool_id)
        except Exception:
            self._handle_failed_driver_call(
                'create', 'health_monitor', assoc_id, driver.get_name())
        else:
            self.plugin_rpc.update_status(
                'health_monitor', assoc_id, constants.ACTIVE)

    def update_pool_health_monitor(self, context, old_health_monitor,
                                   health_monitor, pool_id):
        driver = self._get_driver(pool_id)
        assoc_id = {'pool_id': pool_id, 'monitor_id': health_monitor['id']}
        try:
            driver.update_pool_health_monitor(old_health_monitor,
                                              health_monitor,
                                              pool_id)
        except Exception:
            self._handle_failed_driver_call(
                'update', 'health_monitor', assoc_id, driver.get_name())
        else:
            self.plugin_rpc.update_status(
                'health_monitor', assoc_id, constants.ACTIVE)

    def delete_pool_health_monitor(self, context, health_monitor, pool_id):
        driver = self._get_driver(pool_id)
        driver.delete_pool_health_monitor(health_monitor, pool_id)

    def agent_updated(self, context, payload):
        """Handle the agent_updated notification event."""
        if payload['admin_state_up'] != self.admin_state_up:
            self.admin_state_up = payload['admin_state_up']
            if self.admin_state_up:
                self.needs_resync = True
            else:
                # NOTE(review): _destroy_pool mutates instance_mapping while
                # this loop iterates .keys() -- safe on Python 2 where keys()
                # returns a list; would need list() on Python 3.
                for pool_id in self.instance_mapping.keys():
                    LOG.info(_("Destroying pool %s due to agent disabling"),
                             pool_id)
                    self._destroy_pool(pool_id)
            LOG.info(_("Agent_updated by server side %s!"), payload)

View File

@ -1,130 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy.orm import joinedload
from neutron.common import constants
from neutron.db import agents_db
from neutron.db import agentschedulers_db
from neutron.db import model_base
from neutron.extensions import lbaas_agentscheduler
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class PoolLoadbalancerAgentBinding(model_base.BASEV2):
    """Represents binding between neutron loadbalancer pools and agents."""

    # pool_id is the primary key: a pool is hosted by at most one agent.
    pool_id = sa.Column(sa.String(36),
                        sa.ForeignKey("pools.id", ondelete='CASCADE'),
                        primary_key=True)
    agent = orm.relation(agents_db.Agent)
    agent_id = sa.Column(sa.String(36), sa.ForeignKey("agents.id",
                                                      ondelete='CASCADE'),
                         nullable=False)
class LbaasAgentSchedulerDbMixin(agentschedulers_db.AgentSchedulerDbMixin,
                                 lbaas_agentscheduler
                                 .LbaasAgentSchedulerPluginBase):
    """DB-backed queries binding loadbalancer pools to LBaaS agents."""

    def get_lbaas_agent_hosting_pool(self, context, pool_id, active=None):
        """Return {'agent': <dict>} for the agent hosting pool_id.

        Returns None (implicitly) when there is no binding or the bound
        agent is not eligible.
        """
        query = context.session.query(PoolLoadbalancerAgentBinding)
        # Eager-load the agent row in the same query.
        query = query.options(joinedload('agent'))
        binding = query.get(pool_id)
        if (binding and self.is_eligible_agent(
                active, binding.agent)):
            return {'agent': self._make_agent_dict(binding.agent)}

    def get_lbaas_agents(self, context, active=None, filters=None):
        """Return eligible LBaaS agents, optionally filtered by column values."""
        query = context.session.query(agents_db.Agent)
        query = query.filter_by(agent_type=constants.AGENT_TYPE_LOADBALANCER)
        if active is not None:
            query = query.filter_by(admin_state_up=active)
        if filters:
            # NOTE(review): iteritems() is Python-2-only; unknown filter keys
            # are silently ignored.
            for key, value in filters.iteritems():
                column = getattr(agents_db.Agent, key, None)
                if column:
                    query = query.filter(column.in_(value))
        return [agent
                for agent in query
                if self.is_eligible_agent(active, agent)]

    def list_pools_on_lbaas_agent(self, context, id):
        """Return {'pools': [...]} for every pool bound to agent `id`."""
        query = context.session.query(PoolLoadbalancerAgentBinding.pool_id)
        query = query.filter_by(agent_id=id)
        pool_ids = [item[0] for item in query]
        if pool_ids:
            return {'pools': self.get_pools(context, filters={'id': pool_ids})}
        else:
            return {'pools': []}

    def get_lbaas_agent_candidates(self, device_driver, active_agents):
        """Filter active_agents down to those advertising device_driver."""
        candidates = []
        for agent in active_agents:
            agent_conf = self.get_configuration_dict(agent)
            if device_driver in agent_conf['device_drivers']:
                candidates.append(agent)
        return candidates
class ChanceScheduler(object):
    """Allocate a loadbalancer agent for a vip in a random way."""

    def schedule(self, plugin, context, pool, device_driver):
        """Schedule the pool to an active loadbalancer agent if there
        is no enabled agent hosting it.

        Returns the chosen agent, or None (implicitly) when the pool is
        already hosted or no suitable agent exists.  Binding creation runs
        inside a DB subtransaction.
        """
        with context.session.begin(subtransactions=True):
            lbaas_agent = plugin.get_lbaas_agent_hosting_pool(
                context, pool['id'])
            if lbaas_agent:
                # Already scheduled; nothing to do.
                LOG.debug(_('Pool %(pool_id)s has already been hosted'
                            ' by lbaas agent %(agent_id)s'),
                          {'pool_id': pool['id'],
                           'agent_id': lbaas_agent['id']})
                return
            active_agents = plugin.get_lbaas_agents(context, active=True)
            if not active_agents:
                LOG.warn(_('No active lbaas agents for pool %s'), pool['id'])
                return
            # Only agents whose driver list includes device_driver qualify.
            candidates = plugin.get_lbaas_agent_candidates(device_driver,
                                                           active_agents)
            if not candidates:
                LOG.warn(_('No lbaas agent supporting device driver %s'),
                         device_driver)
                return
            # "Chance" scheduling: pick uniformly at random.
            chosen_agent = random.choice(candidates)
            binding = PoolLoadbalancerAgentBinding()
            binding.agent = chosen_agent
            binding.pool_id = pool['id']
            context.session.add(binding)
            LOG.debug(_('Pool %(pool_id)s is scheduled to '
                        'lbaas agent %(agent_id)s'),
                      {'pool_id': pool['id'],
                       'agent_id': chosen_agent['id']})
            return chosen_agent

View File

@ -1,47 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Load balancing methods (pool lb_method values).
LB_METHOD_ROUND_ROBIN = 'ROUND_ROBIN'
LB_METHOD_LEAST_CONNECTIONS = 'LEAST_CONNECTIONS'
LB_METHOD_SOURCE_IP = 'SOURCE_IP'

# Protocols supported for VIPs and pools.
PROTOCOL_TCP = 'TCP'
PROTOCOL_HTTP = 'HTTP'
PROTOCOL_HTTPS = 'HTTPS'

# Health monitor probe types.
HEALTH_MONITOR_PING = 'PING'
HEALTH_MONITOR_TCP = 'TCP'
HEALTH_MONITOR_HTTP = 'HTTP'
HEALTH_MONITOR_HTTPS = 'HTTPS'

# Session persistence types.
SESSION_PERSISTENCE_SOURCE_IP = 'SOURCE_IP'
SESSION_PERSISTENCE_HTTP_COOKIE = 'HTTP_COOKIE'
SESSION_PERSISTENCE_APP_COOKIE = 'APP_COOKIE'

# Keys of the statistics dict reported by device drivers.
STATS_ACTIVE_CONNECTIONS = 'active_connections'
STATS_MAX_CONNECTIONS = 'max_connections'
STATS_TOTAL_CONNECTIONS = 'total_connections'
STATS_CURRENT_SESSIONS = 'current_sessions'
STATS_MAX_SESSIONS = 'max_sessions'
STATS_TOTAL_SESSIONS = 'total_sessions'
STATS_IN_BYTES = 'bytes_in'
STATS_OUT_BYTES = 'bytes_out'
STATS_CONNECTION_ERRORS = 'connection_errors'
STATS_RESPONSE_ERRORS = 'response_errors'
STATS_STATUS = 'status'
STATS_HEALTH = 'health'
STATS_FAILED_CHECKS = 'failed_checks'

View File

@ -1,17 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mark McClain, DreamHost

View File

@ -1,130 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 Radware LTD.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Avishay Balderman, Radware
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class LoadBalancerAbstractDriver(object):
    """Interface every LBaaS provider driver must implement.

    The methods mirror the tenant-facing plugin API: each receives the
    dict representation of a resource (vip, pool, member, health
    monitor) exactly as it is returned to the tenant.  Read (GET)
    operations are served by the plugin itself and have no counterpart
    here.  A concrete driver talks to its backend and then reports the
    outcome through the plugin, e.g.::

        self.plugin.update_status(context, Vip, vip["id"],
                                  constants.ACTIVE)
    """
    @abc.abstractmethod
    def create_vip(self, context, vip):
        """Realize *vip* on the backend and report ACTIVE or ERROR."""
        pass
    @abc.abstractmethod
    def update_vip(self, context, old_vip, vip):
        """Apply the change from *old_vip* to *vip* and update status."""
        pass
    @abc.abstractmethod
    def delete_vip(self, context, vip):
        """Remove *vip* from the backend.

        On success the driver deletes the DB record; on failure it sets
        the vip status to ERROR.
        """
        pass
    @abc.abstractmethod
    def create_pool(self, context, pool):
        """Realize *pool* on the backend and report ACTIVE or ERROR."""
        pass
    @abc.abstractmethod
    def update_pool(self, context, old_pool, pool):
        """Apply the change from *old_pool* to *pool* and update status."""
        pass
    @abc.abstractmethod
    def delete_pool(self, context, pool):
        """Remove *pool* from the backend.

        On success the driver calls plugin._delete_db_pool(); on
        failure it sets the pool status to ERROR.
        """
        pass
    @abc.abstractmethod
    def stats(self, context, pool_id):
        """Return (or trigger collection of) statistics for *pool_id*."""
        pass
    @abc.abstractmethod
    def create_member(self, context, member):
        """Realize *member* on the backend and report ACTIVE or ERROR."""
        pass
    @abc.abstractmethod
    def update_member(self, context, old_member, member):
        """Apply the change from *old_member* to *member*."""
        pass
    @abc.abstractmethod
    def delete_member(self, context, member):
        """Remove *member* from the backend."""
        pass
    @abc.abstractmethod
    def update_pool_health_monitor(self, context,
                                   old_health_monitor,
                                   health_monitor,
                                   pool_id):
        """Apply a health monitor change for the pool *pool_id*."""
        pass
    @abc.abstractmethod
    def create_pool_health_monitor(self, context,
                                   health_monitor,
                                   pool_id):
        """Associate *health_monitor* with pool *pool_id*.

        On completion the driver reports through
        plugin.update_pool_health_monitor(..., constants.ACTIVE).
        """
        pass
    @abc.abstractmethod
    def delete_pool_health_monitor(self, context, health_monitor, pool_id):
        """Remove the monitor association from pool *pool_id*."""
        pass

View File

@ -1,445 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mark McClain, DreamHost
import uuid
from oslo.config import cfg
from neutron.common import constants as q_const
from neutron.common import exceptions as n_exc
from neutron.common import rpc_compat
from neutron.common import topics
from neutron.db import agents_db
from neutron.db.loadbalancer import loadbalancer_db
from neutron.extensions import lbaas_agentscheduler
from neutron.extensions import portbindings
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants
from neutron.services.loadbalancer.drivers import abstract_driver
LOG = logging.getLogger(__name__)
# Pluggable scheduler that binds a newly created pool to one of the
# running LBaaS agents (defaults to random choice).
AGENT_SCHEDULER_OPTS = [
    cfg.StrOpt('loadbalancer_pool_scheduler_driver',
               default='neutron.services.loadbalancer.agent_scheduler'
               '.ChanceScheduler',
               help=_('Driver to use for scheduling '
                      'pool to a default loadbalancer agent')),
]
cfg.CONF.register_opts(AGENT_SCHEDULER_OPTS)
class DriverNotSpecified(n_exc.NeutronException):
    """Raised when an agent-based plugin driver sets no device_driver."""
    message = _("Device driver for agent should be specified "
                "in plugin driver.")
class LoadBalancerCallbacks(rpc_compat.RpcCallback):
    """Plugin-side RPC endpoint invoked by LBaaS agents."""
    RPC_API_VERSION = '2.0'
    # history
    #   1.0 Initial version
    #   2.0 Generic API for agent based drivers
    #       - get_logical_device() handling changed;
    #       - pool_deployed() and update_status() methods added;
    def __init__(self, plugin):
        """Keep a reference to the LBaaS service plugin."""
        super(LoadBalancerCallbacks, self).__init__()
        self.plugin = plugin
    def get_ready_devices(self, context, host=None):
        """Return ids of pools on *host*'s agent that need deployment.

        Only admin-up pools whose status is ACTIVE or one of the
        pending states are returned.
        """
        with context.session.begin(subtransactions=True):
            agents = self.plugin.get_lbaas_agents(context,
                                                  filters={'host': [host]})
            if not agents:
                return []
            elif len(agents) > 1:
                LOG.warning(_('Multiple lbaas agents found on host %s'), host)
            pools = self.plugin.list_pools_on_lbaas_agent(context,
                                                          agents[0].id)
            pool_ids = [pool['id'] for pool in pools['pools']]
            qry = context.session.query(loadbalancer_db.Pool.id)
            qry = qry.filter(loadbalancer_db.Pool.id.in_(pool_ids))
            qry = qry.filter(
                loadbalancer_db.Pool.status.in_(
                    constants.ACTIVE_PENDING_STATUSES))
            up = True  # makes pep8 and sqlalchemy happy
            qry = qry.filter(loadbalancer_db.Pool.admin_state_up == up)
            return [id for id, in qry]
    def get_logical_device(self, context, pool_id=None):
        """Assemble the full logical configuration of a pool.

        Returns a dict with the pool, its vip (including port and
        subnet details), members (active/pending plus INACTIVE),
        health monitors, and the device driver name for the pool's
        provider.
        """
        with context.session.begin(subtransactions=True):
            qry = context.session.query(loadbalancer_db.Pool)
            qry = qry.filter_by(id=pool_id)
            pool = qry.one()
            retval = {}
            retval['pool'] = self.plugin._make_pool_dict(pool)
            if pool.vip:
                retval['vip'] = self.plugin._make_vip_dict(pool.vip)
                retval['vip']['port'] = (
                    self.plugin._core_plugin._make_port_dict(pool.vip.port)
                )
                # Inline each fixed ip's full subnet dict so the agent
                # does not need extra lookups.
                for fixed_ip in retval['vip']['port']['fixed_ips']:
                    fixed_ip['subnet'] = (
                        self.plugin._core_plugin.get_subnet(
                            context,
                            fixed_ip['subnet_id']
                        )
                    )
            retval['members'] = [
                self.plugin._make_member_dict(m)
                for m in pool.members if (
                    m.status in constants.ACTIVE_PENDING_STATUSES or
                    m.status == constants.INACTIVE)
            ]
            retval['healthmonitors'] = [
                self.plugin._make_health_monitor_dict(hm.healthmonitor)
                for hm in pool.monitors
                if hm.status in constants.ACTIVE_PENDING_STATUSES
            ]
            retval['driver'] = (
                self.plugin.drivers[pool.provider.provider_name].device_driver)
            return retval
    def pool_deployed(self, context, pool_id):
        """Mark a deployed pool and all of its sub-resources ACTIVE."""
        with context.session.begin(subtransactions=True):
            qry = context.session.query(loadbalancer_db.Pool)
            qry = qry.filter_by(id=pool_id)
            pool = qry.one()
            # set all resources to active
            if pool.status in constants.ACTIVE_PENDING_STATUSES:
                pool.status = constants.ACTIVE
            if (pool.vip and pool.vip.status in
                    constants.ACTIVE_PENDING_STATUSES):
                pool.vip.status = constants.ACTIVE
            for m in pool.members:
                if m.status in constants.ACTIVE_PENDING_STATUSES:
                    m.status = constants.ACTIVE
            for hm in pool.monitors:
                if hm.status in constants.ACTIVE_PENDING_STATUSES:
                    hm.status = constants.ACTIVE
    def update_status(self, context, obj_type, obj_id, status):
        """Set the status of a vip/pool/member/health monitor.

        For 'health_monitor' *obj_id* is a dict with 'monitor_id' and
        'pool_id'.  An object already deleted concurrently is logged
        and ignored.
        """
        model_mapping = {
            'pool': loadbalancer_db.Pool,
            'vip': loadbalancer_db.Vip,
            'member': loadbalancer_db.Member,
            'health_monitor': loadbalancer_db.PoolMonitorAssociation
        }
        if obj_type not in model_mapping:
            raise n_exc.Invalid(_('Unknown object type: %s') % obj_type)
        try:
            if obj_type == 'health_monitor':
                self.plugin.update_pool_health_monitor(
                    context, obj_id['monitor_id'], obj_id['pool_id'], status)
            else:
                self.plugin.update_status(
                    context, model_mapping[obj_type], obj_id, status)
        except n_exc.NotFound:
            # update_status may come from agent on an object which was
            # already deleted from db with other request
            LOG.warning(_('Cannot update status: %(obj_type)s %(obj_id)s '
                          'not found in the DB, it was probably deleted '
                          'concurrently'),
                        {'obj_type': obj_type, 'obj_id': obj_id})
    def pool_destroyed(self, context, pool_id=None):
        """Agent confirmation hook that a pool has been destroyed.

        This method exists for subclasses to change the deletion
        behavior.
        """
        pass
    def plug_vip_port(self, context, port_id=None, host=None):
        """Bind the vip port to *host* and mark it owned by LBaaS."""
        if not port_id:
            return
        try:
            port = self.plugin._core_plugin.get_port(
                context,
                port_id
            )
        except n_exc.PortNotFound:
            msg = _('Unable to find port %s to plug.')
            LOG.debug(msg, port_id)
            return
        port['admin_state_up'] = True
        port['device_owner'] = 'neutron:' + constants.LOADBALANCER
        port['device_id'] = str(uuid.uuid5(uuid.NAMESPACE_DNS, str(host)))
        port[portbindings.HOST_ID] = host
        self.plugin._core_plugin.update_port(
            context,
            port_id,
            {'port': port}
        )
    def unplug_vip_port(self, context, port_id=None, host=None):
        """Release the vip port: admin-down it and clear ownership."""
        if not port_id:
            return
        try:
            port = self.plugin._core_plugin.get_port(
                context,
                port_id
            )
        except n_exc.PortNotFound:
            msg = _('Unable to find port %s to unplug. This can occur when '
                    'the Vip has been deleted first.')
            LOG.debug(msg, port_id)
            return
        port['admin_state_up'] = False
        port['device_owner'] = ''
        port['device_id'] = ''
        try:
            self.plugin._core_plugin.update_port(
                context,
                port_id,
                {'port': port}
            )
        except n_exc.PortNotFound:
            # The port can also vanish between get_port and update_port.
            msg = _('Unable to find port %s to unplug. This can occur when '
                    'the Vip has been deleted first.')
            LOG.debug(msg, port_id)
    def update_pool_stats(self, context, pool_id=None, stats=None, host=None):
        """Persist statistics reported by the agent for *pool_id*."""
        self.plugin.update_pool_stats(context, pool_id, data=stats)
class LoadBalancerAgentApi(rpc_compat.RpcProxy):
    """Plugin-side RPC proxy used to reach LBaaS agents."""
    BASE_RPC_API_VERSION = '2.0'
    # history
    #   1.0 Initial version
    #   1.1 Support agent_updated call
    #   2.0 Generic API for agent based drivers
    #       - modify/reload/destroy_pool methods were removed;
    #       - added methods to handle create/update/delete for every lbaas
    #         object individually;
    def __init__(self, topic):
        super(LoadBalancerAgentApi, self).__init__(
            topic, default_version=self.BASE_RPC_API_VERSION)
    def _cast(self, context, method_name, method_args, host, version=None):
        """Fire-and-forget *method_name* to the agent on *host*."""
        per_host_topic = '%s.%s' % (self.topic, host)
        message = self.make_msg(method_name, **method_args)
        return self.cast(context, message, topic=per_host_topic,
                         version=version)
    def create_vip(self, context, vip, host):
        payload = {'vip': vip}
        return self._cast(context, 'create_vip', payload, host)
    def update_vip(self, context, old_vip, vip, host):
        payload = {'old_vip': old_vip, 'vip': vip}
        return self._cast(context, 'update_vip', payload, host)
    def delete_vip(self, context, vip, host):
        payload = {'vip': vip}
        return self._cast(context, 'delete_vip', payload, host)
    def create_pool(self, context, pool, host, driver_name):
        payload = {'pool': pool, 'driver_name': driver_name}
        return self._cast(context, 'create_pool', payload, host)
    def update_pool(self, context, old_pool, pool, host):
        payload = {'old_pool': old_pool, 'pool': pool}
        return self._cast(context, 'update_pool', payload, host)
    def delete_pool(self, context, pool, host):
        payload = {'pool': pool}
        return self._cast(context, 'delete_pool', payload, host)
    def create_member(self, context, member, host):
        payload = {'member': member}
        return self._cast(context, 'create_member', payload, host)
    def update_member(self, context, old_member, member, host):
        payload = {'old_member': old_member, 'member': member}
        return self._cast(context, 'update_member', payload, host)
    def delete_member(self, context, member, host):
        payload = {'member': member}
        return self._cast(context, 'delete_member', payload, host)
    def create_pool_health_monitor(self, context, health_monitor, pool_id,
                                   host):
        payload = {'health_monitor': health_monitor, 'pool_id': pool_id}
        return self._cast(context, 'create_pool_health_monitor', payload,
                          host)
    def update_pool_health_monitor(self, context, old_health_monitor,
                                   health_monitor, pool_id, host):
        payload = {'old_health_monitor': old_health_monitor,
                   'health_monitor': health_monitor,
                   'pool_id': pool_id}
        return self._cast(context, 'update_pool_health_monitor', payload,
                          host)
    def delete_pool_health_monitor(self, context, health_monitor, pool_id,
                                   host):
        payload = {'health_monitor': health_monitor, 'pool_id': pool_id}
        return self._cast(context, 'delete_pool_health_monitor', payload,
                          host)
    def agent_updated(self, context, admin_state_up, host):
        payload = {'payload': {'admin_state_up': admin_state_up}}
        return self._cast(context, 'agent_updated', payload, host)
class AgentDriverBase(abstract_driver.LoadBalancerAbstractDriver):
    """Base class for provider drivers that delegate work to an agent.

    Vendor subclasses only need to override ``device_driver``; every
    plugin operation is forwarded over RPC to the agent hosting the
    pool.
    """
    # name of device driver that should be used by the agent;
    # vendor specific plugin drivers must override it;
    device_driver = None
    def __init__(self, plugin):
        if not self.device_driver:
            raise DriverNotSpecified()
        self.agent_rpc = LoadBalancerAgentApi(topics.LOADBALANCER_AGENT)
        self.plugin = plugin
        self._set_callbacks_on_plugin()
        self.plugin.agent_notifiers.update(
            {q_const.AGENT_TYPE_LOADBALANCER: self.agent_rpc})
        self.pool_scheduler = importutils.import_object(
            cfg.CONF.loadbalancer_pool_scheduler_driver)
    def _set_callbacks_on_plugin(self):
        """Register plugin-side RPC consumers (at most once per plugin)."""
        # other agent based plugin driver might already set callbacks on plugin
        if hasattr(self.plugin, 'agent_callbacks'):
            return
        self.plugin.agent_endpoints = [
            LoadBalancerCallbacks(self.plugin),
            agents_db.AgentExtRpcCallback(self.plugin)
        ]
        self.plugin.conn = rpc_compat.create_connection(new=True)
        self.plugin.conn.create_consumer(
            topics.LOADBALANCER_PLUGIN,
            self.plugin.agent_endpoints,
            fanout=False)
        self.plugin.conn.consume_in_threads()
    def get_pool_agent(self, context, pool_id):
        """Return the agent hosting *pool_id* or raise NoActiveLbaasAgent."""
        agent = self.plugin.get_lbaas_agent_hosting_pool(context, pool_id)
        if not agent:
            raise lbaas_agentscheduler.NoActiveLbaasAgent(pool_id=pool_id)
        return agent['agent']
    def create_vip(self, context, vip):
        agent = self.get_pool_agent(context, vip['pool_id'])
        self.agent_rpc.create_vip(context, vip, agent['host'])
    def update_vip(self, context, old_vip, vip):
        # A vip moved out of the active/pending states is torn down
        # rather than updated.
        agent = self.get_pool_agent(context, vip['pool_id'])
        if vip['status'] in constants.ACTIVE_PENDING_STATUSES:
            self.agent_rpc.update_vip(context, old_vip, vip, agent['host'])
        else:
            self.agent_rpc.delete_vip(context, vip, agent['host'])
    def delete_vip(self, context, vip):
        # DB record is removed first; the RPC is best-effort cleanup.
        self.plugin._delete_db_vip(context, vip['id'])
        agent = self.get_pool_agent(context, vip['pool_id'])
        self.agent_rpc.delete_vip(context, vip, agent['host'])
    def create_pool(self, context, pool):
        # Schedule the pool onto an agent that runs our device driver.
        agent = self.pool_scheduler.schedule(self.plugin, context, pool,
                                             self.device_driver)
        if not agent:
            raise lbaas_agentscheduler.NoEligibleLbaasAgent(pool_id=pool['id'])
        self.agent_rpc.create_pool(context, pool, agent['host'],
                                   self.device_driver)
    def update_pool(self, context, old_pool, pool):
        agent = self.get_pool_agent(context, pool['id'])
        if pool['status'] in constants.ACTIVE_PENDING_STATUSES:
            self.agent_rpc.update_pool(context, old_pool, pool,
                                       agent['host'])
        else:
            self.agent_rpc.delete_pool(context, pool, agent['host'])
    def delete_pool(self, context, pool):
        # get agent first to know host as binding will be deleted
        # after pool is deleted from db
        agent = self.plugin.get_lbaas_agent_hosting_pool(context, pool['id'])
        self.plugin._delete_db_pool(context, pool['id'])
        if agent:
            self.agent_rpc.delete_pool(context, pool, agent['agent']['host'])
    def create_member(self, context, member):
        agent = self.get_pool_agent(context, member['pool_id'])
        self.agent_rpc.create_member(context, member, agent['host'])
    def update_member(self, context, old_member, member):
        agent = self.get_pool_agent(context, member['pool_id'])
        # member may change pool id
        if member['pool_id'] != old_member['pool_id']:
            old_pool_agent = self.plugin.get_lbaas_agent_hosting_pool(
                context, old_member['pool_id'])
            if old_pool_agent:
                self.agent_rpc.delete_member(context, old_member,
                                             old_pool_agent['agent']['host'])
            self.agent_rpc.create_member(context, member, agent['host'])
        else:
            self.agent_rpc.update_member(context, old_member, member,
                                         agent['host'])
    def delete_member(self, context, member):
        # DB record is removed first; the RPC is best-effort cleanup.
        self.plugin._delete_db_member(context, member['id'])
        agent = self.get_pool_agent(context, member['pool_id'])
        self.agent_rpc.delete_member(context, member, agent['host'])
    def create_pool_health_monitor(self, context, healthmon, pool_id):
        # healthmon is not used here
        agent = self.get_pool_agent(context, pool_id)
        self.agent_rpc.create_pool_health_monitor(context, healthmon,
                                                  pool_id, agent['host'])
    def update_pool_health_monitor(self, context, old_health_monitor,
                                   health_monitor, pool_id):
        agent = self.get_pool_agent(context, pool_id)
        self.agent_rpc.update_pool_health_monitor(context, old_health_monitor,
                                                  health_monitor, pool_id,
                                                  agent['host'])
    def delete_pool_health_monitor(self, context, health_monitor, pool_id):
        self.plugin._delete_db_pool_health_monitor(
            context, health_monitor['id'], pool_id
        )
        agent = self.get_pool_agent(context, pool_id)
        self.agent_rpc.delete_pool_health_monitor(context, health_monitor,
                                                  pool_id, agent['host'])
    def stats(self, context, pool_id):
        """No-op: stats are pushed by the agent via update_pool_stats."""
        pass

View File

@ -1,9 +0,0 @@
Embrane LBaaS Driver
This driver interfaces OpenStack Neutron with Embrane's heleos platform,
which provides load-balancing appliances for cloud environments.
L2 connectivity is provided by one of the supported existing plugins.
For more details on usage, configuration and implementation, please refer to:
https://wiki.openstack.org/wiki/Neutron/LBaaS/EmbraneDriver

View File

@ -1,108 +0,0 @@
# Copyright 2014 Embrane, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Ivar Lazzaro, Embrane, Inc. ivar@embrane.com
from eventlet import greenthread
from eventlet import queue
from heleosapi import exceptions as h_exc
from neutron.openstack.common import log as logging
from neutron.plugins.embrane.common import contexts as ctx
from neutron.services.loadbalancer.drivers.embrane.agent import lb_operations
from neutron.services.loadbalancer.drivers.embrane import constants as econ
LOG = logging.getLogger(__name__)
class Dispatcher(object):
    """Serializes load balancer operations per balancer id.

    Each item id gets its own queue, drained by a dedicated green
    thread, so operations on the same balancer run in order while
    different balancers proceed concurrently.
    """
    # NOTE(review): 'async' became a reserved word in Python 3.7; this
    # module predates that and only runs under Python 2.
    def __init__(self, driver, async=True):
        self._async = async
        self._driver = driver
        # item id -> [queue, consumer greenthread]
        self.sync_items = dict()
        self.handlers = lb_operations.handlers
    def dispatch_lb(self, d_context, *args, **kwargs):
        """Queue every handler registered for d_context's event.

        Spawns the consumer green thread on first use of an item id;
        in synchronous mode, blocks until the thread finishes.
        """
        item = d_context.item
        event = d_context.event
        n_context = d_context.n_context
        chain = d_context.chain
        item_id = item["id"]
        if event in self.handlers:
            for f in self.handlers[event]:
                first_run = False
                if item_id not in self.sync_items:
                    self.sync_items[item_id] = [queue.Queue()]
                    first_run = True
                self.sync_items[item_id][0].put(
                    ctx.OperationContext(event, n_context, item, chain, f,
                                         args, kwargs))
                if first_run:
                    t = greenthread.spawn(self._consume_lb,
                                          item_id,
                                          self.sync_items[item_id][0],
                                          self._driver,
                                          self._async)
                    self.sync_items[item_id].append(t)
                if not self._async:
                    t = self.sync_items[item_id][1]
                    t.wait()
    def _consume_lb(self, sync_item, sync_queue, driver, a_sync):
        """Drain *sync_queue*, applying each queued operation in order.

        Exits (and forgets the item) when the balancer reaches the
        DELETED state or, in async mode, when the queue stays empty
        past QUEUE_TIMEOUT.
        """
        current_state = None
        while True:
            try:
                if current_state == econ.DELETED:
                    del self.sync_items[sync_item]
                    return
                try:
                    operation_context = sync_queue.get(
                        block=a_sync,
                        timeout=econ.QUEUE_TIMEOUT)
                except queue.Empty:
                    del self.sync_items[sync_item]
                    return
                # Run any preliminary actions chained to this operation.
                (operation_context.chain and
                 operation_context.chain.execute_all())
                transient_state = None
                try:
                    transient_state = operation_context.function(
                        driver, operation_context.n_context,
                        operation_context.item, *operation_context.args,
                        **operation_context.kwargs)
                except (h_exc.PendingDva, h_exc.DvaNotFound,
                        h_exc.BrokenInterface, h_exc.DvaCreationFailed,
                        h_exc.BrokenDva, h_exc.ConfigurationFailed) as ex:
                    LOG.warning(econ.error_map[type(ex)], ex.message)
                except h_exc.DvaDeleteFailed as ex:
                    LOG.warning(econ.error_map[type(ex)], ex.message)
                    transient_state = econ.DELETED
                finally:
                    # if the returned transient state is None, no operations
                    # are required on the DVA status
                    if transient_state == econ.DELETED:
                        current_state = driver._delete_vip(
                            operation_context.n_context,
                            operation_context.item)
                    # Error state cannot be reverted
                    else:
                        driver._update_vip_graph_state(
                            operation_context.n_context,
                            operation_context.item)
            except Exception:
                LOG.exception(_('Unhandled exception occurred'))

View File

@ -1,179 +0,0 @@
# Copyright 2014 Embrane, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Ivar Lazzaro, Embrane, Inc. ivar@embrane.com
import functools
from heleosapi import exceptions as h_exc
from neutron.openstack.common import log as logging
from neutron.services.loadbalancer import constants as lcon
from neutron.services.loadbalancer.drivers.embrane import constants as econ
LOG = logging.getLogger(__name__)
handlers = {}
def handler(event, handler):
    """Decorator factory that registers the decorated function for *event*.

    The original callable is appended to ``handler[event]`` (the
    *handler* argument is the registry dict, e.g. the module-level
    ``handlers`` map); the decorated name is rebound to a transparent
    wrapper.  The parameter name shadows the function itself, but it is
    kept for backward compatibility with existing callers.
    """
    def wrap(f):
        # setdefault replaces the membership test on handler.keys()
        # plus the two insert branches of the original implementation.
        handler.setdefault(event, []).append(f)

        @functools.wraps(f)
        def wrapped_f(*args, **kwargs):
            return f(*args, **kwargs)
        return wrapped_f
    return wrap
@handler(econ.Events.CREATE_VIP, handlers)
def _provision_load_balancer(driver, context, vip, flavor,
                             vip_utif_info, vip_ip_allocation_info,
                             pool_utif_info=None,
                             pool_ip_allocation_info=None,
                             pool=None, members=None,
                             monitors=None):
    """Create and configure a new DVA for *vip* on the heleos backend.

    First builds the appliance and its interfaces (architectural
    configuration), then allocates addresses and pushes the vip/pool/
    member/monitor configuration.  Returns the mapped DVA state.
    """
    api = driver._heleos_api
    tenant_id = context.tenant_id
    admin_state = vip["admin_state_up"]
    # Architectural configuration
    api.create_load_balancer(tenant_id=tenant_id,
                             router_id=vip["id"],
                             name=vip["name"],
                             flavor=flavor,
                             up=False)
    api.grow_interface(vip_utif_info, False, tenant_id, vip["id"])
    if pool:
        api.grow_interface(pool_utif_info, False, tenant_id,
                           vip["id"])
    # Logical configuration
    api.allocate_address(vip["id"], True, vip_ip_allocation_info)
    if pool:
        api.allocate_address(vip["id"], True, pool_ip_allocation_info)
    dva = api.configure_load_balancer(vip["id"], admin_state,
                                      vip, pool,
                                      monitors, members)
    return api.extract_dva_state(dva)
@handler(econ.Events.UPDATE_VIP, handlers)
def _update_load_balancer(driver, context, vip,
                          old_pool_id=None, old_port_id=None,
                          removed_ip=None, pool_utif_info=None,
                          pool_ip_allocation_info=None,
                          new_pool=None, members=None,
                          monitors=None):
    """Apply a vip update (optionally swapping its pool) on the DVA.

    When *old_pool_id* is given the old pool interface/address is torn
    down and replaced before the logical reconfiguration.  Returns the
    mapped DVA state.
    """
    api = driver._heleos_api
    tenant_id = context.tenant_id
    admin_state = vip["admin_state_up"]
    if old_pool_id:
        # Architectural Changes
        api.de_allocate_address(vip['id'], False, old_port_id, removed_ip)
        api.shrink_interface(tenant_id, vip["id"], False, old_port_id)
        api.grow_interface(pool_utif_info, False, tenant_id, vip["id"])
        # Configuration Changes
        api.allocate_address(vip["id"], True, pool_ip_allocation_info)
        api.replace_pool(vip["id"], True, vip, old_pool_id,
                         new_pool, monitors, members)
    api.update_vservice(vip["id"], True, vip)
    # Dva update
    dva = api.update_dva(tenant_id, vip["id"], vip["name"],
                         admin_state, description=vip["description"])
    return api.extract_dva_state(dva)
@handler(econ.Events.DELETE_VIP, handlers)
def _delete_load_balancer(driver, context, vip):
    """Destroy the DVA backing *vip*; a missing DVA is only logged."""
    vip_id = vip['id']
    try:
        driver._heleos_api.delete_dva(context.tenant_id, vip_id)
    except h_exc.DvaNotFound:
        LOG.warning(_('The load balancer %s had no physical representation, '
                      'likely already deleted'), vip_id)
    return econ.DELETED
@handler(econ.Events.UPDATE_POOL, handlers)
def _update_server_pool(driver, context, vip, pool,
                        monitors=None):
    """Push the updated pool definition to the DVA backing *vip*."""
    backend_api = driver._heleos_api
    persistence = (vip.get('session_persistence') or {}).get('type')
    cookie = persistence == lcon.SESSION_PERSISTENCE_HTTP_COOKIE
    dva = backend_api.update_pool(vip['id'], vip['admin_state_up'],
                                  pool, cookie, monitors)
    return backend_api.extract_dva_state(dva)
@handler(econ.Events.ADD_OR_UPDATE_MEMBER, handlers)
def _add_or_update_pool_member(driver, context, vip, member, protocol):
    """Create or refresh a backend server for *member* on the DVA."""
    backend_api = driver._heleos_api
    dva = backend_api.update_backend_server(
        vip['id'], vip['admin_state_up'], member, protocol)
    return backend_api.extract_dva_state(dva)
@handler(econ.Events.REMOVE_MEMBER, handlers)
def _remove_member_from_pool(driver, context, vip, member):
    """Detach *member* from the pool served by *vip*'s DVA."""
    backend_api = driver._heleos_api
    dva = backend_api.remove_pool_member(vip['id'], vip['admin_state_up'],
                                         member)
    return backend_api.extract_dva_state(dva)
@handler(econ.Events.DELETE_MEMBER, handlers)
def _delete_member(driver, context, vip, member):
    """Remove *member* from the backend and then from the Neutron DB.

    The DB delete shares the subtransaction so the record only goes
    away if the backend call did not raise.
    """
    with context.session.begin(subtransactions=True):
        api = driver._heleos_api
        dva = api.delete_backend_server(vip['id'], vip['admin_state_up'],
                                        member)
        driver._delete_member(context, member)
        return api.extract_dva_state(dva)
@handler(econ.Events.ADD_POOL_HM, handlers)
def _create_pool_hm(driver, context, vip, hm, pool_id):
    """Attach health monitor *hm* to pool *pool_id* on the DVA."""
    backend_api = driver._heleos_api
    dva = backend_api.add_pool_monitor(vip['id'], vip['admin_state_up'],
                                       hm, pool_id)
    return backend_api.extract_dva_state(dva)
@handler(econ.Events.UPDATE_POOL_HM, handlers)
def _update_pool_hm(driver, context, vip, hm, pool_id):
    """Refresh health monitor *hm* for pool *pool_id* on the DVA."""
    backend_api = driver._heleos_api
    dva = backend_api.update_pool_monitor(vip['id'], vip['admin_state_up'],
                                          hm, pool_id)
    return backend_api.extract_dva_state(dva)
@handler(econ.Events.DELETE_POOL_HM, handlers)
def _delete_pool_hm(driver, context, vip, hm, pool_id):
    """Remove health monitor *hm* from the DVA and the Neutron DB.

    NOTE(review): this calls api.add_pool_monitor(), the same call used
    by _create_pool_hm; a remove/delete counterpart would be expected
    in a delete path -- confirm against the heleosapi
    backend_operations API.
    """
    with context.session.begin(subtransactions=True):
        api = driver._heleos_api
        dva = api.add_pool_monitor(vip['id'], vip['admin_state_up'],
                                   hm, pool_id)
        driver._delete_pool_hm(context, hm, pool_id)
        return api.extract_dva_state(dva)
@handler(econ.Events.POLL_GRAPH, handlers)
def _poll_graph(driver, context, vip):
    """Fetch *vip*'s DVA from the backend and report its current state."""
    backend_api = driver._heleos_api
    return backend_api.extract_dva_state(backend_api.get_dva(vip['id']))

View File

@ -1,53 +0,0 @@
# Copyright 2014 Embrane, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Ivar Lazzaro, Embrane, Inc. ivar@embrane.com
from oslo.config import cfg
# User may want to use LB service together with the L3 plugin, but using
# different resources. The service will inherit the configuration from the
# L3 heleos plugin if present and not overridden.
# Options for the [heleoslb] section.  Values left unset fall back to
# the L3 heleos plugin's [heleos] section when that plugin is loaded.
heleos_opts = [
    cfg.StrOpt('esm_mgmt',
               help=_('ESM management root address')),
    cfg.StrOpt('admin_username',
               help=_('ESM admin username.')),
    cfg.StrOpt('admin_password',
               secret=True,
               help=_('ESM admin password.')),
    cfg.StrOpt('lb_image',
               help=_('Load Balancer image id (Embrane LB)')),
    cfg.StrOpt('inband_id',
               help=_('In band Security Zone id for LBs')),
    cfg.StrOpt('oob_id',
               help=_('Out of band Security Zone id for LBs')),
    cfg.StrOpt('mgmt_id',
               help=_('Management Security Zone id for LBs')),
    cfg.StrOpt('dummy_utif_id',
               help=_('Dummy user traffic Security Zone id for LBs')),
    cfg.StrOpt('resource_pool_id',
               help=_('Shared resource pool id')),
    cfg.StrOpt('lb_flavor', default="small",
               help=_('choose LB image flavor to use, accepted values: small, '
                      'medium')),
    cfg.IntOpt('sync_interval', default=60,
               help=_('resource synchronization interval in seconds')),
    cfg.BoolOpt('async_requests',
                help=_('Define if the requests have '
                       'run asynchronously or not')),
]
cfg.CONF.register_opts(heleos_opts, 'heleoslb')

View File

@ -1,74 +0,0 @@
# Copyright 2014 Embrane, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Ivar Lazzaro, Embrane, Inc. ivar@embrane.com
from heleosapi import constants as h_con
from heleosapi import exceptions as h_exc
from neutron.plugins.common import constants as ccon
DELETED = 'DELETED'  # not visible status
# Seconds a per-balancer worker waits on its empty queue before exiting
# (see the agent dispatcher's queue.get timeout).
QUEUE_TIMEOUT = 300
# presumably a cap on backend subnets/servers per balancer -- TODO confirm
BACK_SUB_LIMIT = 6
class BackendActions:
    """Symbolic names for the operations applied to backend servers."""
    UPDATE = 'update'
    GROW = 'grow'
    REMOVE = 'remove'
    SHRINK = 'shrink'
class Events:
    """Event names dispatched through the driver's handlers registry.

    Note the *_POOL_HM values intentionally keep their historical
    'create/update/delete_pool_hm' strings.
    """
    CREATE_VIP = 'create_vip'
    UPDATE_VIP = 'update_vip'
    DELETE_VIP = 'delete_vip'
    UPDATE_POOL = 'update_pool'
    UPDATE_MEMBER = 'update_member'
    ADD_OR_UPDATE_MEMBER = 'add_or_update_member'
    REMOVE_MEMBER = 'remove_member'
    DELETE_MEMBER = 'delete_member'
    POLL_GRAPH = 'poll_graph'
    ADD_POOL_HM = "create_pool_hm"
    UPDATE_POOL_HM = "update_pool_hm"
    DELETE_POOL_HM = "delete_pool_hm"
# Log-message templates for the heleosapi exceptions mapped below.
_DVA_PENDING_ERROR_MSG = _('Dva is pending for the following reason: %s')
_DVA_NOT_FOUNT_ERROR_MSG = _('%s, '
                             'probably was cancelled through the heleos UI')
_DVA_BROKEN_ERROR_MSG = _('Dva seems to be broken for reason %s')
_DVA_CREATION_FAILED_ERROR_MSG = _('Dva creation failed reason %s')
_DVA_CREATION_PENDING_ERROR_MSG = _('Dva creation is in pending state '
                                    'for reason %s')
_CFG_FAILED_ERROR_MSG = _('Dva configuration failed for reason %s')
_DVA_DEL_FAILED_ERROR_MSG = _('Failed to delete the backend '
                              'load balancer for reason %s. Please remove '
                              'it manually through the heleos UI')
NO_MEMBER_SUBNET_WARN = _('No subnet is associated to member %s (required '
                          'to identify the proper load balancer port)')
# heleosapi exception class -> message template used when logging it.
error_map = {h_exc.PendingDva: _DVA_PENDING_ERROR_MSG,
             h_exc.DvaNotFound: _DVA_NOT_FOUNT_ERROR_MSG,
             h_exc.BrokenDva: _DVA_BROKEN_ERROR_MSG,
             h_exc.DvaCreationFailed: _DVA_CREATION_FAILED_ERROR_MSG,
             h_exc.DvaCreationPending: _DVA_CREATION_PENDING_ERROR_MSG,
             h_exc.ConfigurationFailed: _CFG_FAILED_ERROR_MSG,
             h_exc.DvaDeleteFailed: _DVA_DEL_FAILED_ERROR_MSG}
# heleos DVA state -> Neutron plugin status.
state_map = {h_con.DvaState.POWER_ON: ccon.ACTIVE,
             None: ccon.ERROR,
             DELETED: DELETED}

View File

@ -1,56 +0,0 @@
# Copyright 2014 Embrane, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Ivar Lazzaro, Embrane, Inc. ivar@embrane.com
import neutron.db.api as db
from neutron.db import models_v2 as nmodel
from neutron.services.loadbalancer.drivers.embrane import models
def initialize():
    """Configure Neutron database access for this driver."""
    db.configure_db()
def add_pool_port(context, pool_id, port_id):
    """Persist a PoolPort row associating *pool_id* with *port_id*."""
    with context.session.begin(subtransactions=True):
        association = models.PoolPort()
        association.pool_id = pool_id
        association.port_id = port_id
        context.session.add(association)
def get_pool_port(context, pool_id):
    """Return the first PoolPort for *pool_id*, or None."""
    query = context.session.query(models.PoolPort)
    return query.filter_by(pool_id=pool_id).first()
def delete_pool_backend(context, pool_id):
    """Delete every PoolPort (and underlying port) tied to *pool_id*."""
    associations = context.session.query(models.PoolPort).filter_by(
        pool_id=pool_id)
    for association in associations:
        delete_pool_port(context, association)
def delete_pool_port(context, backend_port):
    """Delete the PoolPort association and its underlying Neutron port.

    NOTE(review): if the Neutron port row is already gone, the PoolPort
    association is left in place -- confirm whether that is intended.
    """
    session = context.session
    with session.begin(subtransactions=True):
        port = (session.query(nmodel.Port).filter_by(
            id=backend_port['port_id'])).first()
        if port:
            session.delete(backend_port)
            session.delete(port)

View File

@ -1,342 +0,0 @@
# Copyright 2014 Embrane, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Ivar Lazzaro, Embrane, Inc. ivar@embrane.com
from heleosapi import backend_operations as h_op
from heleosapi import constants as h_con
from heleosapi import info as h_info
from oslo.config import cfg
from neutron.api.v2 import attributes
from neutron.common import exceptions as n_exc
from neutron.db.loadbalancer import loadbalancer_db as ldb
from neutron.extensions import loadbalancer as lb_ext
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants as pcon
from neutron.plugins.embrane.common import contexts as embrane_ctx
from neutron.plugins.embrane.common import exceptions as h_exc
from neutron.plugins.embrane.common import utils
from neutron.services.loadbalancer import constants as lbcon
from neutron.services.loadbalancer.drivers import abstract_driver
from neutron.services.loadbalancer.drivers.embrane.agent import dispatcher
from neutron.services.loadbalancer.drivers.embrane import config # noqa
from neutron.services.loadbalancer.drivers.embrane import constants as econ
from neutron.services.loadbalancer.drivers.embrane import db as edb
from neutron.services.loadbalancer.drivers.embrane import poller
LOG = logging.getLogger(__name__)
# LBaaS-specific options live in the [heleoslb] section.
conf = cfg.CONF.heleoslb
# Fallback options come from the [heleos] core-plugin section; when the
# Embrane core plugin is not loaded its options are unregistered, so fall
# back to an empty mapping instead.
confh = {}
try:
    confh = cfg.CONF.heleos
except cfg.NoSuchOptError:
    pass
def get_conf(x):
    """Look up option *x* in [heleoslb], falling back to [heleos].

    Returns None when the option is unknown in both sections.
    """
    try:
        value = conf.get(x)
        return value or confh.get(x)
    except cfg.NoSuchOptError:
        return
class EmbraneLbaas(abstract_driver.LoadBalancerAbstractDriver):
    """LBaaS driver backed by Embrane heleos appliances.

    CRUD operations are forwarded asynchronously to the ESM backend via a
    dispatcher; an optional poller periodically reconciles vip status.
    """

    def __init__(self, plugin):
        edb.initialize()
        config_esm_mgmt = get_conf('esm_mgmt')
        config_admin_username = get_conf('admin_username')
        config_admin_password = get_conf('admin_password')
        config_lb_image_id = get_conf('lb_image')
        config_security_zones = {h_con.SzType.IB: get_conf('inband_id'),
                                 h_con.SzType.OOB: get_conf('oob_id'),
                                 h_con.SzType.MGMT: get_conf('mgmt_id'),
                                 h_con.SzType.DUMMY: get_conf('dummy_utif_id')}
        config_resource_pool = get_conf('resource_pool_id')
        self._heleos_api = h_op.BackendOperations(
            esm_mgmt=config_esm_mgmt,
            admin_username=config_admin_username,
            admin_password=config_admin_password,
            lb_image_id=config_lb_image_id,
            security_zones=config_security_zones,
            resource_pool=config_resource_pool)
        self._dispatcher = dispatcher.Dispatcher(
            self, get_conf("async_requests"))
        self.plugin = plugin
        # A non-positive sync_interval disables background reconciliation.
        poll_interval = conf.get('sync_interval')
        if poll_interval > 0:
            self._loop_call = poller.Poller(self)
            self._loop_call.start_polling(conf.get('sync_interval'))
        self._flavor = get_conf('lb_flavor')

    def _validate_vip(self, vip):
        """Reject vip features the Embrane backend cannot implement."""
        # Only the "unlimited" sentinel (-1) is accepted.
        if vip.get('connection_limit') and vip['connection_limit'] != -1:
            raise h_exc.UnsupportedException(
                err_msg=_('Connection limit is not supported by Embrane LB'))
        persistance = vip.get('session_persistence')
        if (persistance and persistance.get('type') ==
                lbcon.SESSION_PERSISTENCE_APP_COOKIE):
            p_type = vip['session_persistence']['type']
            raise h_exc.UnsupportedException(
                err_msg=_('Session persistence %s '
                          'not supported by Embrane LBaaS') % p_type)

    def _delete_vip(self, context, vip):
        """Remove the vip row from the DB; returns the DELETED marker."""
        with context.session.begin(subtransactions=True):
            self.plugin._delete_db_vip(context, vip['id'])
            return econ.DELETED

    def _delete_member(self, context, member):
        self.plugin._delete_db_member(context, member['id'])

    def _delete_pool_hm(self, context, health_monitor, pool_id):
        self.plugin._delete_db_pool_health_monitor(context,
                                                   health_monitor['id'],
                                                   pool_id)

    def _update_vip_graph_state(self, context, vip):
        """Refresh vip/pool/member status from the backend into the DB."""
        self._heleos_api.update_vip_status(vip)
        self.plugin.update_status(context, ldb.Vip, vip['id'],
                                  vip['status'])
        if vip['status'] != pcon.ERROR:
            pool = self.plugin.get_pool(context, vip['pool_id'])
            pool_members = pool['members']
            # Manages possible manual changes and monitor actions
            self._heleos_api.update_pool_status(vip['id'], pool)
            self._heleos_api.update_members_status(vip['id'], pool['id'],
                                                   pool_members)
            self.plugin.update_status(context, ldb.Pool, pool['id'],
                                      pool['status'])
            for member in pool_members:
                self.plugin.update_status(context, ldb.Member,
                                          member['id'], member['status'])

    def _create_backend_port(self, context, db_pool):
        """Create the neutron port the appliance uses to reach the pool."""
        try:
            subnet = self.plugin._core_plugin.get_subnet(context,
                                                         db_pool["subnet_id"])
        except n_exc.SubnetNotFound:
            LOG.warning(_("Subnet assigned to pool %s doesn't exist, "
                          "backend port can't be created"), db_pool['id'])
            return

        fixed_ip = {'subnet_id': subnet['id'],
                    'fixed_ips': attributes.ATTR_NOT_SPECIFIED}

        port_data = {
            'tenant_id': db_pool['tenant_id'],
            'name': 'pool-' + db_pool['id'],
            'network_id': subnet['network_id'],
            'mac_address': attributes.ATTR_NOT_SPECIFIED,
            'admin_state_up': False,
            'device_id': '',
            'device_owner': '',
            'fixed_ips': [fixed_ip]
        }

        port = self.plugin._core_plugin.create_port(context,
                                                    {'port': port_data})
        return edb.add_pool_port(context, db_pool['id'], port['id'])

    def _retrieve_utif_info(self, context, neutron_port):
        """Build the heleos UtifInfo descriptor for a neutron port."""
        network = self.plugin._core_plugin.get_network(
            context, neutron_port['network_id'])
        result = h_info.UtifInfo(network.get('provider:segmentation_id'),
                                 network['name'],
                                 network['id'],
                                 False,
                                 network['tenant_id'],
                                 neutron_port['id'],
                                 neutron_port['mac_address'],
                                 network.get('provider:network_type'))
        return result

    def create_vip(self, context, vip):
        """Validate the vip and dispatch its creation to the backend."""
        self._validate_vip(vip)
        db_vip = self.plugin.populate_vip_graph(context, vip)
        vip_port = self.plugin._core_plugin._get_port(context,
                                                      db_vip['port_id'])
        vip_utif_info = self._retrieve_utif_info(context, vip_port)
        vip_ip_allocation_info = utils.retrieve_ip_allocation_info(
            context, vip_port)
        vip_ip_allocation_info.is_gw = True
        db_pool = pool_utif_info = pool_ip_allocation_info = None
        members = monitors = []
        if db_vip['pool_id']:
            db_pool = self.plugin.get_pool(
                context, db_vip['pool_id'])
            pool_port = edb.get_pool_port(context, db_pool["id"])
            if pool_port:
                db_port = self.plugin._core_plugin._get_port(
                    context, pool_port["port_id"])
                pool_utif_info = self._retrieve_utif_info(context, db_port)
                pool_ip_allocation_info = utils.retrieve_ip_allocation_info(
                    context, db_port)
            members = self.plugin.get_members(
                context, filters={'id': db_pool['members']})
            # Bug fix: monitors were fetched with get_members filtered by
            # monitor ids, which returns the wrong objects; use the
            # health-monitor API instead.
            monitors = self.plugin.get_health_monitors(
                context, filters={'id': db_pool['health_monitors']})
        self._dispatcher.dispatch_lb(
            embrane_ctx.DispatcherContext(econ.Events.CREATE_VIP,
                                          db_vip, context, None),
            self._flavor, vip_utif_info, vip_ip_allocation_info,
            pool_utif_info, pool_ip_allocation_info, db_pool, members,
            monitors)

    def update_vip(self, context, old_vip, vip):
        """Dispatch a vip update; handles the vip moving to another pool."""
        new_pool = old_port_id = removed_ip = None
        new_pool_utif = new_pool_ip_allocation = None
        old_pool = {}
        members = monitors = []
        if old_vip['pool_id'] != vip['pool_id']:
            new_pool = self.plugin.get_pool(
                context, vip['pool_id'])
            members = self.plugin.get_members(
                context, filters={'id': new_pool['members']})
            # Bug fix: use the health-monitor API (was get_members).
            monitors = self.plugin.get_health_monitors(
                context, filters={'id': new_pool['health_monitors']})
            new_pool_port = edb.get_pool_port(context, new_pool["id"])
            if new_pool_port:
                db_port = self.plugin._core_plugin._get_port(
                    context, new_pool_port["port_id"])
                new_pool_utif = self._retrieve_utif_info(context, db_port)
                new_pool_ip_allocation = utils.retrieve_ip_allocation_info(
                    context, db_port)
            old_pool = self.plugin.get_pool(
                context, old_vip['pool_id'])
            old_pool_port = edb.get_pool_port(context, old_pool["id"])
            if old_pool_port:
                old_port = self.plugin._core_plugin._get_port(
                    context, old_pool_port['port_id'])
                # remove that subnet ip
                removed_ip = old_port['fixed_ips'][0]['ip_address']
                old_port_id = old_port['id']
        self._dispatcher.dispatch_lb(
            embrane_ctx.DispatcherContext(econ.Events.UPDATE_VIP, vip,
                                          context, None),
            old_pool.get('id'), old_port_id, removed_ip, new_pool_utif,
            new_pool_ip_allocation, new_pool, members, monitors)

    def delete_vip(self, context, vip):
        """Dispatch vip deletion to the backend."""
        db_vip = self.plugin.populate_vip_graph(context, vip)
        self._dispatcher.dispatch_lb(
            embrane_ctx.DispatcherContext(
                econ.Events.DELETE_VIP, db_vip, context, None))

    def create_pool(self, context, pool):
        # A pool without a subnet needs no backend port.
        if pool['subnet_id']:
            self._create_backend_port(context, pool)

    def update_pool(self, context, old_pool, pool):
        """Dispatch a pool update when the pool is bound to a vip."""
        with context.session.begin(subtransactions=True):
            if old_pool['vip_id']:
                try:
                    db_vip = self.plugin._get_resource(
                        context, ldb.Vip, old_pool['vip_id'])
                except lb_ext.VipNotFound:
                    return
                # Bug fix: use the health-monitor API (was get_members).
                monitors = self.plugin.get_health_monitors(
                    context, filters={'id': old_pool['health_monitors']})
                self._dispatcher.dispatch_lb(
                    embrane_ctx.DispatcherContext(econ.Events.UPDATE_POOL,
                                                  db_vip, context, None),
                    pool, monitors)

    def delete_pool(self, context, pool):
        edb.delete_pool_backend(context, pool['id'])
        self.plugin._delete_db_pool(context, pool['id'])

    def create_member(self, context, member):
        # API call only when the owning pool already has a vip.
        db_pool = self.plugin.get_pool(context, member['pool_id'])
        if db_pool['vip_id']:
            db_vip = self.plugin._get_resource(context, ldb.Vip,
                                               db_pool['vip_id'])
            self._dispatcher.dispatch_lb(
                embrane_ctx.DispatcherContext(
                    econ.Events.ADD_OR_UPDATE_MEMBER, db_vip, context, None),
                member, db_pool['protocol'])

    def update_member(self, context, old_member, member):
        """Dispatch a member update; handles the member changing pool."""
        db_pool = self.plugin.get_pool(context, member['pool_id'])
        if member['pool_id'] != old_member['pool_id']:
            old_pool = self.plugin.get_pool(context, old_member['pool_id'])
            if old_pool['vip_id']:
                db_vip = self.plugin._get_resource(context, ldb.Vip,
                                                   old_pool['vip_id'])
                self._dispatcher.dispatch_lb(
                    embrane_ctx.DispatcherContext(
                        econ.Events.REMOVE_MEMBER, db_vip, context, None),
                    old_member)
        if db_pool['vip_id']:
            db_vip = self.plugin._get_resource(
                context, ldb.Vip, db_pool['vip_id'])
            self._dispatcher.dispatch_lb(
                embrane_ctx.DispatcherContext(
                    econ.Events.ADD_OR_UPDATE_MEMBER, db_vip, context, None),
                member, db_pool['protocol'])

    def delete_member(self, context, member):
        db_pool = self.plugin.get_pool(context, member['pool_id'])
        if db_pool['vip_id']:
            db_vip = self.plugin._get_resource(context, ldb.Vip,
                                               db_pool['vip_id'])
            self._dispatcher.dispatch_lb(
                embrane_ctx.DispatcherContext(
                    econ.Events.DELETE_MEMBER, db_vip, context, None),
                member)
        else:
            # No appliance involved: just drop the DB row.
            self._delete_member(context, member)

    def stats(self, context, pool_id):
        # Statistics are not collected from the backend; report zeroes.
        return {'bytes_in': 0,
                'bytes_out': 0,
                'active_connections': 0,
                'total_connections': 0}

    def create_pool_health_monitor(self, context, health_monitor, pool_id):
        db_pool = self.plugin.get_pool(context, pool_id)
        # API call only if vip exists
        if db_pool['vip_id']:
            db_vip = self.plugin._get_resource(context, ldb.Vip,
                                               db_pool['vip_id'])
            self._dispatcher.dispatch_lb(
                embrane_ctx.DispatcherContext(
                    econ.Events.ADD_POOL_HM, db_vip, context, None),
                health_monitor, pool_id)

    def update_pool_health_monitor(self, context, old_health_monitor,
                                   health_monitor, pool_id):
        db_pool = self.plugin.get_pool(context, pool_id)
        if db_pool['vip_id']:
            db_vip = self.plugin._get_resource(context, ldb.Vip,
                                               db_pool['vip_id'])
            self._dispatcher.dispatch_lb(
                embrane_ctx.DispatcherContext(
                    econ.Events.UPDATE_POOL_HM, db_vip, context, None),
                health_monitor, pool_id)

    def delete_pool_health_monitor(self, context, health_monitor, pool_id):
        db_pool = self.plugin.get_pool(context, pool_id)
        if db_pool['vip_id']:
            db_vip = self.plugin._get_resource(context, ldb.Vip,
                                               db_pool['vip_id'])
            self._dispatcher.dispatch_lb(
                embrane_ctx.DispatcherContext(
                    econ.Events.DELETE_POOL_HM, db_vip, context, None),
                health_monitor, pool_id)
        else:
            self._delete_pool_hm(context, health_monitor, pool_id)

View File

@ -1,30 +0,0 @@
# Copyright 2014 Embrane, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Ivar Lazzaro, Embrane, Inc. ivar@embrane.com
import sqlalchemy as sql
from neutron.db import model_base
class PoolPort(model_base.BASEV2):
    """Represents the connection between pools and ports."""
    __tablename__ = 'embrane_pool_port'
    # id of the load-balancer pool served by the backend port
    pool_id = sql.Column(sql.String(36), sql.ForeignKey('pools.id'),
                         primary_key=True)
    # id of the neutron port created as the pool's backend
    port_id = sql.Column(sql.String(36), sql.ForeignKey('ports.id'),
                         nullable=False)

View File

@ -1,71 +0,0 @@
# Copyright 2014 Embrane, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Ivar Lazzaro, Embrane, Inc. ivar@embrane.com
from heleosapi import exceptions as h_exc
from neutron import context
from neutron.db.loadbalancer import loadbalancer_db as ldb
from neutron.db import servicetype_db as sdb
from neutron.openstack.common import log as logging
from neutron.openstack.common import loopingcall
from neutron.plugins.common import constants as ccon
from neutron.plugins.embrane.common import contexts as embrane_ctx
from neutron.services.loadbalancer.drivers.embrane import constants as econ
LOG = logging.getLogger(__name__)
# Vips in these states are skipped by the poller: PENDING_* states belong
# to an in-flight operation and ERROR requires operator intervention.
skip_states = [ccon.PENDING_CREATE,
               ccon.PENDING_DELETE,
               ccon.PENDING_UPDATE,
               ccon.ERROR]
class Poller(object):
    """Periodically reconciles Embrane vip state with the Neutron DB."""

    def __init__(self, driver):
        self.dispatcher = driver._dispatcher
        service_type_manager = sdb.ServiceTypeManager.get_instance()
        # Resolve the provider name this driver is registered under so that
        # only vips owned by the Embrane provider are synchronized.
        self.provider = (service_type_manager.get_service_providers(
            None, filters={
                'service_type': [ccon.LOADBALANCER],
                'driver': ['neutron.services.loadbalancer.drivers.'
                           'embrane.driver.EmbraneLbaas']}))[0]['name']

    def start_polling(self, interval):
        """Start polling every *interval* seconds; returns the LoopingCall."""
        loop_call = loopingcall.FixedIntervalLoopingCall(self._run)
        loop_call.start(interval=interval)
        return loop_call

    def _run(self):
        ctx = context.get_admin_context()
        try:
            self.synchronize_vips(ctx)
        except h_exc.PollingException as e:
            # Bug fix: the original message had no format placeholder for
            # the exception argument, breaking the log-record formatting.
            LOG.exception(_('Unhandled exception occurred: %s'), e)

    def synchronize_vips(self, ctx):
        """Dispatch a POLL_GRAPH event for every steady-state vip."""
        session = ctx.session
        vips = session.query(ldb.Vip).join(
            sdb.ProviderResourceAssociation,
            sdb.ProviderResourceAssociation.resource_id ==
            ldb.Vip.pool_id).filter(
                sdb.ProviderResourceAssociation.provider_name == self.provider)
        # No need to check pending states
        for vip in vips:
            if vip['status'] not in skip_states:
                self.dispatcher.dispatch_lb(
                    d_context=embrane_ctx.DispatcherContext(
                        econ.Events.POLL_GRAPH, vip, ctx, None),
                    args=())

View File

@ -1,17 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mark McClain, DreamHost

View File

@ -1,238 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mark McClain, DreamHost
import itertools
from six import moves
from neutron.agent.linux import utils
from neutron.plugins.common import constants as qconstants
from neutron.services.loadbalancer import constants
# Neutron LBaaS protocol -> haproxy 'mode'. HTTPS is mapped to plain 'tcp'
# (pass-through), so TLS is terminated by the members, not by haproxy.
PROTOCOL_MAP = {
    constants.PROTOCOL_TCP: 'tcp',
    constants.PROTOCOL_HTTP: 'http',
    constants.PROTOCOL_HTTPS: 'tcp',
}
# Neutron LBaaS lb_method -> haproxy 'balance' algorithm.
BALANCE_MAP = {
    constants.LB_METHOD_ROUND_ROBIN: 'roundrobin',
    constants.LB_METHOD_LEAST_CONNECTIONS: 'leastconn',
    constants.LB_METHOD_SOURCE_IP: 'source'
}
# Neutron LBaaS statistic name -> column name in haproxy's 'show stat' CSV.
STATS_MAP = {
    constants.STATS_ACTIVE_CONNECTIONS: 'scur',
    constants.STATS_MAX_CONNECTIONS: 'smax',
    constants.STATS_CURRENT_SESSIONS: 'scur',
    constants.STATS_MAX_SESSIONS: 'smax',
    constants.STATS_TOTAL_CONNECTIONS: 'stot',
    constants.STATS_TOTAL_SESSIONS: 'stot',
    constants.STATS_IN_BYTES: 'bin',
    constants.STATS_OUT_BYTES: 'bout',
    constants.STATS_CONNECTION_ERRORS: 'econ',
    constants.STATS_RESPONSE_ERRORS: 'eresp'
}
ACTIVE_PENDING_STATUSES = qconstants.ACTIVE_PENDING_STATUSES
INACTIVE = qconstants.INACTIVE
def save_config(conf_path, logical_config, socket_path=None,
                user_group='nogroup'):
    """Convert a logical configuration to the HAProxy version."""
    sections = itertools.chain(
        _build_global(logical_config, socket_path=socket_path,
                      user_group=user_group),
        _build_defaults(logical_config),
        _build_frontend(logical_config),
        _build_backend(logical_config))
    utils.replace_file(conf_path, '\n'.join(sections))
def _build_global(config, socket_path=None, user_group='nogroup'):
opts = [
'daemon',
'user nobody',
'group %s' % user_group,
'log /dev/log local0',
'log /dev/log local1 notice'
]
if socket_path:
opts.append('stats socket %s mode 0666 level user' % socket_path)
return itertools.chain(['global'], ('\t' + o for o in opts))
def _build_defaults(config):
opts = [
'log global',
'retries 3',
'option redispatch',
'timeout connect 5000',
'timeout client 50000',
'timeout server 50000',
]
return itertools.chain(['defaults'], ('\t' + o for o in opts))
def _build_frontend(config):
    """Yield the lines of the HAProxy 'frontend' section for the vip."""
    vip = config['vip']
    protocol = vip['protocol']
    bind_address = _get_first_ip_from_port(vip['port'])
    directives = ['option tcplog',
                  'bind %s:%d' % (bind_address, vip['protocol_port']),
                  'mode %s' % PROTOCOL_MAP[protocol],
                  'default_backend %s' % config['pool']['id']]
    # connection_limit of -1 means unlimited: emit no maxconn at all
    if vip['connection_limit'] >= 0:
        directives.append('maxconn %s' % vip['connection_limit'])
    if protocol == constants.PROTOCOL_HTTP:
        # forward the client address to the members
        directives.append('option forwardfor')
    return itertools.chain(['frontend %s' % vip['id']],
                           ('\t' + line for line in directives))
def _build_backend(config):
    """Yield the lines of the HAProxy 'backend' section for the pool."""
    pool = config['pool']
    protocol = pool['protocol']
    directives = ['mode %s' % PROTOCOL_MAP[protocol],
                  'balance %s' % BALANCE_MAP.get(pool['lb_method'],
                                                 'roundrobin')]
    if protocol == constants.PROTOCOL_HTTP:
        directives.append('option forwardfor')
    # add the first health_monitor (if available)
    server_addon, health_opts = _get_server_health_option(config)
    directives.extend(health_opts)
    # add session persistence (if available)
    directives.extend(_get_session_persistence(config))
    # add one 'server' line per usable member
    use_cookies = _has_http_cookie_persistence(config)
    for member in config['members']:
        usable = (member['admin_state_up'] and
                  (member['status'] in ACTIVE_PENDING_STATUSES or
                   member['status'] == INACTIVE))
        if not usable:
            continue
        server = (('server %(id)s %(address)s:%(protocol_port)s '
                   'weight %(weight)s') % member) + server_addon
        if use_cookies:
            # cookie value is the member's position in the member list
            server += ' cookie %d' % config['members'].index(member)
        directives.append(server)
    return itertools.chain(['backend %s' % pool['id']],
                           ('\t' + line for line in directives))
def _get_first_ip_from_port(port):
for fixed_ip in port['fixed_ips']:
return fixed_ip['ip_address']
def _get_server_health_option(config):
    """Return (server-line addon, backend options) for the first enabled
    health monitor, or ('', []) when none is enabled.
    """
    # not checking the status of healthmonitor for two reasons:
    # 1) status field is absent in HealthMonitor model
    # 2) only active HealthMonitors are fetched with
    # LoadBalancerCallbacks.get_logical_device
    monitor = next((m for m in config['healthmonitors']
                    if m['admin_state_up']), None)
    if monitor is None:
        return '', []

    server_addon = ' check inter %(delay)ds fall %(max_retries)d' % monitor
    opts = ['timeout check %ds' % monitor['timeout']]
    if monitor['type'] in (constants.HEALTH_MONITOR_HTTP,
                           constants.HEALTH_MONITOR_HTTPS):
        opts.append('option httpchk %(http_method)s %(url_path)s' % monitor)
        opts.append(
            'http-check expect rstatus %s' %
            '|'.join(_expand_expected_codes(monitor['expected_codes'])))
    if monitor['type'] == constants.HEALTH_MONITOR_HTTPS:
        opts.append('option ssl-hello-chk')
    return server_addon, opts
def _get_session_persistence(config):
    """Return backend options implementing the vip's session persistence."""
    persistence = config['vip'].get('session_persistence')
    if not persistence:
        return []

    ptype = persistence['type']
    opts = []
    if ptype == constants.SESSION_PERSISTENCE_SOURCE_IP:
        opts.append('stick-table type ip size 10k')
        opts.append('stick on src')
    elif (ptype == constants.SESSION_PERSISTENCE_HTTP_COOKIE and
          config.get('members')):
        opts.append('cookie SRV insert indirect nocache')
    elif (ptype == constants.SESSION_PERSISTENCE_APP_COOKIE and
          persistence.get('cookie_name')):
        opts.append('appsession %s len 56 timeout 3h' %
                    persistence['cookie_name'])
    return opts
def _has_http_cookie_persistence(config):
    """Truthy when the vip uses HTTP-cookie session persistence."""
    persistence = config['vip'].get('session_persistence')
    return (persistence and
            persistence['type'] == constants.SESSION_PERSISTENCE_HTTP_COOKIE)
def _expand_expected_codes(codes):
"""Expand the expected code string in set of codes.
200-204 -> 200, 201, 202, 204
200, 203 -> 200, 203
"""
retval = set()
for code in codes.replace(',', ' ').split(' '):
code = code.strip()
if not code:
continue
elif '-' in code:
low, hi = code.split('-')[:2]
retval.update(str(i) for i in moves.xrange(int(low), int(hi) + 1))
else:
retval.add(code)
return retval

View File

@ -1,396 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mark McClain, DreamHost
import os
import shutil
import socket
import netaddr
from oslo.config import cfg
from neutron.agent.common import config
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils
from neutron.common import exceptions
from neutron.common import utils as n_utils
from neutron.openstack.common import excutils
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants
from neutron.services.loadbalancer.agent import agent_device_driver
from neutron.services.loadbalancer import constants as lb_const
from neutron.services.loadbalancer.drivers.haproxy import cfg as hacfg
LOG = logging.getLogger(__name__)
# Prefix of the per-pool network namespace names created by this driver.
NS_PREFIX = 'qlbaas-'
DRIVER_NAME = 'haproxy_ns'
STATE_PATH_DEFAULT = '$state_path/lbaas'
USER_GROUP_DEFAULT = 'nogroup'
# Options registered under the [haproxy] group.
OPTS = [
    cfg.StrOpt(
        'loadbalancer_state_path',
        default=STATE_PATH_DEFAULT,
        help=_('Location to store config and state files'),
        deprecated_opts=[cfg.DeprecatedOpt('loadbalancer_state_path')],
    ),
    cfg.StrOpt(
        'user_group',
        default=USER_GROUP_DEFAULT,
        help=_('The user group'),
        deprecated_opts=[cfg.DeprecatedOpt('user_group')],
    ),
    cfg.IntOpt(
        'send_gratuitous_arp',
        default=3,
        help=_('When delete and re-add the same vip, send this many '
               'gratuitous ARPs to flush the ARP cache in the Router. '
               'Set it below or equal to 0 to disable this feature.'),
    )
]
cfg.CONF.register_opts(OPTS, 'haproxy')
class HaproxyNSDriver(agent_device_driver.AgentDeviceDriver):
def __init__(self, conf, plugin_rpc):
self.conf = conf
self.root_helper = config.get_root_helper(conf)
self.state_path = conf.haproxy.loadbalancer_state_path
try:
vif_driver = importutils.import_object(conf.interface_driver, conf)
except ImportError:
with excutils.save_and_reraise_exception():
msg = (_('Error importing interface driver: %s')
% conf.haproxy.interface_driver)
LOG.error(msg)
self.vif_driver = vif_driver
self.plugin_rpc = plugin_rpc
self.pool_to_port_id = {}
@classmethod
def get_name(cls):
return DRIVER_NAME
def create(self, logical_config):
pool_id = logical_config['pool']['id']
namespace = get_ns_name(pool_id)
self._plug(namespace, logical_config['vip']['port'])
self._spawn(logical_config)
def update(self, logical_config):
pool_id = logical_config['pool']['id']
pid_path = self._get_state_file_path(pool_id, 'pid')
extra_args = ['-sf']
extra_args.extend(p.strip() for p in open(pid_path, 'r'))
self._spawn(logical_config, extra_args)
def _spawn(self, logical_config, extra_cmd_args=()):
pool_id = logical_config['pool']['id']
namespace = get_ns_name(pool_id)
conf_path = self._get_state_file_path(pool_id, 'conf')
pid_path = self._get_state_file_path(pool_id, 'pid')
sock_path = self._get_state_file_path(pool_id, 'sock')
user_group = self.conf.haproxy.user_group
hacfg.save_config(conf_path, logical_config, sock_path, user_group)
cmd = ['haproxy', '-f', conf_path, '-p', pid_path]
cmd.extend(extra_cmd_args)
ns = ip_lib.IPWrapper(self.root_helper, namespace)
ns.netns.execute(cmd)
# remember the pool<>port mapping
self.pool_to_port_id[pool_id] = logical_config['vip']['port']['id']
@n_utils.synchronized('haproxy-driver')
def undeploy_instance(self, pool_id, cleanup_namespace=False):
namespace = get_ns_name(pool_id)
ns = ip_lib.IPWrapper(self.root_helper, namespace)
pid_path = self._get_state_file_path(pool_id, 'pid')
# kill the process
kill_pids_in_file(self.root_helper, pid_path)
# unplug the ports
if pool_id in self.pool_to_port_id:
self._unplug(namespace, self.pool_to_port_id[pool_id])
# delete all devices from namespace;
# used when deleting orphans and port_id is not known for pool_id
if cleanup_namespace:
for device in ns.get_devices(exclude_loopback=True):
self.vif_driver.unplug(device.name, namespace=namespace)
# remove the configuration directory
conf_dir = os.path.dirname(self._get_state_file_path(pool_id, ''))
if os.path.isdir(conf_dir):
shutil.rmtree(conf_dir)
ns.garbage_collect_namespace()
def exists(self, pool_id):
namespace = get_ns_name(pool_id)
root_ns = ip_lib.IPWrapper(self.root_helper)
socket_path = self._get_state_file_path(pool_id, 'sock')
if root_ns.netns.exists(namespace) and os.path.exists(socket_path):
try:
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s.connect(socket_path)
return True
except socket.error:
pass
return False
def get_stats(self, pool_id):
socket_path = self._get_state_file_path(pool_id, 'sock')
TYPE_BACKEND_REQUEST = 2
TYPE_SERVER_REQUEST = 4
if os.path.exists(socket_path):
parsed_stats = self._get_stats_from_socket(
socket_path,
entity_type=TYPE_BACKEND_REQUEST | TYPE_SERVER_REQUEST)
pool_stats = self._get_backend_stats(parsed_stats)
pool_stats['members'] = self._get_servers_stats(parsed_stats)
return pool_stats
else:
LOG.warn(_('Stats socket not found for pool %s'), pool_id)
return {}
def _get_backend_stats(self, parsed_stats):
TYPE_BACKEND_RESPONSE = '1'
for stats in parsed_stats:
if stats.get('type') == TYPE_BACKEND_RESPONSE:
unified_stats = dict((k, stats.get(v, ''))
for k, v in hacfg.STATS_MAP.items())
return unified_stats
return {}
def _get_servers_stats(self, parsed_stats):
TYPE_SERVER_RESPONSE = '2'
res = {}
for stats in parsed_stats:
if stats.get('type') == TYPE_SERVER_RESPONSE:
res[stats['svname']] = {
lb_const.STATS_STATUS: (constants.INACTIVE
if stats['status'] == 'DOWN'
else constants.ACTIVE),
lb_const.STATS_HEALTH: stats['check_status'],
lb_const.STATS_FAILED_CHECKS: stats['chkfail']
}
return res
def _get_stats_from_socket(self, socket_path, entity_type):
try:
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s.connect(socket_path)
s.send('show stat -1 %s -1\n' % entity_type)
raw_stats = ''
chunk_size = 1024
while True:
chunk = s.recv(chunk_size)
raw_stats += chunk
if len(chunk) < chunk_size:
break
return self._parse_stats(raw_stats)
except socket.error as e:
LOG.warn(_('Error while connecting to stats socket: %s'), e)
return {}
def _parse_stats(self, raw_stats):
stat_lines = raw_stats.splitlines()
if len(stat_lines) < 2:
return []
stat_names = [name.strip('# ') for name in stat_lines[0].split(',')]
res_stats = []
for raw_values in stat_lines[1:]:
if not raw_values:
continue
stat_values = [value.strip() for value in raw_values.split(',')]
res_stats.append(dict(zip(stat_names, stat_values)))
return res_stats
def _get_state_file_path(self, pool_id, kind, ensure_state_dir=True):
"""Returns the file name for a given kind of config file."""
confs_dir = os.path.abspath(os.path.normpath(self.state_path))
conf_dir = os.path.join(confs_dir, pool_id)
if ensure_state_dir:
if not os.path.isdir(conf_dir):
os.makedirs(conf_dir, 0o755)
return os.path.join(conf_dir, kind)
def _plug(self, namespace, port, reuse_existing=True):
self.plugin_rpc.plug_vip_port(port['id'])
interface_name = self.vif_driver.get_device_name(Wrap(port))
if ip_lib.device_exists(interface_name, self.root_helper, namespace):
if not reuse_existing:
raise exceptions.PreexistingDeviceFailure(
dev_name=interface_name
)
else:
self.vif_driver.plug(
port['network_id'],
port['id'],
interface_name,
port['mac_address'],
namespace=namespace
)
cidrs = [
'%s/%s' % (ip['ip_address'],
netaddr.IPNetwork(ip['subnet']['cidr']).prefixlen)
for ip in port['fixed_ips']
]
self.vif_driver.init_l3(interface_name, cidrs, namespace=namespace)
gw_ip = port['fixed_ips'][0]['subnet'].get('gateway_ip')
if not gw_ip:
host_routes = port['fixed_ips'][0]['subnet'].get('host_routes', [])
for host_route in host_routes:
if host_route['destination'] == "0.0.0.0/0":
gw_ip = host_route['nexthop']
break
if gw_ip:
cmd = ['route', 'add', 'default', 'gw', gw_ip]
ip_wrapper = ip_lib.IPWrapper(self.root_helper,
namespace=namespace)
ip_wrapper.netns.execute(cmd, check_exit_code=False)
# When delete and re-add the same vip, we need to
# send gratuitous ARP to flush the ARP cache in the Router.
gratuitous_arp = self.conf.haproxy.send_gratuitous_arp
if gratuitous_arp > 0:
for ip in port['fixed_ips']:
cmd_arping = ['arping', '-U',
'-I', interface_name,
'-c', gratuitous_arp,
ip['ip_address']]
ip_wrapper.netns.execute(cmd_arping, check_exit_code=False)
def _unplug(self, namespace, port_id):
port_stub = {'id': port_id}
self.plugin_rpc.unplug_vip_port(port_id)
interface_name = self.vif_driver.get_device_name(Wrap(port_stub))
self.vif_driver.unplug(interface_name, namespace=namespace)
@n_utils.synchronized('haproxy-driver')
def deploy_instance(self, logical_config):
# do actual deploy only if vip and pool are configured and active
if (not logical_config or
'vip' not in logical_config or
(logical_config['vip']['status'] not in
constants.ACTIVE_PENDING_STATUSES) or
not logical_config['vip']['admin_state_up'] or
(logical_config['pool']['status'] not in
constants.ACTIVE_PENDING_STATUSES) or
not logical_config['pool']['admin_state_up']):
return
if self.exists(logical_config['pool']['id']):
self.update(logical_config)
else:
self.create(logical_config)
def _refresh_device(self, pool_id):
logical_config = self.plugin_rpc.get_logical_device(pool_id)
self.deploy_instance(logical_config)
def create_vip(self, vip):
self._refresh_device(vip['pool_id'])
def update_vip(self, old_vip, vip):
self._refresh_device(vip['pool_id'])
def delete_vip(self, vip):
self.undeploy_instance(vip['pool_id'])
def create_pool(self, pool):
# nothing to do here because a pool needs a vip to be useful
pass
def update_pool(self, old_pool, pool):
self._refresh_device(pool['id'])
def delete_pool(self, pool):
# delete_pool may be called before vip deletion in case
# pool's admin state set to down
if self.exists(pool['id']):
self.undeploy_instance(pool['id'])
def create_member(self, member):
self._refresh_device(member['pool_id'])
def update_member(self, old_member, member):
self._refresh_device(member['pool_id'])
def delete_member(self, member):
    """Refresh the device for the pool the member was removed from."""
    pool_id = member['pool_id']
    self._refresh_device(pool_id)
def create_pool_health_monitor(self, health_monitor, pool_id):
    """Re-sync the device so the new health monitor takes effect."""
    self._refresh_device(pool_id)
def update_pool_health_monitor(self, old_health_monitor, health_monitor,
                               pool_id):
    """Re-sync the device after a health monitor update."""
    self._refresh_device(pool_id)
def delete_pool_health_monitor(self, health_monitor, pool_id):
    """Re-sync the device after a health monitor removal."""
    self._refresh_device(pool_id)
def remove_orphans(self, known_pool_ids):
    """Undeploy instances whose pool no longer exists in the plugin.

    :param known_pool_ids: pool ids the plugin still knows about;
        any state directory not in this collection is an orphan.
    """
    if not os.path.exists(self.state_path):
        return
    for candidate in os.listdir(self.state_path):
        if candidate in known_pool_ids:
            continue
        if self.exists(candidate):
            # Clean up the namespace too: nobody owns it any more.
            self.undeploy_instance(candidate, cleanup_namespace=True)
# NOTE (markmcclain) For compliance with interface.py which expects objects
class Wrap(object):
    """A light attribute wrapper for compatibility with the interface lib.

    Exposes every key of the wrapped mapping both as an attribute and
    via dict-style indexing.
    """

    def __init__(self, d):
        # Promote each mapping entry to an instance attribute.
        for key, value in d.items():
            setattr(self, key, value)

    def __getitem__(self, key):
        # vars(self) is the instance __dict__, so a missing key raises
        # KeyError, matching plain-dict semantics.
        return vars(self)[key]
def get_ns_name(namespace_id):
    """Return the network namespace name for the given namespace id."""
    return '%s%s' % (NS_PREFIX, namespace_id)
def kill_pids_in_file(root_helper, pid_path):
    """Send SIGKILL to every pid listed in pid_path, if the file exists.

    Kill failures are logged and ignored: the process may already have
    exited on its own.
    """
    if not os.path.exists(pid_path):
        return
    with open(pid_path, 'r') as pid_file:
        for line in pid_file:
            pid = line.strip()
            try:
                utils.execute(['kill', '-9', pid], root_helper)
            except RuntimeError:
                LOG.exception(
                    _('Unable to kill haproxy process: %s'),
                    pid
                )

View File

@ -1,23 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.services.loadbalancer.drivers.common import agent_driver_base
from neutron.services.loadbalancer.drivers.haproxy import namespace_driver
class HaproxyOnHostPluginDriver(agent_driver_base.AgentDriverBase):
    """Plugin-side haproxy driver that delegates work to the LBaaS agent.

    All behavior comes from AgentDriverBase; this class only selects
    which device driver the agent should use on the host.
    """
    # Name of the haproxy namespace device driver run by the agent.
    device_driver = namespace_driver.DRIVER_NAME

View File

@ -1,182 +0,0 @@
# Copyright 2014 Citrix Systems
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import requests
from neutron.common import exceptions as n_exc
from neutron.openstack.common import jsonutils
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)

# HTTP header names used when talking to the NetScaler Control Center.
CONTENT_TYPE_HEADER = 'Content-type'
ACCEPT_HEADER = 'Accept'
AUTH_HEADER = 'Authorization'
# Identifies requests originating from this OpenStack LBaaS driver.
DRIVER_HEADER = 'X-OpenStack-LBaaS'
# Scopes each request to a tenant on the NCC side.
TENANT_HEADER = 'X-Tenant-ID'
JSON_CONTENT_TYPE = 'application/json'
DRIVER_HEADER_VALUE = 'netscaler-openstack-lbaas'
class NCCException(n_exc.NeutronException):
    """Represents exceptions thrown by NSClient."""

    # Error categories reported by the client.
    CONNECTION_ERROR = 1
    REQUEST_ERROR = 2
    RESPONSE_ERROR = 3
    UNKNOWN_ERROR = 4

    def __init__(self, error):
        # NOTE: self.message must be fully formatted before calling the
        # base __init__, which builds the final message from it.
        self.message = _("NCC Error %d") % error
        super(NCCException, self).__init__()
        # Numeric error category (one of the class constants above).
        self.error = error
class NSClient(object):
    """Client to operate on REST resources of NetScaler Control Center."""

    def __init__(self, service_uri, username, password):
        """Store the NCC endpoint and precompute the Basic auth header.

        :param service_uri: base URI of the NetScaler Control Center.
        :param username: login user; together with password enables auth.
        :param password: login password.
        :raises NCCException: if no service URI is configured.
        """
        if not service_uri:
            msg = _("No NetScaler Control Center URI specified. "
                    "Cannot connect.")
            LOG.exception(msg)
            raise NCCException(NCCException.CONNECTION_ERROR)
        self.service_uri = service_uri.strip('/')
        self.auth = None
        if username and password:
            # base64.b64encode never inserts line breaks, unlike the
            # deprecated base64.encodestring which wraps its output every
            # 76 characters and would corrupt the Authorization header
            # for long credentials (encodestring is also removed in
            # modern Python).
            base64string = base64.b64encode(
                ("%s:%s" % (username, password)).encode('utf-8')
            ).decode('ascii')
            self.auth = 'Basic %s' % base64string

    def create_resource(self, tenant_id, resource_path, object_name,
                        object_data):
        """Create a resource of NetScaler Control Center."""
        return self._resource_operation('POST', tenant_id,
                                        resource_path,
                                        object_name=object_name,
                                        object_data=object_data)

    def retrieve_resource(self, tenant_id, resource_path, parse_response=True):
        """Retrieve a resource of NetScaler Control Center."""
        # parse_response is kept for interface compatibility; responses
        # are always parsed by _get_response_dict on success.
        return self._resource_operation('GET', tenant_id, resource_path)

    def update_resource(self, tenant_id, resource_path, object_name,
                        object_data):
        """Update a resource of the NetScaler Control Center."""
        return self._resource_operation('PUT', tenant_id,
                                        resource_path,
                                        object_name=object_name,
                                        object_data=object_data)

    def remove_resource(self, tenant_id, resource_path, parse_response=True):
        """Remove a resource of NetScaler Control Center."""
        return self._resource_operation('DELETE', tenant_id, resource_path)

    def _resource_operation(self, method, tenant_id, resource_path,
                            object_name=None, object_data=None):
        """Issue one REST call and return (status, response dict)."""
        resource_uri = "%s/%s" % (self.service_uri, resource_path)
        headers = self._setup_req_headers(tenant_id)
        request_body = None
        if object_data:
            if isinstance(object_data, str):
                # Caller already serialized the payload.
                request_body = object_data
            else:
                obj_dict = {object_name: object_data}
                request_body = jsonutils.dumps(obj_dict)
        response_status, resp_dict = self._execute_request(method,
                                                           resource_uri,
                                                           headers,
                                                           body=request_body)
        return response_status, resp_dict

    def _is_valid_response(self, response_status):
        # when status is less than 400, the response is fine
        return response_status < requests.codes.bad_request

    def _setup_req_headers(self, tenant_id):
        """Build the standard headers sent with every NCC request."""
        headers = {ACCEPT_HEADER: JSON_CONTENT_TYPE,
                   CONTENT_TYPE_HEADER: JSON_CONTENT_TYPE,
                   DRIVER_HEADER: DRIVER_HEADER_VALUE,
                   TENANT_HEADER: tenant_id,
                   AUTH_HEADER: self.auth}
        return headers

    def _get_response_dict(self, response):
        """Normalize a requests response; JSON is parsed only on success."""
        response_dict = {'status': response.status_code,
                         'body': response.text,
                         'headers': response.headers}
        if self._is_valid_response(response.status_code):
            if response.text:
                response_dict['dict'] = response.json()
        return response_dict

    def _execute_request(self, method, resource_uri, headers, body=None):
        """Send the request, translating failures into NCCException.

        :raises NCCException: with CONNECTION_ERROR, REQUEST_ERROR,
            RESPONSE_ERROR or UNKNOWN_ERROR depending on the failure.
        """
        try:
            response = requests.request(method, url=resource_uri,
                                        headers=headers, data=body)
        except requests.exceptions.ConnectionError:
            msg = (_("Connection error occurred while connecting to %s") %
                   self.service_uri)
            LOG.exception(msg)
            raise NCCException(NCCException.CONNECTION_ERROR)
        except requests.exceptions.SSLError:
            msg = (_("SSL error occurred while connecting to %s") %
                   self.service_uri)
            LOG.exception(msg)
            raise NCCException(NCCException.CONNECTION_ERROR)
        except requests.exceptions.Timeout:
            msg = _("Request to %s timed out") % self.service_uri
            LOG.exception(msg)
            raise NCCException(NCCException.CONNECTION_ERROR)
        except (requests.exceptions.URLRequired,
                requests.exceptions.InvalidURL,
                requests.exceptions.MissingSchema,
                requests.exceptions.InvalidSchema):
            msg = _("Request did not specify a valid URL")
            LOG.exception(msg)
            raise NCCException(NCCException.REQUEST_ERROR)
        except requests.exceptions.TooManyRedirects:
            # Bug fix: the original logged the raw format string without
            # interpolating the service URI.
            msg = (_("Too many redirects occurred for request to %s") %
                   self.service_uri)
            LOG.exception(msg)
            raise NCCException(NCCException.REQUEST_ERROR)
        except requests.exceptions.RequestException:
            msg = (_("A request error while connecting to %s") %
                   self.service_uri)
            LOG.exception(msg)
            raise NCCException(NCCException.REQUEST_ERROR)
        except Exception:
            msg = (_("A unknown error occurred during request to %s") %
                   self.service_uri)
            LOG.exception(msg)
            raise NCCException(NCCException.UNKNOWN_ERROR)
        resp_dict = self._get_response_dict(response)
        LOG.debug(_("Response: %s"), resp_dict['body'])
        response_status = resp_dict['status']
        if response_status == requests.codes.unauthorized:
            # Bug fix: the implicit string concatenation previously
            # produced "passed.for: %s" (missing space).
            LOG.exception(_("Unable to login. Invalid credentials passed "
                            "for: %s"), self.service_uri)
            raise NCCException(NCCException.RESPONSE_ERROR)
        if not self._is_valid_response(response_status):
            msg = (_("Failed %(method)s operation on %(url)s "
                     "status code: %(response_status)s") %
                   {"method": method,
                    "url": resource_uri,
                    "response_status": response_status})
            LOG.exception(msg)
            raise NCCException(NCCException.RESPONSE_ERROR)
        return response_status, resp_dict

View File

@ -1,489 +0,0 @@
# Copyright 2014 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from neutron.api.v2 import attributes
from neutron.db.loadbalancer import loadbalancer_db
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants
from neutron.services.loadbalancer.drivers import abstract_driver
from neutron.services.loadbalancer.drivers.netscaler import ncc_client
LOG = logging.getLogger(__name__)

# Configuration options for reaching the NetScaler Control Center (NCC),
# registered under the [netscaler_driver] section.
NETSCALER_CC_OPTS = [
    cfg.StrOpt(
        'netscaler_ncc_uri',
        help=_('The URL to reach the NetScaler Control Center Server.'),
    ),
    cfg.StrOpt(
        'netscaler_ncc_username',
        help=_('Username to login to the NetScaler Control Center Server.'),
    ),
    cfg.StrOpt(
        'netscaler_ncc_password',
        help=_('Password to login to the NetScaler Control Center Server.'),
    )
]

cfg.CONF.register_opts(NETSCALER_CC_OPTS, 'netscaler_driver')

# REST resource path segments of the NCC API.
VIPS_RESOURCE = 'vips'
VIP_RESOURCE = 'vip'
POOLS_RESOURCE = 'pools'
POOL_RESOURCE = 'pool'
POOLMEMBERS_RESOURCE = 'members'
POOLMEMBER_RESOURCE = 'member'
MONITORS_RESOURCE = 'healthmonitors'
MONITOR_RESOURCE = 'healthmonitor'
POOLSTATS_RESOURCE = 'statistics'

# Provider-network attribute keys copied into requests sent to the NCC.
PROV_SEGMT_ID = 'provider:segmentation_id'
PROV_NET_TYPE = 'provider:network_type'
# Also used as the device_owner of SNAT ports created by this driver.
DRIVER_NAME = 'netscaler_driver'
class NetScalerPluginDriver(abstract_driver.LoadBalancerAbstractDriver):
    """NetScaler LBaaS Plugin driver class.

    Proxies LBaaS resource operations to a NetScaler Control Center via
    the NSClient REST client, updating neutron DB status according to
    the outcome of each call.
    """

    def __init__(self, plugin):
        self.plugin = plugin
        ncc_uri = cfg.CONF.netscaler_driver.netscaler_ncc_uri
        ncc_username = cfg.CONF.netscaler_driver.netscaler_ncc_username
        ncc_password = cfg.CONF.netscaler_driver.netscaler_ncc_password
        self.client = ncc_client.NSClient(ncc_uri,
                                          ncc_username,
                                          ncc_password)

    def create_vip(self, context, vip):
        """Create a vip on a NetScaler device."""
        network_info = self._get_vip_network_info(context, vip)
        ncc_vip = self._prepare_vip_for_creation(vip)
        # dict.update instead of dict(a.items() + b.items()): same
        # result, but also works on Python 3 where items() are views.
        ncc_vip.update(network_info)
        msg = _("NetScaler driver vip creation: %s") % repr(ncc_vip)
        LOG.debug(msg)
        status = constants.ACTIVE
        try:
            self.client.create_resource(context.tenant_id, VIPS_RESOURCE,
                                        VIP_RESOURCE, ncc_vip)
        except ncc_client.NCCException:
            status = constants.ERROR
        self.plugin.update_status(context, loadbalancer_db.Vip, vip["id"],
                                  status)

    def update_vip(self, context, old_vip, vip):
        """Update a vip on a NetScaler device."""
        update_vip = self._prepare_vip_for_update(vip)
        resource_path = "%s/%s" % (VIPS_RESOURCE, vip["id"])
        msg = (_("NetScaler driver vip %(vip_id)s update: %(vip_obj)s") %
               {"vip_id": vip["id"], "vip_obj": repr(vip)})
        LOG.debug(msg)
        status = constants.ACTIVE
        try:
            self.client.update_resource(context.tenant_id, resource_path,
                                        VIP_RESOURCE, update_vip)
        except ncc_client.NCCException:
            status = constants.ERROR
        self.plugin.update_status(context, loadbalancer_db.Vip, old_vip["id"],
                                  status)

    def delete_vip(self, context, vip):
        """Delete a vip on a NetScaler device."""
        resource_path = "%s/%s" % (VIPS_RESOURCE, vip["id"])
        msg = _("NetScaler driver vip removal: %s") % vip["id"]
        LOG.debug(msg)
        try:
            self.client.remove_resource(context.tenant_id, resource_path)
        except ncc_client.NCCException:
            self.plugin.update_status(context, loadbalancer_db.Vip,
                                      vip["id"],
                                      constants.ERROR)
        else:
            # Remove the DB row only once the backend removal succeeded.
            self.plugin._delete_db_vip(context, vip['id'])

    def create_pool(self, context, pool):
        """Create a pool on a NetScaler device."""
        network_info = self._get_pool_network_info(context, pool)
        #allocate a snat port/ipaddress on the subnet if one doesn't exist
        self._create_snatport_for_subnet_if_not_exists(context,
                                                       pool['tenant_id'],
                                                       pool['subnet_id'],
                                                       network_info)
        ncc_pool = self._prepare_pool_for_creation(pool)
        ncc_pool.update(network_info)
        msg = _("NetScaler driver pool creation: %s") % repr(ncc_pool)
        LOG.debug(msg)
        status = constants.ACTIVE
        try:
            self.client.create_resource(context.tenant_id, POOLS_RESOURCE,
                                        POOL_RESOURCE, ncc_pool)
        except ncc_client.NCCException:
            status = constants.ERROR
        self.plugin.update_status(context, loadbalancer_db.Pool,
                                  ncc_pool["id"], status)

    def update_pool(self, context, old_pool, pool):
        """Update a pool on a NetScaler device."""
        ncc_pool = self._prepare_pool_for_update(pool)
        resource_path = "%s/%s" % (POOLS_RESOURCE, old_pool["id"])
        msg = (_("NetScaler driver pool %(pool_id)s update: %(pool_obj)s") %
               {"pool_id": old_pool["id"], "pool_obj": repr(ncc_pool)})
        LOG.debug(msg)
        status = constants.ACTIVE
        try:
            self.client.update_resource(context.tenant_id, resource_path,
                                        POOL_RESOURCE, ncc_pool)
        except ncc_client.NCCException:
            status = constants.ERROR
        self.plugin.update_status(context, loadbalancer_db.Pool,
                                  old_pool["id"], status)

    def delete_pool(self, context, pool):
        """Delete a pool on a NetScaler device."""
        resource_path = "%s/%s" % (POOLS_RESOURCE, pool['id'])
        msg = _("NetScaler driver pool removal: %s") % pool["id"]
        LOG.debug(msg)
        try:
            self.client.remove_resource(context.tenant_id, resource_path)
        except ncc_client.NCCException:
            self.plugin.update_status(context, loadbalancer_db.Pool,
                                      pool["id"],
                                      constants.ERROR)
        else:
            self.plugin._delete_db_pool(context, pool['id'])
            # Release the SNAT port if this was the subnet's last pool.
            self._remove_snatport_for_subnet_if_not_used(context,
                                                         pool['tenant_id'],
                                                         pool['subnet_id'])

    def create_member(self, context, member):
        """Create a pool member on a NetScaler device."""
        ncc_member = self._prepare_member_for_creation(member)
        msg = (_("NetScaler driver poolmember creation: %s") %
               repr(ncc_member))
        LOG.info(msg)
        status = constants.ACTIVE
        try:
            self.client.create_resource(context.tenant_id,
                                        POOLMEMBERS_RESOURCE,
                                        POOLMEMBER_RESOURCE,
                                        ncc_member)
        except ncc_client.NCCException:
            status = constants.ERROR
        self.plugin.update_status(context, loadbalancer_db.Member,
                                  member["id"], status)

    def update_member(self, context, old_member, member):
        """Update a pool member on a NetScaler device."""
        ncc_member = self._prepare_member_for_update(member)
        resource_path = "%s/%s" % (POOLMEMBERS_RESOURCE, old_member["id"])
        msg = (_("NetScaler driver poolmember %(member_id)s update:"
                 " %(member_obj)s") %
               {"member_id": old_member["id"],
                "member_obj": repr(ncc_member)})
        LOG.debug(msg)
        status = constants.ACTIVE
        try:
            self.client.update_resource(context.tenant_id, resource_path,
                                        POOLMEMBER_RESOURCE, ncc_member)
        except ncc_client.NCCException:
            status = constants.ERROR
        self.plugin.update_status(context, loadbalancer_db.Member,
                                  old_member["id"], status)

    def delete_member(self, context, member):
        """Delete a pool member on a NetScaler device."""
        resource_path = "%s/%s" % (POOLMEMBERS_RESOURCE, member['id'])
        msg = (_("NetScaler driver poolmember removal: %s") %
               member["id"])
        LOG.debug(msg)
        try:
            self.client.remove_resource(context.tenant_id, resource_path)
        except ncc_client.NCCException:
            self.plugin.update_status(context, loadbalancer_db.Member,
                                      member["id"],
                                      constants.ERROR)
        else:
            self.plugin._delete_db_member(context, member['id'])

    def create_pool_health_monitor(self, context, health_monitor, pool_id):
        """Create a pool health monitor on a NetScaler device."""
        ncc_hm = self._prepare_healthmonitor_for_creation(health_monitor,
                                                          pool_id)
        resource_path = "%s/%s/%s" % (POOLS_RESOURCE, pool_id,
                                      MONITORS_RESOURCE)
        msg = (_("NetScaler driver healthmonitor creation for pool %(pool_id)s"
                 ": %(monitor_obj)s") %
               {"pool_id": pool_id,
                "monitor_obj": repr(ncc_hm)})
        LOG.debug(msg)
        status = constants.ACTIVE
        try:
            self.client.create_resource(context.tenant_id, resource_path,
                                        MONITOR_RESOURCE,
                                        ncc_hm)
        except ncc_client.NCCException:
            status = constants.ERROR
        self.plugin.update_pool_health_monitor(context,
                                               health_monitor['id'],
                                               pool_id,
                                               status, "")

    def update_pool_health_monitor(self, context, old_health_monitor,
                                   health_monitor, pool_id):
        """Update a pool health monitor on a NetScaler device."""
        ncc_hm = self._prepare_healthmonitor_for_update(health_monitor)
        resource_path = "%s/%s" % (MONITORS_RESOURCE,
                                   old_health_monitor["id"])
        msg = (_("NetScaler driver healthmonitor %(monitor_id)s update: "
                 "%(monitor_obj)s") %
               {"monitor_id": old_health_monitor["id"],
                "monitor_obj": repr(ncc_hm)})
        LOG.debug(msg)
        status = constants.ACTIVE
        try:
            self.client.update_resource(context.tenant_id, resource_path,
                                        MONITOR_RESOURCE, ncc_hm)
        except ncc_client.NCCException:
            status = constants.ERROR
        self.plugin.update_pool_health_monitor(context,
                                               old_health_monitor['id'],
                                               pool_id,
                                               status, "")

    def delete_pool_health_monitor(self, context, health_monitor, pool_id):
        """Delete a pool health monitor on a NetScaler device."""
        resource_path = "%s/%s/%s/%s" % (POOLS_RESOURCE, pool_id,
                                         MONITORS_RESOURCE,
                                         health_monitor["id"])
        msg = (_("NetScaler driver healthmonitor %(monitor_id)s"
                 "removal for pool %(pool_id)s") %
               {"monitor_id": health_monitor["id"],
                "pool_id": pool_id})
        LOG.debug(msg)
        try:
            self.client.remove_resource(context.tenant_id, resource_path)
        except ncc_client.NCCException:
            self.plugin.update_pool_health_monitor(context,
                                                   health_monitor['id'],
                                                   pool_id,
                                                   constants.ERROR, "")
        else:
            self.plugin._delete_db_pool_health_monitor(context,
                                                       health_monitor['id'],
                                                       pool_id)

    def stats(self, context, pool_id):
        """Retrieve pool statistics from the NetScaler device.

        Returns the stats dict on success; on failure marks the pool
        ERROR and returns None.
        """
        resource_path = "%s/%s" % (POOLSTATS_RESOURCE, pool_id)
        msg = _("NetScaler driver pool stats retrieval: %s") % pool_id
        LOG.debug(msg)
        try:
            stats = self.client.retrieve_resource(context.tenant_id,
                                                  resource_path)[1]
        except ncc_client.NCCException:
            self.plugin.update_status(context, loadbalancer_db.Pool,
                                      pool_id, constants.ERROR)
        else:
            return stats

    def _prepare_vip_for_creation(self, vip):
        # Immutable identity attributes plus all updatable attributes.
        creation_attrs = {
            'id': vip['id'],
            'tenant_id': vip['tenant_id'],
            'protocol': vip['protocol'],
            'address': vip['address'],
            'protocol_port': vip['protocol_port'],
        }
        if 'session_persistence' in vip:
            creation_attrs['session_persistence'] = vip['session_persistence']
        update_attrs = self._prepare_vip_for_update(vip)
        creation_attrs.update(update_attrs)
        return creation_attrs

    def _prepare_vip_for_update(self, vip):
        return {
            'name': vip['name'],
            'description': vip['description'],
            'pool_id': vip['pool_id'],
            'connection_limit': vip['connection_limit'],
            'admin_state_up': vip['admin_state_up']
        }

    def _prepare_pool_for_creation(self, pool):
        creation_attrs = {
            'id': pool['id'],
            'tenant_id': pool['tenant_id'],
            'vip_id': pool['vip_id'],
            'protocol': pool['protocol'],
            'subnet_id': pool['subnet_id'],
        }
        update_attrs = self._prepare_pool_for_update(pool)
        creation_attrs.update(update_attrs)
        return creation_attrs

    def _prepare_pool_for_update(self, pool):
        return {
            'name': pool['name'],
            'description': pool['description'],
            'lb_method': pool['lb_method'],
            'admin_state_up': pool['admin_state_up']
        }

    def _prepare_member_for_creation(self, member):
        creation_attrs = {
            'id': member['id'],
            'tenant_id': member['tenant_id'],
            'address': member['address'],
            'protocol_port': member['protocol_port'],
        }
        update_attrs = self._prepare_member_for_update(member)
        creation_attrs.update(update_attrs)
        return creation_attrs

    def _prepare_member_for_update(self, member):
        return {
            'pool_id': member['pool_id'],
            'weight': member['weight'],
            'admin_state_up': member['admin_state_up']
        }

    def _prepare_healthmonitor_for_creation(self, health_monitor, pool_id):
        creation_attrs = {
            'id': health_monitor['id'],
            'tenant_id': health_monitor['tenant_id'],
            'type': health_monitor['type'],
        }
        update_attrs = self._prepare_healthmonitor_for_update(health_monitor)
        creation_attrs.update(update_attrs)
        return creation_attrs

    def _prepare_healthmonitor_for_update(self, health_monitor):
        ncc_hm = {
            'delay': health_monitor['delay'],
            'timeout': health_monitor['timeout'],
            'max_retries': health_monitor['max_retries'],
            'admin_state_up': health_monitor['admin_state_up']
        }
        # HTTP(S) monitors carry extra probe attributes.
        if health_monitor['type'] in ['HTTP', 'HTTPS']:
            ncc_hm['http_method'] = health_monitor['http_method']
            ncc_hm['url_path'] = health_monitor['url_path']
            ncc_hm['expected_codes'] = health_monitor['expected_codes']
        return ncc_hm

    def _get_network_info(self, context, entity):
        """Collect network/subnet (and provider, if set) info for entity."""
        network_info = {}
        subnet_id = entity['subnet_id']
        subnet = self.plugin._core_plugin.get_subnet(context, subnet_id)
        network_id = subnet['network_id']
        network = self.plugin._core_plugin.get_network(context, network_id)
        network_info['network_id'] = network_id
        network_info['subnet_id'] = subnet_id
        if PROV_NET_TYPE in network:
            network_info['network_type'] = network[PROV_NET_TYPE]
        if PROV_SEGMT_ID in network:
            network_info['segmentation_id'] = network[PROV_SEGMT_ID]
        return network_info

    def _get_vip_network_info(self, context, vip):
        network_info = self._get_network_info(context, vip)
        network_info['port_id'] = vip['port_id']
        return network_info

    def _get_pool_network_info(self, context, pool):
        return self._get_network_info(context, pool)

    def _get_pools_on_subnet(self, context, tenant_id, subnet_id):
        filter_dict = {'subnet_id': [subnet_id], 'tenant_id': [tenant_id]}
        return self.plugin.get_pools(context, filters=filter_dict)

    def _get_snatport_for_subnet(self, context, tenant_id, subnet_id):
        """Return the driver's SNAT port on the subnet, or None."""
        device_id = '_lb-snatport-' + subnet_id
        subnet = self.plugin._core_plugin.get_subnet(context, subnet_id)
        network_id = subnet['network_id']
        msg = (_("Filtering ports based on network_id=%(network_id)s, "
                 "tenant_id=%(tenant_id)s, device_id=%(device_id)s") %
               {'network_id': network_id,
                'tenant_id': tenant_id,
                'device_id': device_id})
        LOG.debug(msg)
        filter_dict = {
            'network_id': [network_id],
            'tenant_id': [tenant_id],
            'device_id': [device_id],
            # Bug fix: the port attribute is 'device_owner'; the
            # previous hyphenated key never matched anything.
            'device_owner': [DRIVER_NAME]
        }
        ports = self.plugin._core_plugin.get_ports(context,
                                                   filters=filter_dict)
        if ports:
            msg = _("Found an existing SNAT port for subnet %s") % subnet_id
            LOG.info(msg)
            return ports[0]
        msg = _("Found no SNAT ports for subnet %s") % subnet_id
        LOG.info(msg)

    def _create_snatport_for_subnet(self, context, tenant_id, subnet_id,
                                    ip_address):
        """Create the SNAT port this driver uses on the given subnet."""
        subnet = self.plugin._core_plugin.get_subnet(context, subnet_id)
        fixed_ip = {'subnet_id': subnet['id']}
        if ip_address and ip_address != attributes.ATTR_NOT_SPECIFIED:
            fixed_ip['ip_address'] = ip_address
        port_data = {
            'tenant_id': tenant_id,
            'name': '_lb-snatport-' + subnet_id,
            'network_id': subnet['network_id'],
            'mac_address': attributes.ATTR_NOT_SPECIFIED,
            'admin_state_up': False,
            'device_id': '_lb-snatport-' + subnet_id,
            'device_owner': DRIVER_NAME,
            'fixed_ips': [fixed_ip],
        }
        port = self.plugin._core_plugin.create_port(context,
                                                    {'port': port_data})
        msg = _("Created SNAT port: %s") % repr(port)
        LOG.info(msg)
        return port

    def _remove_snatport_for_subnet(self, context, tenant_id, subnet_id):
        port = self._get_snatport_for_subnet(context, tenant_id, subnet_id)
        if port:
            self.plugin._core_plugin.delete_port(context, port['id'])
            msg = _("Removed SNAT port: %s") % repr(port)
            LOG.info(msg)

    def _create_snatport_for_subnet_if_not_exists(self, context, tenant_id,
                                                  subnet_id, network_info):
        port = self._get_snatport_for_subnet(context, tenant_id, subnet_id)
        if not port:
            msg = _("No SNAT port found for subnet %s."
                    " Creating one...") % subnet_id
            LOG.info(msg)
            port = self._create_snatport_for_subnet(context, tenant_id,
                                                    subnet_id,
                                                    ip_address=None)
        network_info['port_id'] = port['id']
        network_info['snat_ip'] = port['fixed_ips'][0]['ip_address']
        msg = _("SNAT port: %s") % repr(port)
        LOG.info(msg)

    def _remove_snatport_for_subnet_if_not_used(self, context, tenant_id,
                                                subnet_id):
        pools = self._get_pools_on_subnet(context, tenant_id, subnet_id)
        if not pools:
            #No pools left on the old subnet.
            #We can remove the SNAT port/ipaddress
            self._remove_snatport_for_subnet(context, tenant_id, subnet_id)
            msg = _("Removing SNAT port for subnet %s "
                    "as this is the last pool using it...") % subnet_id
            LOG.info(msg)

View File

@ -1,17 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 Radware LLC (Radware)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Avishay Balderman, Radware

File diff suppressed because it is too large Load Diff

View File

@ -1,44 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 Radware LTD.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Evgeny Fedoruk, Radware
from neutron.common import exceptions
class RadwareLBaasException(exceptions.NeutronException):
    """Base class for all Radware LBaaS provider errors."""
    message = _('An unknown exception occurred in Radware LBaaS provider.')
class AuthenticationMissing(RadwareLBaasException):
    """Raised when vDirect credentials are absent from configuration."""
    message = _('vDirect user/password missing. '
                'Specify in configuration file, under [radware] section')
class WorkflowMissing(RadwareLBaasException):
    """Raised when a required workflow is not present on the vDirect server."""
    message = _('Workflow %(workflow)s is missing on vDirect server. '
                'Upload missing workflow')
class RESTRequestFailure(RadwareLBaasException):
    """Raised when a vDirect REST call returns a non-success status."""
    message = _('REST request failed with status %(status)s. '
                'Reason: %(reason)s, Description: %(description)s. '
                'Success status codes are %(success_codes)s')
class UnsupportedEntityOperation(RadwareLBaasException):
    """Raised for operations the provider does not support on an entity."""
    message = _('%(operation)s operation is not supported for %(entity)s.')

View File

@ -1,326 +0,0 @@
#
# Copyright 2013 Radware LTD.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Avishay Balderman, Radware
from neutron.api.v2 import attributes as attrs
from neutron.common import exceptions as n_exc
from neutron import context
from neutron.db import api as qdbapi
from neutron.db.loadbalancer import loadbalancer_db as ldb
from neutron.db import servicetype_db as st_db
from neutron.extensions import loadbalancer
from neutron.openstack.common import excutils
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants
from neutron.services.loadbalancer import agent_scheduler
from neutron.services import provider_configuration as pconf
from neutron.services import service_base
# Module-level logger for the LBaaS service plugin.
LOG = logging.getLogger(__name__)
class LoadBalancerPlugin(ldb.LoadBalancerPluginDb,
agent_scheduler.LbaasAgentSchedulerDbMixin):
"""Implementation of the Neutron Loadbalancer Service Plugin.
This class manages the workflow of LBaaS request/response.
Most DB related works are implemented in class
loadbalancer_db.LoadBalancerPluginDb.
"""
supported_extension_aliases = ["lbaas",
"lbaas_agent_scheduler",
"service-type"]
# lbaas agent notifiers to handle agent update operations;
# can be updated by plugin drivers while loading;
# will be extracted by neutron manager when loading service plugins;
agent_notifiers = {}
def __init__(self):
    """Initialization for the loadbalancer service plugin."""
    # Register DB models before drivers touch the database.
    qdbapi.register_models()
    self.service_type_manager = st_db.ServiceTypeManager.get_instance()
    # Loads configured providers and validates existing associations;
    # may stop the service if orphan pools are found.
    self._load_drivers()
def _load_drivers(self):
    """Loads plugin-drivers specified in configuration."""
    drivers, default = service_base.load_drivers(constants.LOADBALANCER,
                                                 self)
    self.drivers = drivers
    self.default_provider = default
    # Extensions are not loaded yet at this point, so prevent policy
    # from being loaded while building the admin context.
    admin_ctx = context.get_admin_context(load_admin_roles=False)
    # Stop the service in case a provider was removed from the
    # configuration while resources still reference it.
    self._check_orphan_pool_associations(admin_ctx, self.drivers.keys())
def _check_orphan_pool_associations(self, context, provider_names):
    """Checks remaining associations between pools and providers.

    If the admin has not undeployed resources whose provider was
    deleted from configuration, the neutron service is stopped. The
    admin must delete those resources prior to removing providers
    from configuration.
    """
    known = set(provider_names)
    lost_providers = set()
    for pool in self.get_pools(context):
        if pool['provider'] not in known:
            lost_providers.add(pool['provider'])
    # Resources are left without a provider - stop the service.
    if lost_providers:
        msg = _("Delete associated loadbalancer pools before "
                "removing providers %s") % list(lost_providers)
        LOG.exception(msg)
        raise SystemExit(1)
def _get_driver_for_provider(self, provider):
    """Return the loaded driver registered for a provider name."""
    try:
        return self.drivers[provider]
    except KeyError:
        # Should never be reached: resources are always associated
        # with a loaded provider.
        raise n_exc.Invalid(_("Error retrieving driver for provider %s") %
                            provider)
def _get_driver_for_pool(self, context, pool_id):
    """Return the driver managing the given pool's provider."""
    provider = self.get_pool(context, pool_id)['provider']
    if provider in self.drivers:
        return self.drivers[provider]
    raise n_exc.Invalid(_("Error retrieving provider for pool %s") %
                        pool_id)
def get_plugin_type(self):
    """Return the service type constant this plugin implements."""
    return constants.LOADBALANCER
def get_plugin_description(self):
    """Return a human-readable description of the plugin."""
    return "Neutron LoadBalancer Service Plugin"
def create_vip(self, context, vip):
    """Create the vip in the DB, then hand it to its pool's driver."""
    new_vip = super(LoadBalancerPlugin, self).create_vip(context, vip)
    driver = self._get_driver_for_pool(context, new_vip['pool_id'])
    driver.create_vip(context, new_vip)
    return new_vip
def update_vip(self, context, id, vip):
    """Update the vip in the DB and notify its pool's driver."""
    # Preserve an explicit status if the caller supplied one;
    # otherwise mark the vip as pending update.
    vip['vip'].setdefault('status', constants.PENDING_UPDATE)
    old_vip = self.get_vip(context, id)
    new_vip = super(LoadBalancerPlugin, self).update_vip(context, id, vip)
    driver = self._get_driver_for_pool(context, new_vip['pool_id'])
    driver.update_vip(context, old_vip, new_vip)
    return new_vip
def _delete_db_vip(self, context, id):
    """Remove only the DB row; drivers call this after backend cleanup."""
    # proxy the call until plugin inherits from DBPlugin
    super(LoadBalancerPlugin, self).delete_vip(context, id)
def delete_vip(self, context, id):
    """Mark the vip PENDING_DELETE and dispatch removal to its driver.

    The driver is responsible for calling _delete_db_vip once the
    backend has been cleaned up.
    """
    self.update_status(context, ldb.Vip, id, constants.PENDING_DELETE)
    vip = self.get_vip(context, id)
    driver = self._get_driver_for_pool(context, vip['pool_id'])
    driver.delete_vip(context, vip)
def _get_provider_name(self, context, pool):
    """Resolve the provider for a pool, falling back to the default.

    :raises pconf.DefaultServiceProviderNotFound: when no provider was
        requested and no default provider is configured.
    """
    requested = pool.get('provider', attrs.ATTR_NOT_SPECIFIED)
    if requested != attrs.ATTR_NOT_SPECIFIED:
        provider_name = pconf.normalize_provider_name(requested)
        self.validate_provider(provider_name)
        return provider_name
    if not self.default_provider:
        raise pconf.DefaultServiceProviderNotFound(
            service_type=constants.LOADBALANCER)
    return self.default_provider
def create_pool(self, context, pool):
    """Create a pool in the DB and deploy it via its provider driver."""
    provider_name = self._get_provider_name(context, pool['pool'])
    new_pool = super(LoadBalancerPlugin, self).create_pool(context, pool)
    self.service_type_manager.add_resource_association(
        context,
        constants.LOADBALANCER,
        provider_name, new_pool['id'])
    # The provider was not known to the db plugin at pool creation,
    # so add it to the pool dict before handing it to the driver.
    new_pool['provider'] = provider_name
    driver = self.drivers[provider_name]
    try:
        driver.create_pool(context, new_pool)
    except loadbalancer.NoEligibleBackend:
        # Covers the cases when a backend of any kind (agent,
        # appliance, etc.) is not available.
        self.update_status(context, ldb.Pool,
                           new_pool['id'], constants.ERROR,
                           "No eligible backend")
        raise loadbalancer.NoEligibleBackend(pool_id=new_pool['id'])
    return new_pool
def update_pool(self, context, id, pool):
    """Update a pool in the DB and notify its provider driver."""
    pool['pool'].setdefault('status', constants.PENDING_UPDATE)
    previous = self.get_pool(context, id)
    updated = super(LoadBalancerPlugin, self).update_pool(context, id, pool)
    provider_driver = self._get_driver_for_provider(updated['provider'])
    provider_driver.update_pool(context, previous, updated)
    return updated
def _delete_db_pool(self, context, id):
    """Remove the pool row and its provider association (DB only).

    Any failure is logged, the pool is flagged ERROR, and the original
    exception is re-raised via save_and_reraise_exception.
    """
    # proxy the call until plugin inherits from DBPlugin
    # rely on uuid uniqueness:
    try:
        with context.session.begin(subtransactions=True):
            self.service_type_manager.del_resource_associations(
                context, [id])
            super(LoadBalancerPlugin, self).delete_pool(context, id)
    except Exception:
        # that should not happen
        # if it's still a case - something goes wrong
        # log the error and mark the pool as ERROR
        LOG.error(_('Failed to delete pool %s, putting it in ERROR state'),
                  id)
        with excutils.save_and_reraise_exception():
            self.update_status(context, ldb.Pool,
                               id, constants.ERROR)
def delete_pool(self, context, id):
    """Flag a pool for deletion and delegate the delete to its driver.

    The status update and the delete-precondition check share one
    transaction to avoid racing a concurrent delete.
    """
    with context.session.begin(subtransactions=True):
        self.update_status(context, ldb.Pool, id, constants.PENDING_DELETE)
        self._ensure_pool_delete_conditions(context, id)

    pool = self.get_pool(context, id)
    provider_driver = self._get_driver_for_provider(pool['provider'])
    provider_driver.delete_pool(context, pool)
def create_member(self, context, member):
    """Persist a member, then create it on the owning pool's driver."""
    created = super(LoadBalancerPlugin, self).create_member(context, member)
    pool_driver = self._get_driver_for_pool(context, created['pool_id'])
    pool_driver.create_member(context, created)
    return created
def update_member(self, context, id, member):
    """Update a member in the DB, then notify the pool's driver."""
    member['member'].setdefault('status', constants.PENDING_UPDATE)
    previous = self.get_member(context, id)
    updated = super(LoadBalancerPlugin, self).update_member(
        context, id, member)
    pool_driver = self._get_driver_for_pool(context, updated['pool_id'])
    pool_driver.update_member(context, previous, updated)
    return updated
def _delete_db_member(self, context, id):
    """Remove the member row only; no driver interaction happens here."""
    # proxy the call until plugin inherits from DBPlugin
    super(LoadBalancerPlugin, self).delete_member(context, id)
def delete_member(self, context, id):
    """Mark a member PENDING_DELETE and delegate deletion to the driver."""
    self.update_status(context, ldb.Member, id, constants.PENDING_DELETE)
    member = self.get_member(context, id)
    pool_driver = self._get_driver_for_pool(context, member['pool_id'])
    pool_driver.delete_member(context, member)
def _validate_hm_parameters(self, delay, timeout):
    """Reject health monitors whose delay is shorter than their timeout."""
    if not delay >= timeout:
        raise loadbalancer.DelayOrTimeoutInvalid()
def create_health_monitor(self, context, health_monitor):
    """Validate and persist a health monitor.

    No driver is called here: drivers only learn about monitors when
    one is associated with a pool.
    """
    hm_data = health_monitor['health_monitor']
    self._validate_hm_parameters(hm_data['delay'], hm_data['timeout'])
    return super(LoadBalancerPlugin, self).create_health_monitor(
        context, health_monitor)
def update_health_monitor(self, context, id, health_monitor):
    """Update a health monitor and notify every pool associated with it."""
    new_hm = health_monitor['health_monitor']
    old_hm = self.get_health_monitor(context, id)
    # Validate against the merged (new-over-old) delay/timeout values,
    # since a partial update may supply only one of the two.
    delay = new_hm.get('delay', old_hm.get('delay'))
    timeout = new_hm.get('timeout', old_hm.get('timeout'))
    self._validate_hm_parameters(delay, timeout)

    hm = super(LoadBalancerPlugin, self).update_health_monitor(
        context,
        id,
        health_monitor
    )

    with context.session.begin(subtransactions=True):
        qry = context.session.query(
            ldb.PoolMonitorAssociation
        ).filter_by(monitor_id=hm['id']).join(ldb.Pool)
        # Fan the change out to the driver of each associated pool.
        for assoc in qry:
            driver = self._get_driver_for_pool(context, assoc['pool_id'])
            driver.update_pool_health_monitor(context, old_hm,
                                              hm, assoc['pool_id'])
    return hm
def _delete_db_pool_health_monitor(self, context, hm_id, pool_id):
    """Remove the pool<->monitor association row only (no driver call)."""
    super(LoadBalancerPlugin, self).delete_pool_health_monitor(context,
                                                               hm_id,
                                                               pool_id)
def _delete_db_health_monitor(self, context, id):
    """Remove the health monitor row only (no driver call)."""
    super(LoadBalancerPlugin, self).delete_health_monitor(context, id)
def create_pool_health_monitor(self, context, health_monitor, pool_id):
    """Associate a health monitor with a pool and notify the driver."""
    association = super(
        LoadBalancerPlugin, self).create_pool_health_monitor(
        context,
        health_monitor,
        pool_id
    )
    monitor = self.get_health_monitor(
        context, health_monitor['health_monitor']['id'])
    pool_driver = self._get_driver_for_pool(context, pool_id)
    pool_driver.create_pool_health_monitor(context, monitor, pool_id)
    return association
def delete_pool_health_monitor(self, context, id, pool_id):
    """Mark the association PENDING_DELETE and delegate to the driver."""
    self.update_pool_health_monitor(context, id, pool_id,
                                    constants.PENDING_DELETE)
    monitor = self.get_health_monitor(context, id)
    pool_driver = self._get_driver_for_pool(context, pool_id)
    pool_driver.delete_pool_health_monitor(context, monitor, pool_id)
def stats(self, context, pool_id):
    """Return pool statistics, refreshing the DB from the driver first."""
    driver = self._get_driver_for_pool(context, pool_id)
    stats_data = driver.stats(context, pool_id)
    # if we get something from the driver -
    # update the db and return the value from db
    # else - return what we have in db
    if stats_data:
        super(LoadBalancerPlugin, self).update_pool_stats(
            context,
            pool_id,
            stats_data
        )
    return super(LoadBalancerPlugin, self).stats(context,
                                                 pool_id)
def populate_vip_graph(self, context, vip):
    """Attach the full object graph (pool, members, monitors) to *vip*."""
    pool = self.get_pool(context, vip['pool_id'])
    members = [self.get_member(context, member_id)
               for member_id in pool['members']]
    monitors = [self.get_health_monitor(context, monitor_id)
                for monitor_id in pool['health_monitors']]
    vip.update(pool=pool, members=members, health_monitors=monitors)
    return vip
def validate_provider(self, provider):
    """Raise ServiceProviderNotFound unless *provider* has a loaded driver."""
    if provider in self.drivers:
        return
    raise pconf.ServiceProviderNotFound(
        provider=provider, service_type=constants.LOADBALANCER)

View File

@ -1,15 +0,0 @@
# Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
#
# Author: Sylvain Afchain <sylvain.afchain@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

View File

@ -1,15 +0,0 @@
# Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
#
# Author: Sylvain Afchain <sylvain.afchain@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

View File

@ -1,297 +0,0 @@
# Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
#
# Author: Sylvain Afchain <sylvain.afchain@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import time
import eventlet
eventlet.monkey_patch()
from oslo.config import cfg
from neutron.agent.common import config
from neutron.agent import rpc as agent_rpc
from neutron.common import config as common_config
from neutron.common import constants as constants
from neutron.common import rpc as n_rpc
from neutron.common import rpc_compat
from neutron.common import topics
from neutron.common import utils
from neutron import context
from neutron import manager
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import loopingcall
from neutron.openstack.common import periodic_task
from neutron.openstack.common import service
from neutron import service as neutron_service
LOG = logging.getLogger(__name__)
class MeteringPluginRpc(rpc_compat.RpcProxy):
    """RPC proxy the metering agent uses to pull data from the plugin."""

    BASE_RPC_API_VERSION = '1.0'

    def __init__(self, host):
        # NOTE(review): *host* is accepted but never stored here;
        # _get_sync_data_metering reads self.host, which MeteringAgent
        # sets itself — confirm before using this class standalone.
        super(MeteringPluginRpc,
              self).__init__(topic=topics.METERING_AGENT,
                             default_version=self.BASE_RPC_API_VERSION)

    def _get_sync_data_metering(self, context):
        """Fetch routers/labels to meter; returns None and logs on failure."""
        try:
            return self.call(context,
                             self.make_msg('get_sync_data_metering',
                                           host=self.host),
                             topic=topics.METERING_PLUGIN)
        except Exception:
            LOG.exception(_("Failed synchronizing routers"))
class MeteringAgent(MeteringPluginRpc, manager.Manager):
    """Agent that periodically measures per-label router traffic.

    Every ``measure_interval`` seconds the configured driver is asked for
    traffic counters, which are accumulated per metering label; roughly
    every ``report_interval`` seconds the totals are emitted as
    'l3.meter' notifications and the per-label counters are reset.
    """

    Opts = [
        cfg.StrOpt('driver',
                   default='neutron.services.metering.drivers.noop.'
                   'noop_driver.NoopMeteringDriver',
                   help=_("Metering driver")),
        cfg.IntOpt('measure_interval', default=30,
                   help=_("Interval between two metering measures")),
        cfg.IntOpt('report_interval', default=300,
                   help=_("Interval between two metering reports")),
    ]

    def __init__(self, host, conf=None):
        self.conf = conf or cfg.CONF
        self._load_drivers()
        self.root_helper = config.get_root_helper(self.conf)
        self.context = context.get_admin_context_without_session()
        self.metering_loop = loopingcall.FixedIntervalLoopingCall(
            self._metering_loop
        )
        measure_interval = self.conf.measure_interval
        self.last_report = 0
        self.metering_loop.start(interval=measure_interval)
        self.host = host

        # label id -> tenant id owning that label (rebuilt on each measure)
        self.label_tenant_id = {}
        # router id -> router dict as provided by the plugin
        self.routers = {}
        # label id -> accumulated counters
        # ('pkts'/'bytes'/'time'/'first_update'/'last_update')
        self.metering_infos = {}
        super(MeteringAgent, self).__init__(host=host)

    def _load_drivers(self):
        """Loads plugin-driver from configuration."""
        LOG.info(_("Loading Metering driver %s"), self.conf.driver)
        if not self.conf.driver:
            raise SystemExit(_('A metering driver must be specified'))
        self.metering_driver = importutils.import_object(
            self.conf.driver, self, self.conf)

    def _metering_notification(self):
        """Emit one 'l3.meter' notification per label and reset counters."""
        for label_id, info in self.metering_infos.items():
            data = {'label_id': label_id,
                    'tenant_id': self.label_tenant_id.get(label_id),
                    'pkts': info['pkts'],
                    'bytes': info['bytes'],
                    'time': info['time'],
                    'first_update': info['first_update'],
                    'last_update': info['last_update'],
                    'host': self.host}

            LOG.debug(_("Send metering report: %s"), data)
            notifier = n_rpc.get_notifier('metering')
            notifier.info(self.context, 'l3.meter', data)
            info['pkts'] = 0
            info['bytes'] = 0
            info['time'] = 0

    def _purge_metering_info(self):
        """Forget labels with no updates for a full report interval.

        Bug fix: the original iterated ``self.metering_info`` (a second,
        always-empty dict) instead of ``self.metering_infos`` where the
        data actually lives, used an inverted staleness test
        (``last_update > ts + report_interval`` is never true for sane
        clocks), and deleted entries from the dict while iterating it.
        """
        deadline = int(time.time()) - self.conf.report_interval
        # Materialize the stale keys first so deletion is safe.
        stale_ids = [label_id
                     for label_id, info in self.metering_infos.items()
                     if info['last_update'] < deadline]
        for label_id in stale_ids:
            del self.metering_infos[label_id]

    def _add_metering_info(self, label_id, pkts, bytes):
        """Fold one measurement into the label's running totals."""
        ts = int(time.time())
        info = self.metering_infos.get(label_id, {'bytes': 0,
                                                  'pkts': 0,
                                                  'time': 0,
                                                  'first_update': ts,
                                                  'last_update': ts})
        info['bytes'] += bytes
        info['pkts'] += pkts
        info['time'] += ts - info['last_update']
        info['last_update'] = ts
        self.metering_infos[label_id] = info
        return info

    def _add_metering_infos(self):
        """Poll the driver for counters and accumulate them per label."""
        self.label_tenant_id = {}
        for router in self.routers.values():
            tenant_id = router['tenant_id']
            labels = router.get(constants.METERING_LABEL_KEY, [])
            for label in labels:
                label_id = label['id']
                self.label_tenant_id[label_id] = tenant_id

        accs = self._get_traffic_counters(self.context, self.routers.values())
        if not accs:
            return
        for label_id, acc in accs.items():
            self._add_metering_info(label_id, acc['pkts'], acc['bytes'])

    def _metering_loop(self):
        """Periodic task: measure, then report when the interval elapsed."""
        self._add_metering_infos()

        ts = int(time.time())
        delta = ts - self.last_report
        report_interval = self.conf.report_interval
        if delta > report_interval:
            self._metering_notification()
            self._purge_metering_info()
            self.last_report = ts

    @utils.synchronized('metering-agent')
    def _invoke_driver(self, context, meterings, func_name):
        """Call *func_name* on the driver, logging (not raising) failures."""
        try:
            return getattr(self.metering_driver, func_name)(context,
                                                            meterings)
        except AttributeError:
            LOG.exception(_("Driver %(driver)s does not implement %(func)s"),
                          {'driver': self.conf.driver,
                           'func': func_name})
        except RuntimeError:
            LOG.exception(_("Driver %(driver)s:%(func)s runtime error"),
                          {'driver': self.conf.driver,
                           'func': func_name})

    @periodic_task.periodic_task(run_immediately=True)
    def _sync_routers_task(self, context):
        """Periodic full resync of the router cache from the plugin."""
        routers = self._get_sync_data_metering(self.context)
        if not routers:
            return
        self._update_routers(context, routers)

    def router_deleted(self, context, router_id):
        """RPC: drop a deleted router after folding in its last counters."""
        self._add_metering_infos()

        if router_id in self.routers:
            del self.routers[router_id]

        return self._invoke_driver(context, router_id,
                                   'remove_router')

    def routers_updated(self, context, routers=None):
        """RPC: refresh router cache (full resync when *routers* is empty)."""
        if not routers:
            routers = self._get_sync_data_metering(self.context)
        if not routers:
            return
        self._update_routers(context, routers)

    def _update_routers(self, context, routers):
        for router in routers:
            self.routers[router['id']] = router

        return self._invoke_driver(context, routers,
                                   'update_routers')

    def _get_traffic_counters(self, context, routers):
        LOG.debug(_("Get router traffic counters"))
        return self._invoke_driver(context, routers, 'get_traffic_counters')

    def update_metering_label_rules(self, context, routers):
        LOG.debug(_("Update metering rules from agent"))
        return self._invoke_driver(context, routers,
                                   'update_metering_label_rules')

    def add_metering_label(self, context, routers):
        LOG.debug(_("Creating a metering label from agent"))
        return self._invoke_driver(context, routers,
                                   'add_metering_label')

    def remove_metering_label(self, context, routers):
        """RPC: account any outstanding traffic, then remove the label."""
        self._add_metering_infos()

        LOG.debug(_("Delete a metering label from agent"))
        return self._invoke_driver(context, routers,
                                   'remove_metering_label')
class MeteringAgentWithStateReport(MeteringAgent):
    """MeteringAgent that also heartbeats its state to the plugin."""

    def __init__(self, host, conf=None):
        super(MeteringAgentWithStateReport, self).__init__(host=host,
                                                           conf=conf)
        self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN)
        self.agent_state = {
            'binary': 'neutron-metering-agent',
            'host': host,
            'topic': topics.METERING_AGENT,
            'configurations': {
                'metering_driver': self.conf.driver,
                'measure_interval':
                self.conf.measure_interval,
                'report_interval': self.conf.report_interval
            },
            'start_flag': True,
            'agent_type': constants.AGENT_TYPE_METERING}
        report_interval = cfg.CONF.AGENT.report_interval
        # First report is a blocking call so startup failures surface.
        self.use_call = True
        if report_interval:
            self.heartbeat = loopingcall.FixedIntervalLoopingCall(
                self._report_state)
            self.heartbeat.start(interval=report_interval)

    def _report_state(self):
        """Send one state report; stop heartbeating if unsupported."""
        try:
            self.state_rpc.report_state(self.context, self.agent_state,
                                        self.use_call)
            # start_flag is only meaningful on the very first report.
            self.agent_state.pop('start_flag', None)
            self.use_call = False
        except AttributeError:
            # This means the server does not support report_state
            LOG.warn(_("Neutron server does not support state report."
                       " State report for this agent will be disabled."))
            self.heartbeat.stop()
            return
        except Exception:
            LOG.exception(_("Failed reporting state!"))

    def agent_updated(self, context, payload):
        """RPC hook invoked when the server changes this agent's config."""
        LOG.info(_("agent_updated by server side %s!"), payload)
def main():
    """Entry point for the neutron-metering-agent binary.

    Registers options, initializes config/logging, then launches the
    agent service and blocks until it exits.
    """
    conf = cfg.CONF
    conf.register_opts(MeteringAgent.Opts)
    config.register_agent_state_opts_helper(conf)
    config.register_root_helper(conf)
    common_config.init(sys.argv[1:])
    config.setup_logging(conf)
    server = neutron_service.Service.create(
        binary='neutron-metering-agent',
        topic=topics.METERING_AGENT,
        report_interval=cfg.CONF.AGENT.report_interval,
        manager='neutron.services.metering.agents.'
                'metering_agent.MeteringAgentWithStateReport')
    service.launch(server).wait()

View File

@ -1,15 +0,0 @@
# Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
#
# Author: Sylvain Afchain <sylvain.afchain@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

View File

@ -1,51 +0,0 @@
# Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
#
# Author: Sylvain Afchain <sylvain.afchain@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class MeteringAbstractDriver(object):
    """Abstract Metering driver.

    Concrete drivers translate metering labels/rules into a measuring
    backend and report traffic counters back to the agent.
    """

    def __init__(self, plugin, conf):
        pass

    @abc.abstractmethod
    def update_routers(self, context, routers):
        """Resynchronize driver state with the given routers."""
        pass

    @abc.abstractmethod
    def remove_router(self, context, router_id):
        """Drop all metering state kept for the given router."""
        pass

    @abc.abstractmethod
    def update_metering_label_rules(self, context, routers):
        """Reinstall the rule set of each router's metering labels."""
        pass

    @abc.abstractmethod
    def add_metering_label(self, context, routers):
        """Install metering for the labels carried by *routers*."""
        pass

    @abc.abstractmethod
    def remove_metering_label(self, context, routers):
        """Remove metering for the labels carried by *routers*."""
        pass

    @abc.abstractmethod
    def get_traffic_counters(self, context, routers):
        """Return accumulated counters, keyed by metering label id."""
        pass

View File

@ -1,15 +0,0 @@
# Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
#
# Author: Sylvain Afchain <sylvain.afchain@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

View File

@ -1,284 +0,0 @@
# Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
#
# Author: Sylvain Afchain <sylvain.afchain@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from neutron.agent.common import config
from neutron.agent.linux import interface
from neutron.agent.linux import iptables_manager
from neutron.common import constants as constants
from neutron.common import log
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from neutron.services.metering.drivers import abstract_driver
LOG = logging.getLogger(__name__)
NS_PREFIX = 'qrouter-'       # router namespace prefix used by the L3 agent
WRAP_NAME = 'neutron-meter'  # binary/wrap name for metering iptables chains
EXTERNAL_DEV_PREFIX = 'qg-'  # gateway (external) device name prefix
TOP_CHAIN = WRAP_NAME + "-FORWARD"  # top chain all label rule chains hook into
RULE = '-r-'   # chain-name infix for per-label classification chains
LABEL = '-l-'  # chain-name infix for per-label counting chains

# Register the agent/interface options this driver depends on.
config.register_interface_driver_opts_helper(cfg.CONF)
config.register_use_namespaces_opts_helper(cfg.CONF)
config.register_root_helper(cfg.CONF)
cfg.CONF.register_opts(interface.OPTS)
class IptablesManagerTransaction(object):
    """Batch nested iptables changes into a single apply().

    A class-level reference count is kept per IptablesManager instance;
    rules are flushed to iptables only when the outermost transaction
    exits.
    """

    __transactions = {}

    def __init__(self, im):
        self.im = im
        self.__transactions[im] = self.__transactions.get(im, 0) + 1

    def __enter__(self):
        return self.im

    def __exit__(self, exc_type, exc_val, exc_tb):
        remaining = self.__transactions.get(self.im)
        if remaining == 1:
            # Outermost transaction: push everything to iptables.
            self.im.apply()
            del self.__transactions[self.im]
        else:
            self.__transactions[self.im] = remaining - 1
class RouterWithMetering(object):
    """Per-router metering state: namespace, iptables manager, labels."""

    def __init__(self, conf, router):
        self.conf = conf
        self.router = router
        self.id = router['id']
        self.root_helper = config.get_root_helper(self.conf)
        if conf.use_namespaces:
            self.ns_name = NS_PREFIX + self.id
        else:
            self.ns_name = None
        self.iptables_manager = iptables_manager.IptablesManager(
            root_helper=self.root_helper,
            namespace=self.ns_name,
            binary_name=WRAP_NAME)
        # metering label id -> label dict currently applied on this router
        self.metering_labels = {}
class IptablesMeteringDriver(abstract_driver.MeteringAbstractDriver):
    """Meter router traffic with per-label iptables chains.

    Each metering label gets two chains inside the router's namespace:
    a rules chain (classification, WRAP_NAME + RULE + label id) hooked
    into TOP_CHAIN, and a label chain (pure packet/byte counting,
    WRAP_NAME + LABEL + label id) that classified traffic jumps to.
    """

    def __init__(self, plugin, conf):
        self.plugin = plugin
        self.conf = conf or cfg.CONF
        # router id -> RouterWithMetering
        self.routers = {}

        if not self.conf.interface_driver:
            raise SystemExit(_('An interface driver must be specified'))
        LOG.info(_("Loading interface driver %s"), self.conf.interface_driver)
        self.driver = importutils.import_object(self.conf.interface_driver,
                                                self.conf)

    def _update_router(self, router):
        """Return the cached RouterWithMetering, refreshed with *router*."""
        r = self.routers.get(router['id'],
                             RouterWithMetering(self.conf, router))
        r.router = router
        self.routers[r.id] = r

        return r

    @log.log
    def update_routers(self, context, routers):
        """Resync metering chains against the new set of routers."""
        # Disassociate routers that disappeared from the new set.
        # Bug fix: the original loop referenced an undefined name
        # ``router`` here (the loop variable was router_id), raising
        # NameError whenever a known router was removed.
        router_ids = set(router['id'] for router in routers)
        removed = [rm for router_id, rm in self.routers.items()
                   if router_id not in router_ids]
        for rm in removed:
            self._process_disassociate_metering_label(rm.router)

        for router in routers:
            old_gw_port_id = None
            old_rm = self.routers.get(router['id'])
            if old_rm:
                old_gw_port_id = old_rm.router['gw_port_id']
            gw_port_id = router['gw_port_id']

            if gw_port_id != old_gw_port_id:
                # Gateway moved: rebuild the label chains in one
                # iptables transaction.
                if old_rm:
                    with IptablesManagerTransaction(old_rm.iptables_manager):
                        self._process_disassociate_metering_label(router)
                        if gw_port_id:
                            self._process_associate_metering_label(router)
                elif gw_port_id:
                    self._process_associate_metering_label(router)

    @log.log
    def remove_router(self, context, router_id):
        """Forget a router entirely (its namespace is gone anyway)."""
        if router_id in self.routers:
            del self.routers[router_id]

    def get_external_device_name(self, port_id):
        """Map a gateway port id to its device name, driver-truncated."""
        return (EXTERNAL_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN]

    def _process_metering_label_rules(self, rm, rules, label_chain,
                                      rules_chain):
        """Translate metering rules into iptables rules on *rules_chain*."""
        im = rm.iptables_manager
        ext_dev = self.get_external_device_name(rm.router['gw_port_id'])
        if not ext_dev:
            return

        for rule in rules:
            remote_ip = rule['remote_ip_prefix']
            # ingress counts on the external device's inbound side,
            # egress on its outbound side.
            # (renamed from ``dir``, which shadowed the builtin)
            direction = '-i ' + ext_dev
            if rule['direction'] == 'egress':
                direction = '-o ' + ext_dev

            if rule['excluded']:
                # Excluded traffic short-circuits before any counting;
                # top=True so RETURNs precede the counting jumps.
                ipt_rule = direction + ' -d ' + remote_ip + ' -j RETURN'
                im.ipv4['filter'].add_rule(rules_chain, ipt_rule, wrap=False,
                                           top=True)
            else:
                ipt_rule = (direction + ' -d ' + remote_ip +
                            ' -j ' + label_chain)
                im.ipv4['filter'].add_rule(rules_chain, ipt_rule,
                                           wrap=False, top=False)

    def _process_associate_metering_label(self, router):
        """Create chains/rules for every label carried by *router*."""
        self._update_router(router)
        rm = self.routers.get(router['id'])

        with IptablesManagerTransaction(rm.iptables_manager):
            labels = router.get(constants.METERING_LABEL_KEY, [])
            for label in labels:
                label_id = label['id']

                label_chain = iptables_manager.get_chain_name(WRAP_NAME +
                                                              LABEL + label_id,
                                                              wrap=False)
                rm.iptables_manager.ipv4['filter'].add_chain(label_chain,
                                                             wrap=False)

                rules_chain = iptables_manager.get_chain_name(WRAP_NAME +
                                                              RULE + label_id,
                                                              wrap=False)
                rm.iptables_manager.ipv4['filter'].add_chain(rules_chain,
                                                             wrap=False)
                rm.iptables_manager.ipv4['filter'].add_rule(TOP_CHAIN, '-j ' +
                                                            rules_chain,
                                                            wrap=False)

                # Empty rule in the label chain: pure packet/byte counter.
                rm.iptables_manager.ipv4['filter'].add_rule(label_chain,
                                                            '',
                                                            wrap=False)

                rules = label.get('rules')
                if rules:
                    self._process_metering_label_rules(rm, rules,
                                                       label_chain,
                                                       rules_chain)

                rm.metering_labels[label_id] = label

    def _process_disassociate_metering_label(self, router):
        """Tear down the chains of every label carried by *router*."""
        rm = self.routers.get(router['id'])
        if not rm:
            return

        with IptablesManagerTransaction(rm.iptables_manager):
            labels = router.get(constants.METERING_LABEL_KEY, [])
            for label in labels:
                label_id = label['id']
                if label_id not in rm.metering_labels:
                    continue

                label_chain = iptables_manager.get_chain_name(WRAP_NAME +
                                                              LABEL + label_id,
                                                              wrap=False)
                rules_chain = iptables_manager.get_chain_name(WRAP_NAME +
                                                              RULE + label_id,
                                                              wrap=False)

                rm.iptables_manager.ipv4['filter'].remove_chain(label_chain,
                                                                wrap=False)
                rm.iptables_manager.ipv4['filter'].remove_chain(rules_chain,
                                                                wrap=False)

                del rm.metering_labels[label_id]

    @log.log
    def add_metering_label(self, context, routers):
        for router in routers:
            self._process_associate_metering_label(router)

    @log.log
    def update_metering_label_rules(self, context, routers):
        for router in routers:
            self._update_metering_label_rules(router)

    def _update_metering_label_rules(self, router):
        """Rebuild the classification rules of each label on *router*."""
        rm = self.routers.get(router['id'])
        if not rm:
            return

        with IptablesManagerTransaction(rm.iptables_manager):
            labels = router.get(constants.METERING_LABEL_KEY, [])
            for label in labels:
                label_id = label['id']

                label_chain = iptables_manager.get_chain_name(WRAP_NAME +
                                                              LABEL + label_id,
                                                              wrap=False)
                rules_chain = iptables_manager.get_chain_name(WRAP_NAME +
                                                              RULE + label_id,
                                                              wrap=False)
                # Flush and re-add; the counting label_chain is untouched.
                rm.iptables_manager.ipv4['filter'].empty_chain(rules_chain,
                                                               wrap=False)

                rules = label.get('rules')
                if rules:
                    self._process_metering_label_rules(rm, rules,
                                                       label_chain,
                                                       rules_chain)

    @log.log
    def remove_metering_label(self, context, routers):
        for router in routers:
            self._process_disassociate_metering_label(router)

    @log.log
    def get_traffic_counters(self, context, routers):
        """Read (and zero) the label-chain counters of the given routers."""
        accs = {}
        for router in routers:
            rm = self.routers.get(router['id'])
            if not rm:
                continue

            for label_id, label in rm.metering_labels.items():
                chain = iptables_manager.get_chain_name(WRAP_NAME + LABEL +
                                                        label_id, wrap=False)

                chain_acc = rm.iptables_manager.get_traffic_counters(
                    chain, wrap=False, zero=True)

                if not chain_acc:
                    continue

                acc = accs.get(label_id, {'pkts': 0, 'bytes': 0})

                acc['pkts'] += chain_acc['pkts']
                acc['bytes'] += chain_acc['bytes']

                accs[label_id] = acc

        return accs

View File

@ -1,15 +0,0 @@
# Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
#
# Author: Sylvain Afchain <sylvain.afchain@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

View File

@ -1,45 +0,0 @@
# Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
#
# Author: Sylvain Afchain <sylvain.afchain@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.common import log
from neutron.services.metering.drivers import abstract_driver
class NoopMeteringDriver(abstract_driver.MeteringAbstractDriver):
    """Driver satisfying the metering interface without doing anything.

    Used as the default when no real backend is configured; every hook
    is traced via @log.log and returns None.
    """

    @log.log
    def update_routers(self, context, routers):
        pass

    @log.log
    def remove_router(self, context, router_id):
        pass

    @log.log
    def update_metering_label_rules(self, context, routers):
        pass

    @log.log
    def add_metering_label(self, context, routers):
        pass

    @log.log
    def remove_metering_label(self, context, routers):
        pass

    @log.log
    def get_traffic_counters(self, context, routers):
        pass

View File

@ -1,74 +0,0 @@
# Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
#
# Author: Sylvain Afchain <sylvain.afchain@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.api.rpc.agentnotifiers import metering_rpc_agent_api
from neutron.common import rpc_compat
from neutron.common import topics
from neutron.db.metering import metering_db
from neutron.db.metering import metering_rpc
class MeteringPlugin(metering_db.MeteringDbMixin):
    """Implementation of the Neutron Metering Service Plugin."""

    supported_extension_aliases = ["metering"]

    def __init__(self):
        super(MeteringPlugin, self).__init__()

        # RPC consumer so metering agents can pull sync data from us.
        self.endpoints = [metering_rpc.MeteringRpcCallbacks(self)]

        self.conn = rpc_compat.create_connection(new=True)
        self.conn.create_consumer(
            topics.METERING_PLUGIN, self.endpoints, fanout=False)
        self.conn.consume_in_threads()

        # Notifier used to push label/rule changes down to agents.
        self.meter_rpc = metering_rpc_agent_api.MeteringAgentNotifyAPI()

    def create_metering_label(self, context, metering_label):
        """Persist a label, then push the refreshed sync data to agents."""
        label = super(MeteringPlugin, self).create_metering_label(
            context, metering_label)

        data = self.get_sync_data_metering(context)
        self.meter_rpc.add_metering_label(context, data)

        return label

    def delete_metering_label(self, context, label_id):
        """Delete a label and tell agents to remove it.

        Sync data is captured *before* the DB delete so agents still see
        the label that is being removed.
        """
        data = self.get_sync_data_metering(context, label_id)
        label = super(MeteringPlugin, self).delete_metering_label(
            context, label_id)

        self.meter_rpc.remove_metering_label(context, data)

        return label

    def create_metering_label_rule(self, context, metering_label_rule):
        """Persist a rule and broadcast the refreshed rule set."""
        rule = super(MeteringPlugin, self).create_metering_label_rule(
            context, metering_label_rule)

        data = self.get_sync_data_metering(context)
        self.meter_rpc.update_metering_label_rules(context, data)

        return rule

    def delete_metering_label_rule(self, context, rule_id):
        """Delete a rule and broadcast the refreshed rule set."""
        rule = super(MeteringPlugin, self).delete_metering_label_rule(
            context, rule_id)

        data = self.get_sync_data_metering(context)
        self.meter_rpc.update_metering_label_rules(context, data)

        return rule

View File

@ -1,162 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from neutron.common import exceptions as n_exc
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants
LOG = logging.getLogger(__name__)
# Each service_provider entry has the form
# <service_type>:<name>:<driver>[:default]; the option may repeat.
serviceprovider_opts = [
    cfg.MultiStrOpt('service_provider', default=[],
                    help=_('Defines providers for advanced services '
                           'using the format: '
                           '<service_type>:<name>:<driver>[:default]'))
]

cfg.CONF.register_opts(serviceprovider_opts, 'service_providers')
# global scope function that should be used in service APIs
def normalize_provider_name(name):
    """Return the canonical (lower-case) form of a provider name.

    Keeps provider lookups case-insensitive across service APIs.
    """
    canonical = name.lower()
    return canonical
def parse_service_provider_opt():
    """Parse service definition opts and returns result."""

    def validate_name(name):
        if len(name) > 255:
            raise n_exc.Invalid(
                _("Provider name is limited by 255 characters: %s") % name)

    res = []
    for prov_def in cfg.CONF.service_providers.service_provider:
        parts = prov_def.split(':')
        if len(parts) < 3:
            raise n_exc.Invalid(_("Invalid service provider format"))
        svc_type, name, driver = parts[:3]
        validate_name(name)
        name = normalize_provider_name(name)

        # An optional, non-empty fourth field may only be 'default'.
        default = False
        if len(parts) == 4 and parts[3]:
            if parts[3] != 'default':
                msg = (_("Invalid provider format. "
                         "Last part should be 'default' or empty: %s") %
                       prov_def)
                LOG.error(msg)
                raise n_exc.Invalid(msg)
            default = True

        if svc_type not in constants.ALLOWED_SERVICES:
            msg = (_("Service type '%(svc_type)s' is not allowed, "
                     "allowed types: %(allowed)s") %
                   {'svc_type': svc_type,
                    'allowed': constants.ALLOWED_SERVICES})
            LOG.error(msg)
            raise n_exc.Invalid(msg)

        res.append({'service_type': svc_type,
                    'name': name,
                    'driver': driver,
                    'default': default})
    return res
class ServiceProviderNotFound(n_exc.InvalidInput):
    """Raised when a named provider is not configured for a service type."""
    message = _("Service provider '%(provider)s' could not be found "
                "for service type %(service_type)s")
class DefaultServiceProviderNotFound(n_exc.InvalidInput):
    """Raised when no provider was flagged 'default' for a service type."""
    message = _("Service type %(service_type)s does not have a default "
                "service provider")
class ServiceProviderAlreadyAssociated(n_exc.Conflict):
    """Raised when a resource is already bound to a provider."""
    message = _("Resource '%(resource_id)s' is already associated with "
                "provider '%(provider)s' for service type '%(service_type)s'")
class ProviderConfiguration(object):
    """In-memory registry of service providers keyed by (type, name).

    Enforces three invariants while providers are added:
      * a driver backs at most one provider,
      * each service type has at most one default provider,
      * each (service_type, name) pair is registered only once.
    """

    def __init__(self, prov_data):
        """Build the registry from parsed provider definitions.

        :param prov_data: iterable of dicts with 'service_type', 'name',
            'driver' and 'default' keys, as produced by
            parse_service_provider_opt().
        """
        self.providers = {}
        for prov in prov_data:
            self.add_provider(prov)

    def _ensure_driver_unique(self, driver):
        # Keys are irrelevant here, so iterate values only.
        for v in self.providers.values():
            if v['driver'] == driver:
                msg = (_("Driver %s is not unique across providers") %
                       driver)
                LOG.exception(msg)
                raise n_exc.Invalid(msg)

    def _ensure_default_unique(self, service_type, default):
        # Renamed parameter from 'type' to avoid shadowing the builtin.
        if not default:
            return
        for k, v in self.providers.items():
            if k[0] == service_type and v['default']:
                msg = _("Multiple default providers "
                        "for service %s") % service_type
                LOG.exception(msg)
                raise n_exc.Invalid(msg)

    def add_provider(self, provider):
        """Validate and register a single provider definition.

        :param provider: dict with 'service_type', 'name', 'driver' and
            'default' keys.
        :raises n_exc.Invalid: on duplicate driver, duplicate default for
            the service type, or duplicate (service_type, name) pair.
        """
        self._ensure_driver_unique(provider['driver'])
        self._ensure_default_unique(provider['service_type'],
                                    provider['default'])
        provider_type = (provider['service_type'], provider['name'])
        if provider_type in self.providers:
            msg = (_("Multiple providers specified for service "
                     "%s") % provider['service_type'])
            LOG.exception(msg)
            raise n_exc.Invalid(msg)
        self.providers[provider_type] = {'driver': provider['driver'],
                                         'default': provider['default']}

    def _check_entry(self, k, v, filters):
        # Small helper to deal with query filters: True when the
        # (key tuple, value dict) entry matches every supplied filter list.
        if not filters:
            return True
        for index, key in enumerate(['service_type', 'name']):
            if key in filters and k[index] not in filters[key]:
                return False
        for key in ('driver', 'default'):
            if key in filters and v[key] not in filters[key]:
                return False
        return True

    def _fields(self, resource, fields):
        # Project the resource dict down to the requested fields, if any.
        if fields:
            return dict((key, item) for key, item in resource.items()
                        if key in fields)
        return resource

    def get_service_providers(self, filters=None, fields=None):
        """Return registered providers as a list of dicts.

        :param filters: optional dict mapping attribute name to a list of
            acceptable values; only matching providers are returned.
        :param fields: optional list of attribute names to include in each
            returned dict.
        """
        return [self._fields({'service_type': k[0],
                              'name': k[1],
                              'driver': v['driver'],
                              'default': v['default']},
                             fields)
                for k, v in self.providers.items()
                if self._check_entry(k, v, filters)]

View File

@ -1,18 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Swaminathan Vasudevan, Hewlett-Packard

View File

@ -1,148 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013, Nachi Ueno, NTT I3, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from neutron.agent import l3_agent
from neutron.extensions import vpnaas
from neutron.openstack.common import importutils
# Device drivers the VPN agent should load; registered under the
# [vpnagent] config section.  Defaults to the OpenSwan IPSec driver.
vpn_agent_opts = [
    cfg.MultiStrOpt(
        'vpn_device_driver',
        default=['neutron.services.vpn.device_drivers.'
                 'ipsec.OpenSwanDriver'],
        help=_("The vpn device drivers Neutron will use")),
]
cfg.CONF.register_opts(vpn_agent_opts, 'vpnagent')
class VPNAgent(l3_agent.L3NATAgentWithStateReport):
    """L3 agent extended to host VPN service device drivers.

    Loads every configured vpn_device_driver, forwards router lifecycle
    events (added / removed / sync) to each driver, and exposes the
    namespace and iptables helpers the drivers rely on.
    """

    def __init__(self, host, conf=None):
        super(VPNAgent, self).__init__(host=host, conf=conf)
        self.setup_device_drivers(host)

    def setup_device_drivers(self, host):
        """Instantiate all configured device drivers.

        :param host: hostname; the drivers need it for their RPC topics.
            Each device driver stays in-process and talks to the server
            side service plugin over a device-specific RPC topic.
        :returns: None
        """
        self.devices = []
        for driver_path in cfg.CONF.vpnagent.vpn_device_driver:
            try:
                self.devices.append(
                    importutils.import_object(driver_path, self, host))
            except ImportError:
                raise vpnaas.DeviceDriverImportError(
                    device_driver=driver_path)

    def get_namespace(self, router_id):
        """Return the namespace name of a router, or None if unknown."""
        info = self.router_info.get(router_id)
        if info is None:
            return
        return info.ns_name

    def add_nat_rule(self, router_id, chain, rule, top=False):
        """Add a NAT iptables rule in the router's namespace.

        :param router_id: router_id
        :param chain: a string of chain name
        :param rule: a string of rule
        :param top: if True, place the rule at the top of the chain
        No-op when the router is not known to this agent.
        """
        info = self.router_info.get(router_id)
        if info:
            info.iptables_manager.ipv4['nat'].add_rule(chain, rule, top=top)

    def remove_nat_rule(self, router_id, chain, rule, top=False):
        """Remove a NAT iptables rule from the router's namespace.

        :param router_id: router_id
        :param chain: a string of chain name
        :param rule: a string of rule
        :param top: unused; kept so the signature mirrors add_nat_rule
        No-op when the router is not known to this agent.
        """
        info = self.router_info.get(router_id)
        if info:
            info.iptables_manager.ipv4['nat'].remove_rule(chain, rule,
                                                          top=top)

    def iptables_apply(self, router_id):
        """Apply pending iptables changes; no-op for unknown routers."""
        info = self.router_info.get(router_id)
        if info:
            info.iptables_manager.apply()

    def _router_added(self, router_id, router):
        """Router added event: notify every device driver.

        :param router_id: id of added router
        :param router: dict of router
        """
        super(VPNAgent, self)._router_added(router_id, router)
        for driver in self.devices:
            driver.create_router(router_id)

    def _router_removed(self, router_id):
        """Router removed event: notify every device driver.

        :param router_id: id of removed router
        """
        super(VPNAgent, self)._router_removed(router_id)
        for driver in self.devices:
            driver.destroy_router(router_id)

    def _process_routers(self, routers, all_routers=False):
        """Router sync event: let every device driver synchronize.

        :param routers: list of routers
        """
        super(VPNAgent, self)._process_routers(routers, all_routers)
        for driver in self.devices:
            driver.sync(self.context, routers)
def main():
    # Entry point: run the standard L3 agent loop, substituting VPNAgent
    # as the manager class so VPN device drivers get loaded.
    l3_agent.main(
        manager='neutron.services.vpn.agent.VPNAgent')

View File

@ -1,16 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013, Nachi Ueno, NTT I3, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

View File

@ -1,22 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013, Nachi Ueno, NTT I3, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# RPC topics shared between the VPN service plugin (server side, *_DRIVER)
# and the VPN agents' device drivers (agent side, *_AGENT).
IPSEC_DRIVER_TOPIC = 'ipsec_driver'
IPSEC_AGENT_TOPIC = 'ipsec_agent'
# Cisco CSR-specific variants of the above.
CISCO_IPSEC_DRIVER_TOPIC = 'cisco_csr_ipsec_driver'
CISCO_IPSEC_AGENT_TOPIC = 'cisco_csr_ipsec_agent'

View File

@ -1,38 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013, Nachi Ueno, NTT I3, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class DeviceDriver(object):
    """Abstract interface for VPN device drivers hosted by the VPN agent."""
    def __init__(self, agent, host):
        # Concrete drivers use agent (namespace/iptables helpers) and host
        # (RPC topic suffix); the base implementation keeps no state.
        pass
    @abc.abstractmethod
    def sync(self, context, processes):
        """Reconcile local device state with the service plugin."""
        pass
    @abc.abstractmethod
    def create_router(self, process_id):
        """Handle creation of the router identified by process_id."""
        pass
    @abc.abstractmethod
    def destroy_router(self, process_id):
        """Handle removal of the router identified by process_id."""
        pass

View File

@ -1,258 +0,0 @@
# Copyright 2014 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Paul Michali, Cisco Systems, Inc.
import time
import netaddr
import requests
from requests import exceptions as r_exc
from neutron.openstack.common import jsonutils
from neutron.openstack.common import log as logging
# Default per-request timeout (seconds) when talking to the CSR.
TIMEOUT = 20.0
LOG = logging.getLogger(__name__)
HEADER_CONTENT_TYPE_JSON = {'content-type': 'application/json'}
# All CSR REST resources hang off this versioned base URL.
URL_BASE = 'https://%(host)s/api/v1/%(resource)s'
def make_route_id(cidr, interface):
    """Build ID that will be used to identify route for later deletion."""
    # The ID encodes network address, prefix length and interface name,
    # e.g. "10.1.0.0_16_GigabitEthernet1".
    network = netaddr.IPNetwork(cidr)
    return '%s_%s_%s' % (network.network, network.prefixlen, interface)
class CsrRestClient(object):
    """REST CsrRestClient for accessing the Cisco Cloud Services Router."""
    def __init__(self, host, tunnel_ip, username, password, timeout=None):
        # Record the CSR endpoint and credentials; no connection is made
        # here.  self.status always mirrors the last HTTP status seen.
        self.host = host
        self.tunnel_ip = tunnel_ip
        self.auth = (username, password)
        self.token = None
        self.status = requests.codes.OK
        self.timeout = timeout
        self.max_tries = 5
        self.session = requests.Session()
    def _response_info_for(self, response, method):
        """Return contents or location from response.
        For a POST or GET with a 200 response, the response content
        is returned.
        For a POST with a 201 response, return the header's location,
        which contains the identifier for the created resource.
        If there is an error, return the response content, so that
        it can be used in error processing ('error-code', 'error-message',
        and 'detail' fields).
        """
        if method in ('POST', 'GET') and self.status == requests.codes.OK:
            LOG.debug(_('RESPONSE: %s'), response.json())
            return response.json()
        if method == 'POST' and self.status == requests.codes.CREATED:
            return response.headers.get('location', '')
        if self.status >= requests.codes.BAD_REQUEST and response.content:
            # Substring check on the raw body, so non-JSON error bodies
            # fall through and return None implicitly.
            if 'error-code' in response.content:
                content = jsonutils.loads(response.content)
                LOG.debug("Error response content %s", content)
                return content
    def _request(self, method, url, **kwargs):
        """Perform REST request and save response info.

        Sets self.status from the HTTP response, or from a synthesized
        status code on timeout/connection/unexpected errors, in which case
        None is returned.
        """
        try:
            LOG.debug(_("%(method)s: Request for %(resource)s payload: "
                        "%(payload)s"),
                      {'method': method.upper(), 'resource': url,
                       'payload': kwargs.get('data')})
            start_time = time.time()
            # verify=False: CSR uses a self-signed certificate.
            response = self.session.request(method, url, verify=False,
                                            timeout=self.timeout, **kwargs)
            LOG.debug(_("%(method)s Took %(time).2f seconds to process"),
                      {'method': method.upper(),
                       'time': time.time() - start_time})
        except (r_exc.Timeout, r_exc.SSLError) as te:
            # Should never see SSLError, unless requests package is old (<2.0)
            timeout_val = 0.0 if self.timeout is None else self.timeout
            LOG.warning(_("%(method)s: Request timeout%(ssl)s "
                          "(%(timeout).3f sec) for CSR(%(host)s)"),
                        {'method': method,
                         'timeout': timeout_val,
                         'ssl': '(SSLError)'
                         if isinstance(te, r_exc.SSLError) else '',
                         'host': self.host})
            self.status = requests.codes.REQUEST_TIMEOUT
        except r_exc.ConnectionError:
            LOG.exception(_("%(method)s: Unable to connect to CSR(%(host)s)"),
                          {'method': method, 'host': self.host})
            self.status = requests.codes.NOT_FOUND
        except Exception as e:
            LOG.error(_("%(method)s: Unexpected error for CSR (%(host)s): "
                        "%(error)s"),
                      {'method': method, 'host': self.host, 'error': e})
            self.status = requests.codes.INTERNAL_SERVER_ERROR
        else:
            self.status = response.status_code
            LOG.debug(_("%(method)s: Completed [%(status)s]"),
                      {'method': method, 'status': self.status})
            return self._response_info_for(response, method)
    def authenticate(self):
        """Obtain a token to use for subsequent CSR REST requests.
        This is called when there is no token yet, or if the token has expired
        and attempts to use it resulted in an UNAUTHORIZED REST response.

        Returns True on success; returns None (falsy) on failure, with
        self.status holding the failing HTTP status.
        """
        url = URL_BASE % {'host': self.host, 'resource': 'auth/token-services'}
        headers = {'Content-Length': '0',
                   'Accept': 'application/json'}
        headers.update(HEADER_CONTENT_TYPE_JSON)
        LOG.debug(_("%(auth)s with CSR %(host)s"),
                  {'auth': 'Authenticating' if self.token is None
                   else 'Reauthenticating', 'host': self.host})
        self.token = None
        response = self._request("POST", url, headers=headers, auth=self.auth)
        if response:
            self.token = response['token-id']
            LOG.debug(_("Successfully authenticated with CSR %s"), self.host)
            return True
        LOG.error(_("Failed authentication with CSR %(host)s [%(status)s]"),
                  {'host': self.host, 'status': self.status})
    def _do_request(self, method, resource, payload=None, more_headers=None,
                    full_url=False):
        """Perform a REST request to a CSR resource.
        If this is the first time interacting with the CSR, a token will
        be obtained. If the request fails, due to an expired token, the
        token will be obtained and the request will be retried once more.
        """
        if self.token is None:
            if not self.authenticate():
                return
        if full_url:
            url = resource
        else:
            url = ('https://%(host)s/api/v1/%(resource)s' %
                   {'host': self.host, 'resource': resource})
        headers = {'Accept': 'application/json', 'X-auth-token': self.token}
        if more_headers:
            headers.update(more_headers)
        if payload:
            payload = jsonutils.dumps(payload)
        response = self._request(method, url, data=payload, headers=headers)
        if self.status == requests.codes.UNAUTHORIZED:
            # Token expired: re-authenticate and retry exactly once.
            if not self.authenticate():
                return
            headers['X-auth-token'] = self.token
            response = self._request(method, url, data=payload,
                                     headers=headers)
        if self.status != requests.codes.REQUEST_TIMEOUT:
            return response
        LOG.error(_("%(method)s: Request timeout for CSR(%(host)s)"),
                  {'method': method, 'host': self.host})
    def get_request(self, resource, full_url=False):
        """Perform a REST GET requests for a CSR resource."""
        return self._do_request('GET', resource, full_url=full_url)
    def post_request(self, resource, payload=None):
        """Perform a POST request to a CSR resource."""
        return self._do_request('POST', resource, payload=payload,
                                more_headers=HEADER_CONTENT_TYPE_JSON)
    def put_request(self, resource, payload=None):
        """Perform a PUT request to a CSR resource."""
        return self._do_request('PUT', resource, payload=payload,
                                more_headers=HEADER_CONTENT_TYPE_JSON)
    def delete_request(self, resource):
        """Perform a DELETE request on a CSR resource."""
        return self._do_request('DELETE', resource,
                                more_headers=HEADER_CONTENT_TYPE_JSON)
    def create_ike_policy(self, policy_info):
        """Create an IKE policy, merging in the fixed v1/pre-share fields."""
        base_ike_policy_info = {u'version': u'v1',
                                u'local-auth-method': u'pre-share'}
        base_ike_policy_info.update(policy_info)
        return self.post_request('vpn-svc/ike/policies',
                                 payload=base_ike_policy_info)
    def create_ipsec_policy(self, policy_info):
        """Create an IPSec policy; tunnel mode is always used."""
        base_ipsec_policy_info = {u'mode': u'tunnel'}
        base_ipsec_policy_info.update(policy_info)
        return self.post_request('vpn-svc/ipsec/policies',
                                 payload=base_ipsec_policy_info)
    def create_pre_shared_key(self, psk_info):
        """Create a pre-shared key (keyring) entry."""
        return self.post_request('vpn-svc/ike/keyrings', payload=psk_info)
    def create_ipsec_connection(self, connection_info):
        """Create a site-to-site IPSec connection (IPv4 only)."""
        base_conn_info = {u'vpn-type': u'site-to-site',
                          u'ip-version': u'ipv4'}
        connection_info.update(base_conn_info)
        return self.post_request('vpn-svc/site-to-site',
                                 payload=connection_info)
    def configure_ike_keepalive(self, keepalive_info):
        """Set global IKE keepalive (DPD); always forced to periodic."""
        base_keepalive_info = {u'periodic': True}
        keepalive_info.update(base_keepalive_info)
        return self.put_request('vpn-svc/ike/keepalive', keepalive_info)
    def create_static_route(self, route_info):
        """Create a static route for peer traffic."""
        return self.post_request('routing-svc/static-routes',
                                 payload=route_info)
    def delete_static_route(self, route_id):
        """Delete a static route by its ID (see make_route_id)."""
        return self.delete_request('routing-svc/static-routes/%s' % route_id)
    def set_ipsec_connection_state(self, tunnel, admin_up=True):
        """Set the IPSec site-to-site connection (tunnel) admin state.
        Note: When a tunnel is created, it will be admin up.
        """
        info = {u'vpn-interface-name': tunnel, u'enabled': admin_up}
        return self.put_request('vpn-svc/site-to-site/%s/state' % tunnel, info)
    def delete_ipsec_connection(self, conn_id):
        """Delete a site-to-site IPSec connection."""
        return self.delete_request('vpn-svc/site-to-site/%s' % conn_id)
    def delete_ipsec_policy(self, policy_id):
        """Delete an IPSec policy."""
        return self.delete_request('vpn-svc/ipsec/policies/%s' % policy_id)
    def delete_ike_policy(self, policy_id):
        """Delete an IKE policy."""
        return self.delete_request('vpn-svc/ike/policies/%s' % policy_id)
    def delete_pre_shared_key(self, key_id):
        """Delete a pre-shared key (keyring) entry."""
        return self.delete_request('vpn-svc/ike/keyrings/%s' % key_id)
    def read_tunnel_statuses(self):
        """Return (tunnel-name, status) pairs for all active sessions.

        Returns an empty list when the query fails or yields no results.
        """
        results = self.get_request('vpn-svc/site-to-site/active/sessions')
        if self.status != requests.codes.OK or not results:
            return []
        tunnels = [(t[u'vpn-interface-name'], t[u'status'])
                   for t in results['items']]
        return tunnels

View File

@ -1,858 +0,0 @@
# Copyright 2014 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Paul Michali, Cisco Systems, Inc.
import abc
import collections
import requests
import netaddr
from oslo.config import cfg
from oslo import messaging
import six
from neutron.common import exceptions
from neutron.common import rpc_compat
from neutron import context as ctx
from neutron.openstack.common import lockutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import loopingcall
from neutron.plugins.common import constants
from neutron.plugins.common import utils as plugin_utils
from neutron.services.vpn.common import topics
from neutron.services.vpn import device_drivers
from neutron.services.vpn.device_drivers import (
cisco_csr_rest_client as csr_client)
# Interval of the periodic task that polls CSRs for tunnel status;
# registered under the [cisco_csr_ipsec] config section.
ipsec_opts = [
    cfg.IntOpt('status_check_interval',
               default=60,
               help=_("Status check interval for Cisco CSR IPSec connections"))
]
cfg.CONF.register_opts(ipsec_opts, 'cisco_csr_ipsec')
LOG = logging.getLogger(__name__)
# One step of a rollback plan: which delete action to invoke, on which CSR
# resource ID, plus a human-readable title for error reporting.
RollbackStep = collections.namedtuple('RollbackStep',
                                      ['action', 'resource_id', 'title'])
class CsrResourceCreateFailure(exceptions.NeutronException):
    """Raised when the CSR rejects creation of a REST resource."""
    message = _("Cisco CSR failed to create %(resource)s (%(which)s)")
class CsrAdminStateChangeFailure(exceptions.NeutronException):
    """Raised when a tunnel admin up/down request fails on the CSR."""
    message = _("Cisco CSR failed to change %(tunnel)s admin state to "
                "%(state)s")
class CsrDriverMismatchError(exceptions.NeutronException):
    """Raised when a required attribute mapping is absent from the driver."""
    message = _("Required %(resource)s attribute %(attr)s mapping for Cisco "
                "CSR is missing in device driver")
class CsrUnknownMappingError(exceptions.NeutronException):
    """Raised when an attribute value has no known Cisco CSR mapping."""
    message = _("Device driver does not have a mapping of '%(value)s for "
                "attribute %(attr)s of %(resource)s")
def find_available_csrs_from_config(config_files):
    """Read INI for available Cisco CSRs that driver can use.
    Loads management port, tunnel IP, user, and password information for
    available CSRs from configuration file. Driver will use this info to
    configure VPN connections. The CSR is associated 1:1 with a Neutron
    router. To identify which CSR to use for a VPN service, the public
    (GW) IP of the Neutron router will be used as an index into the CSR
    config info.

    :param config_files: list of config file paths to scan.
    :returns: dict mapping router GW IP -> CSR connection info dict with
        'rest_mgmt', 'tunnel_ip', 'username', 'password' and 'timeout'
        keys.  Malformed entries are logged and skipped.
    :raises cfg.Error: if any of the files cannot be parsed.
    """
    multi_parser = cfg.MultiConfigParser()
    LOG.info(_("Scanning config files %s for Cisco CSR configurations"),
             config_files)
    try:
        read_ok = multi_parser.read(config_files)
    except cfg.ParseError as pe:
        LOG.error(_("Config file parse error: %s"), pe)
        return {}
    if len(read_ok) != len(config_files):
        raise cfg.Error(_("Unable to parse config files %s for Cisco CSR "
                          "info") % config_files)
    csrs_found = {}
    for parsed_file in multi_parser.parsed:
        for parsed_item in parsed_file.keys():
            # Section names look like 'cisco_csr_rest:<router GW IP>'.
            device_type, sep, for_router = parsed_item.partition(':')
            if device_type.lower() == 'cisco_csr_rest':
                try:
                    netaddr.IPNetwork(for_router)
                except netaddr.core.AddrFormatError:
                    LOG.error(_("Ignoring Cisco CSR configuration entry - "
                                "router IP %s is not valid"), for_router)
                    continue
                entry = parsed_file[parsed_item]
                # Check for missing fields
                try:
                    rest_mgmt_ip = entry['rest_mgmt'][0]
                    tunnel_ip = entry['tunnel_ip'][0]
                    username = entry['username'][0]
                    password = entry['password'][0]
                except KeyError as ke:
                    LOG.error(_("Ignoring Cisco CSR for router %(router)s "
                                "- missing %(field)s setting"),
                              {'router': for_router, 'field': str(ke)})
                    continue
                # Validate fields
                try:
                    timeout = float(entry['timeout'][0])
                except ValueError:
                    LOG.error(_("Ignoring Cisco CSR for router %s - "
                                "timeout is not a floating point number"),
                              for_router)
                    continue
                except KeyError:
                    # 'timeout' is optional; fall back to the default.
                    timeout = csr_client.TIMEOUT
                try:
                    netaddr.IPAddress(rest_mgmt_ip)
                except netaddr.core.AddrFormatError:
                    LOG.error(_("Ignoring Cisco CSR for subnet %s - "
                                "REST management is not an IP address"),
                              for_router)
                    continue
                try:
                    netaddr.IPAddress(tunnel_ip)
                except netaddr.core.AddrFormatError:
                    LOG.error(_("Ignoring Cisco CSR for router %s - "
                                "local tunnel is not an IP address"),
                              for_router)
                    continue
                csrs_found[for_router] = {'rest_mgmt': rest_mgmt_ip,
                                          'tunnel_ip': tunnel_ip,
                                          'username': username,
                                          'password': password,
                                          'timeout': timeout}
                LOG.debug(_("Found CSR for router %(router)s: %(info)s"),
                          {'router': for_router,
                           'info': csrs_found[for_router]})
    return csrs_found
class CiscoCsrIPsecVpnDriverApi(rpc_compat.RpcProxy):
    """RPC API for agent to plugin messaging."""
    def get_vpn_services_on_host(self, context, host):
        """Get list of vpnservices on this host.
        The vpnservices including related ipsec_site_connection,
        ikepolicy, ipsecpolicy, and Cisco info on this host.
        """
        # Blocking call: sync processing needs the plugin's answer.
        return self.call(context,
                         self.make_msg('get_vpn_services_on_host',
                                       host=host),
                         topic=self.topic)
    def update_status(self, context, status):
        """Update status for all VPN services and connections."""
        # Fire-and-forget cast: status reports need no reply.
        return self.cast(context,
                         self.make_msg('update_status',
                                       status=status),
                         topic=self.topic)
@six.add_metaclass(abc.ABCMeta)
class CiscoCsrIPsecDriver(device_drivers.DeviceDriver):
"""Cisco CSR VPN Device Driver for IPSec.
This class is designed for use with L3-agent now.
However this driver will be used with another agent in future.
so the use of "Router" is kept minimul now.
Insted of router_id, we are using process_id in this code.
"""
# history
# 1.0 Initial version
RPC_API_VERSION = '1.0'
# TODO(ihrachys): we can't use RpcCallback here due to inheritance
# issues
target = messaging.Target(version=RPC_API_VERSION)
def __init__(self, agent, host):
self.host = host
self.conn = rpc_compat.create_connection(new=True)
context = ctx.get_admin_context_without_session()
node_topic = '%s.%s' % (topics.CISCO_IPSEC_AGENT_TOPIC, self.host)
self.service_state = {}
self.endpoints = [self]
self.conn.create_consumer(node_topic, self.endpoints, fanout=False)
self.conn.consume_in_threads()
self.agent_rpc = (
CiscoCsrIPsecVpnDriverApi(topics.CISCO_IPSEC_DRIVER_TOPIC, '1.0'))
self.periodic_report = loopingcall.FixedIntervalLoopingCall(
self.report_status, context)
self.periodic_report.start(
interval=agent.conf.cisco_csr_ipsec.status_check_interval)
csrs_found = find_available_csrs_from_config(cfg.CONF.config_file)
if csrs_found:
LOG.info(_("Loaded %(num)d Cisco CSR configuration%(plural)s"),
{'num': len(csrs_found),
'plural': 's'[len(csrs_found) == 1:]})
else:
raise SystemExit(_('No Cisco CSR configurations found in: %s') %
cfg.CONF.config_file)
self.csrs = dict([(k, csr_client.CsrRestClient(v['rest_mgmt'],
v['tunnel_ip'],
v['username'],
v['password'],
v['timeout']))
for k, v in csrs_found.items()])
def vpnservice_updated(self, context, **kwargs):
"""Handle VPNaaS service driver change notifications."""
LOG.debug(_("Handling VPN service update notification '%s'"),
kwargs.get('reason', ''))
self.sync(context, [])
def create_vpn_service(self, service_data):
"""Create new entry to track VPN service and its connections."""
vpn_service_id = service_data['id']
vpn_service_router = service_data['external_ip']
self.service_state[vpn_service_id] = CiscoCsrVpnService(
service_data, self.csrs.get(vpn_service_router))
return self.service_state[vpn_service_id]
def update_connection(self, context, vpn_service_id, conn_data):
"""Handle notification for a single IPSec connection."""
vpn_service = self.service_state[vpn_service_id]
conn_id = conn_data['id']
conn_is_admin_up = conn_data[u'admin_state_up']
if conn_id in vpn_service.conn_state: # Existing connection...
ipsec_conn = vpn_service.conn_state[conn_id]
config_changed = ipsec_conn.check_for_changes(conn_data)
if config_changed:
LOG.debug(_("Update: Existing connection %s changed"), conn_id)
ipsec_conn.delete_ipsec_site_connection(context, conn_id)
ipsec_conn.create_ipsec_site_connection(context, conn_data)
ipsec_conn.conn_info = conn_data
if ipsec_conn.forced_down:
if vpn_service.is_admin_up and conn_is_admin_up:
LOG.debug(_("Update: Connection %s no longer admin down"),
conn_id)
ipsec_conn.set_admin_state(is_up=True)
ipsec_conn.forced_down = False
else:
if not vpn_service.is_admin_up or not conn_is_admin_up:
LOG.debug(_("Update: Connection %s forced to admin down"),
conn_id)
ipsec_conn.set_admin_state(is_up=False)
ipsec_conn.forced_down = True
else: # New connection...
ipsec_conn = vpn_service.create_connection(conn_data)
ipsec_conn.create_ipsec_site_connection(context, conn_data)
if not vpn_service.is_admin_up or not conn_is_admin_up:
LOG.debug(_("Update: Created new connection %s in admin down "
"state"), conn_id)
ipsec_conn.set_admin_state(is_up=False)
ipsec_conn.forced_down = True
else:
LOG.debug(_("Update: Created new connection %s"), conn_id)
ipsec_conn.is_dirty = False
ipsec_conn.last_status = conn_data['status']
ipsec_conn.is_admin_up = conn_is_admin_up
return ipsec_conn
def update_service(self, context, service_data):
"""Handle notification for a single VPN Service and its connections."""
vpn_service_id = service_data['id']
csr_id = service_data['external_ip']
if csr_id not in self.csrs:
LOG.error(_("Update: Skipping VPN service %(service)s as it's "
"router (%(csr_id)s is not associated with a Cisco "
"CSR"), {'service': vpn_service_id, 'csr_id': csr_id})
return
if vpn_service_id in self.service_state:
LOG.debug(_("Update: Existing VPN service %s detected"),
vpn_service_id)
vpn_service = self.service_state[vpn_service_id]
else:
LOG.debug(_("Update: New VPN service %s detected"), vpn_service_id)
vpn_service = self.create_vpn_service(service_data)
vpn_service.is_dirty = False
vpn_service.connections_removed = False
vpn_service.last_status = service_data['status']
vpn_service.is_admin_up = service_data[u'admin_state_up']
for conn_data in service_data['ipsec_conns']:
self.update_connection(context, vpn_service_id, conn_data)
LOG.debug(_("Update: Completed update processing"))
return vpn_service
def update_all_services_and_connections(self, context):
"""Update services and connections based on plugin info.
Perform any create and update operations and then update status.
Mark every visited connection as no longer "dirty" so they will
not be deleted at end of sync processing.
"""
services_data = self.agent_rpc.get_vpn_services_on_host(context,
self.host)
LOG.debug("Sync updating for %d VPN services", len(services_data))
vpn_services = []
for service_data in services_data:
vpn_service = self.update_service(context, service_data)
if vpn_service:
vpn_services.append(vpn_service)
return vpn_services
def mark_existing_connections_as_dirty(self):
"""Mark all existing connections as "dirty" for sync."""
service_count = 0
connection_count = 0
for service_state in self.service_state.values():
service_state.is_dirty = True
service_count += 1
for conn_id in service_state.conn_state:
service_state.conn_state[conn_id].is_dirty = True
connection_count += 1
LOG.debug(_("Mark: %(service)d VPN services and %(conn)d IPSec "
"connections marked dirty"), {'service': service_count,
'conn': connection_count})
def remove_unknown_connections(self, context):
"""Remove connections that are not known by service driver."""
service_count = 0
connection_count = 0
for vpn_service_id, vpn_service in self.service_state.items():
dirty = [c_id for c_id, c in vpn_service.conn_state.items()
if c.is_dirty]
vpn_service.connections_removed = len(dirty) > 0
for conn_id in dirty:
conn_state = vpn_service.conn_state[conn_id]
conn_state.delete_ipsec_site_connection(context, conn_id)
connection_count += 1
del vpn_service.conn_state[conn_id]
if vpn_service.is_dirty:
service_count += 1
del self.service_state[vpn_service_id]
elif dirty:
self.connections_removed = True
LOG.debug(_("Sweep: Removed %(service)d dirty VPN service%(splural)s "
"and %(conn)d dirty IPSec connection%(cplural)s"),
{'service': service_count, 'conn': connection_count,
'splural': 's'[service_count == 1:],
'cplural': 's'[connection_count == 1:]})
def build_report_for_connections_on(self, vpn_service):
"""Create the report fragment for IPSec connections on a service.
Collect the current status from the Cisco CSR and use that to update
the status and generate report fragment for each connection on the
service. If there is no status information, or no change, then no
report info will be created for the connection. The combined report
data is returned.
"""
LOG.debug(_("Report: Collecting status for IPSec connections on VPN "
"service %s"), vpn_service.service_id)
tunnels = vpn_service.get_ipsec_connections_status()
report = {}
for connection in vpn_service.conn_state.values():
if connection.forced_down:
LOG.debug(_("Connection %s forced down"), connection.conn_id)
current_status = constants.DOWN
else:
current_status = connection.find_current_status_in(tunnels)
LOG.debug(_("Connection %(conn)s reported %(status)s"),
{'conn': connection.conn_id,
'status': current_status})
frag = connection.update_status_and_build_report(current_status)
if frag:
LOG.debug(_("Report: Adding info for IPSec connection %s"),
connection.conn_id)
report.update(frag)
return report
def build_report_for_service(self, vpn_service):
"""Create the report info for a VPN service and its IPSec connections.
Get the report info for the connections on the service, and include
it into the report info for the VPN service. If there is no report
info for the connection, then no change has occurred and no report
will be generated. If there is only one connection for the service,
we'll set the service state to match the connection (with ERROR seen
as DOWN).
"""
conn_report = self.build_report_for_connections_on(vpn_service)
if conn_report or vpn_service.connections_removed:
pending_handled = plugin_utils.in_pending_status(
vpn_service.last_status)
vpn_service.update_last_status()
LOG.debug(_("Report: Adding info for VPN service %s"),
vpn_service.service_id)
return {u'id': vpn_service.service_id,
u'status': vpn_service.last_status,
u'updated_pending_status': pending_handled,
u'ipsec_site_connections': conn_report}
else:
return {}
    @lockutils.synchronized('vpn-agent', 'neutron-')
    def report_status(self, context):
        """Report status of all VPN services and IPSec connections to plugin.

        This is called periodically by the agent, to push up changes in
        status. Use a lock to serialize access to (and changing of)
        running state.

        Thin locked wrapper: the same lock name is shared with sync(), so a
        periodic report never interleaves with a sync in progress.
        """
        return self.report_status_internal(context)
def report_status_internal(self, context):
"""Generate report and send to plugin, if anything changed."""
service_report = []
LOG.debug(_("Report: Starting status report processing"))
for vpn_service_id, vpn_service in self.service_state.items():
LOG.debug(_("Report: Collecting status for VPN service %s"),
vpn_service_id)
report = self.build_report_for_service(vpn_service)
if report:
service_report.append(report)
if service_report:
LOG.info(_("Sending status report update to plugin"))
self.agent_rpc.update_status(context, service_report)
LOG.debug(_("Report: Completed status report processing"))
return service_report
    @lockutils.synchronized('vpn-agent', 'neutron-')
    def sync(self, context, routers):
        """Synchronize with plugin and report current status.

        Mark all "known" services/connections as dirty, update them based on
        information from the plugin, remove (sweep) any connections that are
        not updated (dirty), and report updates, if any, back to plugin.
        Called when update/delete a service or create/update/delete a
        connection (vpnservice_updated message), or router change
        (_process_routers).
        Use lock to serialize access (and changes) to running state for VPN
        service and IPsec connections.

        :param context: RPC context.
        :param routers: router list (unused here; kept for the common
            device-driver sync interface).
        """
        # Mark -> update -> sweep -> report; the order is significant.
        self.mark_existing_connections_as_dirty()
        self.update_all_services_and_connections(context)
        self.remove_unknown_connections(context)
        self.report_status_internal(context)
    def create_router(self, process_id):
        """Actions taken when router created.

        :param process_id: ID of the router that was created.
        """
        # Note: Since Cisco CSR is running out-of-band, nothing to do here
        pass
    def destroy_router(self, process_id):
        """Actions taken when router deleted.

        :param process_id: ID of the router that was deleted.
        """
        # Note: Since Cisco CSR is running out-of-band, nothing to do here
        pass
class CiscoCsrVpnService(object):
    """Maintains state/status information for a service and its connections."""

    # Mapping of Cisco CSR tunnel status strings to OpenStack status values.
    STATUS_MAP = {'ERROR': constants.ERROR,
                  'UP-ACTIVE': constants.ACTIVE,
                  'UP-IDLE': constants.ACTIVE,
                  'UP-NO-IKE': constants.ACTIVE,
                  'DOWN': constants.DOWN,
                  'DOWN-NEGOTIATING': constants.DOWN}

    def __init__(self, service_data, csr):
        """Record service identity and start with no known connections.

        :param service_data: VPN service dict from the plugin (needs 'id').
        :param csr: REST client for the Cisco CSR hosting this service.
        """
        self.service_id = service_data['id']
        self.conn_state = {}
        self.csr = csr
        self.is_admin_up = True
        # TODO(pcm) FUTURE - handle sharing of policies

    def create_connection(self, conn_data):
        """Create and store state for a new IPSec connection; return it."""
        conn_id = conn_data['id']
        self.conn_state[conn_id] = CiscoCsrIPSecConnection(conn_data, self.csr)
        return self.conn_state[conn_id]

    def get_connection(self, conn_id):
        """Return connection state for conn_id, or None if unknown."""
        return self.conn_state.get(conn_id)

    def conn_status(self, conn_id):
        """Return the last reported status of conn_id, or None if unknown."""
        conn_state = self.get_connection(conn_id)
        if conn_state:
            return conn_state.last_status

    def snapshot_conn_state(self, ipsec_conn):
        """Create/obtain connection state and save current status."""
        conn_state = self.conn_state.setdefault(
            ipsec_conn['id'], CiscoCsrIPSecConnection(ipsec_conn, self.csr))
        conn_state.last_status = ipsec_conn['status']
        # Mark clean so the sweep phase of sync() will not remove it.
        conn_state.is_dirty = False
        return conn_state

    def get_ipsec_connections_status(self):
        """Obtain current status of all tunnels on a Cisco CSR.

        Convert them to OpenStack status values.

        :returns: dict mapping tunnel ID to OpenStack status constant.
        """
        tunnels = self.csr.read_tunnel_statuses()
        for tunnel in tunnels:
            LOG.debug("CSR Reports %(tunnel)s status '%(status)s'",
                      {'tunnel': tunnel[0], 'status': tunnel[1]})
        # Generator expression replaces the previous map()/lambda form.
        return dict((tunnel, self.STATUS_MAP[status])
                    for tunnel, status in tunnels)

    def find_matching_connection(self, tunnel_id):
        """Find IPSec connection using Cisco CSR tunnel specified, if any."""
        for connection in self.conn_state.values():
            if connection.tunnel == tunnel_id:
                return connection.conn_id

    def no_connections_up(self):
        """Return True when no connection on this service is ACTIVE."""
        return not any(c.last_status == 'ACTIVE'
                       for c in self.conn_state.values())

    def update_last_status(self):
        """Recompute service status from admin state and connection states."""
        if not self.is_admin_up or self.no_connections_up():
            self.last_status = constants.DOWN
        else:
            self.last_status = constants.ACTIVE
class CiscoCsrIPSecConnection(object):
    """State and actions for IPSec site-to-site connections.

    Holds one connection's info dict from the plugin and performs the REST
    requests against the Cisco CSR to create/delete the related resources
    (pre-shared key, IKE policy, IPSec policy, site connection, routes).
    """
    def __init__(self, conn_info, csr):
        # conn_info: connection dict from the plugin (id, psk, policies, ...).
        self.conn_info = conn_info
        # csr: REST client used to talk to the Cisco CSR.
        self.csr = csr
        # Rollback steps recorded for each successfully created resource.
        self.steps = []
        self.forced_down = False
        self.changed = False
    @property
    def conn_id(self):
        """UUID of the IPSec site-to-site connection."""
        return self.conn_info['id']
    @property
    def is_admin_up(self):
        """Administrative state of the connection."""
        return self.conn_info['admin_state_up']
    @is_admin_up.setter
    def is_admin_up(self, is_up):
        self.conn_info['admin_state_up'] = is_up
    @property
    def tunnel(self):
        """CSR tunnel interface ID backing this connection."""
        return self.conn_info['cisco']['site_conn_id']
    def check_for_changes(self, curr_conn):
        """Return True if any of the mapped attributes differ."""
        return not all([self.conn_info[attr] == curr_conn[attr]
                        for attr in ('mtu', 'psk', 'peer_address',
                                     'peer_cidrs', 'ike_policy',
                                     'ipsec_policy', 'cisco')])
    def find_current_status_in(self, statuses):
        """Look up this connection's tunnel status; ERROR if not present."""
        if self.tunnel in statuses:
            return statuses[self.tunnel]
        else:
            return constants.ERROR
    def update_status_and_build_report(self, current_status):
        """Save new status and return a report fragment, if it changed."""
        # NOTE(review): last_status is assigned externally (e.g. by
        # snapshot_conn_state) - confirm it is always set before this call.
        if current_status != self.last_status:
            pending_handled = plugin_utils.in_pending_status(self.last_status)
            self.last_status = current_status
            return {self.conn_id: {'status': current_status,
                                   'updated_pending_status': pending_handled}}
        else:
            return {}
    # Mapping of VPNaaS API attribute values to Cisco CSR REST API values.
    DIALECT_MAP = {'ike_policy': {'name': 'IKE Policy',
                                  'v1': u'v1',
                                  # auth_algorithm -> hash
                                  'sha1': u'sha',
                                  # encryption_algorithm -> encryption
                                  '3des': u'3des',
                                  'aes-128': u'aes',
                                  'aes-192': u'aes192',
                                  'aes-256': u'aes256',
                                  # pfs -> dhGroup
                                  'group2': 2,
                                  'group5': 5,
                                  'group14': 14},
                   'ipsec_policy': {'name': 'IPSec Policy',
                                    # auth_algorithm -> esp-authentication
                                    'sha1': u'esp-sha-hmac',
                                    # transform_protocol -> ah
                                    'esp': None,
                                    'ah': u'ah-sha-hmac',
                                    'ah-esp': u'ah-sha-hmac',
                                    # encryption_algorithm -> esp-encryption
                                    '3des': u'esp-3des',
                                    'aes-128': u'esp-aes',
                                    'aes-192': u'esp-192-aes',
                                    'aes-256': u'esp-256-aes',
                                    # pfs -> pfs
                                    'group2': u'group2',
                                    'group5': u'group5',
                                    'group14': u'group14'}}
    def translate_dialect(self, resource, attribute, info):
        """Map VPNaaS attributes values to CSR values for a resource.

        :raises CsrDriverMismatchError: attribute missing from info.
        :raises CsrUnknownMappingError: value has no CSR equivalent.
        """
        name = self.DIALECT_MAP[resource]['name']
        if attribute not in info:
            raise CsrDriverMismatchError(resource=name, attr=attribute)
        value = info[attribute].lower()
        if value in self.DIALECT_MAP[resource]:
            return self.DIALECT_MAP[resource][value]
        raise CsrUnknownMappingError(resource=name, attr=attribute,
                                     value=value)
    def create_psk_info(self, psk_id, conn_info):
        """Collect/create attributes needed for pre-shared key."""
        return {u'keyring-name': psk_id,
                u'pre-shared-key-list': [
                    {u'key': conn_info['psk'],
                     u'encrypted': False,
                     u'peer-address': conn_info['peer_address']}]}
    def create_ike_policy_info(self, ike_policy_id, conn_info):
        """Collect/create/map attributes needed for IKE policy."""
        for_ike = 'ike_policy'
        policy_info = conn_info[for_ike]
        version = self.translate_dialect(for_ike,
                                         'ike_version',
                                         policy_info)
        encrypt_algorithm = self.translate_dialect(for_ike,
                                                   'encryption_algorithm',
                                                   policy_info)
        auth_algorithm = self.translate_dialect(for_ike,
                                                'auth_algorithm',
                                                policy_info)
        group = self.translate_dialect(for_ike,
                                       'pfs',
                                       policy_info)
        lifetime = policy_info['lifetime_value']
        return {u'version': version,
                u'priority-id': ike_policy_id,
                u'encryption': encrypt_algorithm,
                u'hash': auth_algorithm,
                u'dhGroup': group,
                u'lifetime': lifetime}
    def create_ipsec_policy_info(self, ipsec_policy_id, info):
        """Collect/create attributes needed for IPSec policy.

        Note: OpenStack will provide a default encryption algorithm, if one is
        not provided, so a authentication only configuration of (ah, sha1),
        which maps to ah-sha-hmac transform protocol, cannot be selected.
        As a result, we'll always configure the encryption algorithm, and
        will select ah-sha-hmac for transform protocol.
        """
        for_ipsec = 'ipsec_policy'
        policy_info = info[for_ipsec]
        transform_protocol = self.translate_dialect(for_ipsec,
                                                    'transform_protocol',
                                                    policy_info)
        auth_algorithm = self.translate_dialect(for_ipsec,
                                                'auth_algorithm',
                                                policy_info)
        encrypt_algorithm = self.translate_dialect(for_ipsec,
                                                   'encryption_algorithm',
                                                   policy_info)
        group = self.translate_dialect(for_ipsec, 'pfs', policy_info)
        lifetime = policy_info['lifetime_value']
        settings = {u'policy-id': ipsec_policy_id,
                    u'protection-suite': {
                        u'esp-encryption': encrypt_algorithm,
                        u'esp-authentication': auth_algorithm},
                    u'lifetime-sec': lifetime,
                    u'pfs': group,
                    u'anti-replay-window-size': u'disable'}
        # 'esp' maps to None (no 'ah' key); 'ah'/'ah-esp' add the transform.
        if transform_protocol:
            settings[u'protection-suite'][u'ah'] = transform_protocol
        return settings
    def create_site_connection_info(self, site_conn_id, ipsec_policy_id,
                                    conn_info):
        """Collect/create attributes needed for the IPSec connection."""
        # TODO(pcm) Enable, once CSR is embedded as a Neutron router
        # gw_ip = vpnservice['external_ip'] (need to pass in)
        mtu = conn_info['mtu']
        return {
            u'vpn-interface-name': site_conn_id,
            u'ipsec-policy-id': ipsec_policy_id,
            u'local-device': {
                # TODO(pcm): FUTURE - Get CSR port of interface with
                # local subnet
                u'ip-address': u'GigabitEthernet3',
                # TODO(pcm): FUTURE - Get IP address of router's public
                # I/F, once CSR is used as embedded router.
                u'tunnel-ip-address': self.csr.tunnel_ip
                # u'tunnel-ip-address': u'%s' % gw_ip
            },
            u'remote-device': {
                u'tunnel-ip-address': conn_info['peer_address']
            },
            u'mtu': mtu
        }
    def create_routes_info(self, site_conn_id, conn_info):
        """Collect/create attributes for static routes."""
        routes_info = []
        for peer_cidr in conn_info.get('peer_cidrs', []):
            route = {u'destination-network': peer_cidr,
                     u'outgoing-interface': site_conn_id}
            route_id = csr_client.make_route_id(peer_cidr, site_conn_id)
            routes_info.append((route_id, route))
        return routes_info
    def _check_create(self, resource, which):
        """Determine if REST create request was successful.

        :raises CsrResourceCreateFailure: on any non-201 status.
        """
        if self.csr.status == requests.codes.CREATED:
            LOG.debug("%(resource)s %(which)s is configured",
                      {'resource': resource, 'which': which})
            return
        LOG.error(_("Unable to create %(resource)s %(which)s: "
                    "%(status)d"),
                  {'resource': resource, 'which': which,
                   'status': self.csr.status})
        # ToDO(pcm): Set state to error
        raise CsrResourceCreateFailure(resource=resource, which=which)
    def do_create_action(self, action_suffix, info, resource_id, title):
        """Perform a single REST step for IPSec site connection create."""
        create_action = 'create_%s' % action_suffix
        try:
            getattr(self.csr, create_action)(info)
        except AttributeError:
            LOG.exception(_("Internal error - '%s' is not defined"),
                          create_action)
            raise CsrResourceCreateFailure(resource=title,
                                           which=resource_id)
        self._check_create(title, resource_id)
        # Record the step so do_rollback() can undo it on later failure.
        self.steps.append(RollbackStep(action_suffix, resource_id, title))
    def _verify_deleted(self, status, resource, which):
        """Determine if REST delete request was successful."""
        # NOT_FOUND is acceptable: the resource is already gone.
        if status in (requests.codes.NO_CONTENT, requests.codes.NOT_FOUND):
            LOG.debug("%(resource)s configuration %(which)s was removed",
                      {'resource': resource, 'which': which})
        else:
            LOG.warning(_("Unable to delete %(resource)s %(which)s: "
                          "%(status)d"), {'resource': resource,
                                          'which': which,
                                          'status': status})
    def do_rollback(self):
        """Undo create steps that were completed successfully."""
        # Undo in reverse creation order to respect CSR dependencies.
        for step in reversed(self.steps):
            delete_action = 'delete_%s' % step.action
            LOG.debug(_("Performing rollback action %(action)s for "
                        "resource %(resource)s"), {'action': delete_action,
                                                   'resource': step.title})
            try:
                getattr(self.csr, delete_action)(step.resource_id)
            except AttributeError:
                LOG.exception(_("Internal error - '%s' is not defined"),
                              delete_action)
                raise CsrResourceCreateFailure(resource=step.title,
                                               which=step.resource_id)
            self._verify_deleted(self.csr.status, step.title, step.resource_id)
        self.steps = []
    def create_ipsec_site_connection(self, context, conn_info):
        """Creates an IPSec site-to-site connection on CSR.

        Create the PSK, IKE policy, IPSec policy, connection, static route,
        and (future) DPD. Any failure rolls back the steps already done.
        """
        # Get all the IDs
        conn_id = conn_info['id']
        psk_id = conn_id
        site_conn_id = conn_info['cisco']['site_conn_id']
        ike_policy_id = conn_info['cisco']['ike_policy_id']
        ipsec_policy_id = conn_info['cisco']['ipsec_policy_id']
        LOG.debug(_('Creating IPSec connection %s'), conn_id)
        # Get all the attributes needed to create
        try:
            psk_info = self.create_psk_info(psk_id, conn_info)
            ike_policy_info = self.create_ike_policy_info(ike_policy_id,
                                                          conn_info)
            ipsec_policy_info = self.create_ipsec_policy_info(ipsec_policy_id,
                                                              conn_info)
            connection_info = self.create_site_connection_info(site_conn_id,
                                                               ipsec_policy_id,
                                                               conn_info)
            routes_info = self.create_routes_info(site_conn_id, conn_info)
        except (CsrUnknownMappingError, CsrDriverMismatchError) as e:
            # Mapping failures abort the create before any REST calls.
            LOG.exception(e)
            return
        try:
            self.do_create_action('pre_shared_key', psk_info,
                                  conn_id, 'Pre-Shared Key')
            self.do_create_action('ike_policy', ike_policy_info,
                                  ike_policy_id, 'IKE Policy')
            self.do_create_action('ipsec_policy', ipsec_policy_info,
                                  ipsec_policy_id, 'IPSec Policy')
            self.do_create_action('ipsec_connection', connection_info,
                                  site_conn_id, 'IPSec Connection')
            # TODO(pcm): FUTURE - Do DPD for v1 and handle if >1 connection
            # and different DPD settings
            for route_id, route_info in routes_info:
                self.do_create_action('static_route', route_info,
                                      route_id, 'Static Route')
        except CsrResourceCreateFailure:
            self.do_rollback()
            LOG.info(_("FAILED: Create of IPSec site-to-site connection %s"),
                     conn_id)
        else:
            LOG.info(_("SUCCESS: Created IPSec site-to-site connection %s"),
                     conn_id)
    def delete_ipsec_site_connection(self, context, conn_id):
        """Delete the site-to-site IPSec connection.

        This will be best effort and will continue, if there are any
        failures.
        """
        LOG.debug(_('Deleting IPSec connection %s'), conn_id)
        if not self.steps:
            LOG.warning(_('Unable to find connection %s'), conn_id)
        else:
            # Reuse rollback machinery: it deletes all recorded resources.
            self.do_rollback()
        LOG.info(_("SUCCESS: Deleted IPSec site-to-site connection %s"),
                 conn_id)
    def set_admin_state(self, is_up):
        """Change the admin state for the IPSec connection.

        :raises CsrAdminStateChangeFailure: if the CSR rejects the change.
        """
        self.csr.set_ipsec_connection_state(self.tunnel, admin_up=is_up)
        if self.csr.status != requests.codes.NO_CONTENT:
            state = "UP" if is_up else "DOWN"
            LOG.error(_("Unable to change %(tunnel)s admin state to "
                        "%(state)s"), {'tunnel': self.tunnel, 'state': state})
            raise CsrAdminStateChangeFailure(tunnel=self.tunnel, state=state)

View File

@ -1,713 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013, Nachi Ueno, NTT I3, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import copy
import os
import re
import shutil
import jinja2
import netaddr
from oslo.config import cfg
from oslo import messaging
import six
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils
from neutron.common import rpc_compat
from neutron import context
from neutron.openstack.common import lockutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import loopingcall
from neutron.plugins.common import constants
from neutron.plugins.common import utils as plugin_utils
from neutron.services.vpn.common import topics
from neutron.services.vpn import device_drivers
LOG = logging.getLogger(__name__)
# Directory holding the Jinja templates shipped next to this module.
TEMPLATE_PATH = os.path.dirname(__file__)
# Options governing where ipsec config lives and how often status is polled.
ipsec_opts = [
    cfg.StrOpt(
        'config_base_dir',
        default='$state_path/ipsec',
        help=_('Location to store ipsec server config files')),
    cfg.IntOpt('ipsec_status_check_interval',
               default=60,
               help=_("Interval for checking ipsec status"))
]
cfg.CONF.register_opts(ipsec_opts, 'ipsec')
# Template locations for the OpenSwan configuration and secrets files.
openswan_opts = [
    cfg.StrOpt(
        'ipsec_config_template',
        default=os.path.join(
            TEMPLATE_PATH,
            'template/openswan/ipsec.conf.template'),
        help=_('Template file for ipsec configuration')),
    cfg.StrOpt(
        'ipsec_secret_template',
        default=os.path.join(
            TEMPLATE_PATH,
            'template/openswan/ipsec.secret.template'),
        help=_('Template file for ipsec secret configuration'))
]
cfg.CONF.register_opts(openswan_opts, 'openswan')
# Shared Jinja environment, created lazily by _get_template().
JINJA_ENV = None
# Map of 'ipsec whack --status' route states to OpenStack status values.
STATUS_MAP = {
    'erouted': constants.ACTIVE,
    'unrouted': constants.DOWN
}
# Key used for the connection-status portion of a status report.
IPSEC_CONNS = 'ipsec_site_connections'
def _get_template(template_file):
    """Load a Jinja template, creating the shared environment on first use."""
    global JINJA_ENV
    if JINJA_ENV is None:
        # Root-based loader so absolute template paths can be resolved.
        loader = jinja2.FileSystemLoader(searchpath="/")
        JINJA_ENV = jinja2.Environment(loader=loader)
    return JINJA_ENV.get_template(template_file)
@six.add_metaclass(abc.ABCMeta)
class BaseSwanProcess():
    """Swan Family Process Manager.

    This class manages start/restart/stop of an ipsec process and
    creates/deletes its config files from templates. Subclasses supply the
    process control specifics (start/stop/restart/get_status).
    """
    # Name of the ipsec control binary.
    binary = "ipsec"
    # Subdirectories (relative to config_dir) that *swan expects to exist.
    CONFIG_DIRS = [
        'var/run',
        'log',
        'etc',
        'etc/ipsec.d/aacerts',
        'etc/ipsec.d/acerts',
        'etc/ipsec.d/cacerts',
        'etc/ipsec.d/certs',
        'etc/ipsec.d/crls',
        'etc/ipsec.d/ocspcerts',
        'etc/ipsec.d/policies',
        'etc/ipsec.d/private',
        'etc/ipsec.d/reqs',
        'etc/pki/nssdb/'
    ]
    # VPNaaS API attribute values -> *swan configuration keywords.
    DIALECT_MAP = {
        "3des": "3des",
        "aes-128": "aes128",
        "aes-256": "aes256",
        "aes-192": "aes192",
        "group2": "modp1024",
        "group5": "modp1536",
        "group14": "modp2048",
        "group15": "modp3072",
        "bi-directional": "start",
        "response-only": "add",
        "v2": "insist",
        "v1": "never"
    }
    def __init__(self, conf, root_helper, process_id,
                 vpnservice, namespace):
        # process_id doubles as the router ID in the current design.
        self.conf = conf
        self.id = process_id
        self.root_helper = root_helper
        self.updated_pending_status = False
        self.namespace = namespace
        # conn id -> {'status', 'updated_pending_status'} as last observed.
        self.connection_status = {}
        self.config_dir = os.path.join(
            cfg.CONF.ipsec.config_base_dir, self.id)
        self.etc_dir = os.path.join(self.config_dir, 'etc')
        self.update_vpnservice(vpnservice)
    def translate_dialect(self):
        """Rewrite API attribute values in-place to *swan keywords."""
        if not self.vpnservice:
            return
        for ipsec_site_conn in self.vpnservice['ipsec_site_connections']:
            self._dialect(ipsec_site_conn, 'initiator')
            self._dialect(ipsec_site_conn['ikepolicy'], 'ike_version')
            for key in ['encryption_algorithm',
                        'auth_algorithm',
                        'pfs']:
                self._dialect(ipsec_site_conn['ikepolicy'], key)
                self._dialect(ipsec_site_conn['ipsecpolicy'], key)
    def update_vpnservice(self, vpnservice):
        """Replace cached service data and normalize its dialect."""
        self.vpnservice = vpnservice
        self.translate_dialect()
    def _dialect(self, obj, key):
        # Values without a mapping are passed through unchanged.
        obj[key] = self.DIALECT_MAP.get(obj[key], obj[key])
    @abc.abstractmethod
    def ensure_configs(self):
        """Subclass hook: generate all config files for the process."""
        pass
    def ensure_config_file(self, kind, template, vpnservice):
        """Update config file, based on current settings for service."""
        config_str = self._gen_config_content(template, vpnservice)
        config_file_name = self._get_config_filename(kind)
        utils.replace_file(config_file_name, config_str)
    def remove_config(self):
        """Remove whole config file."""
        shutil.rmtree(self.config_dir, ignore_errors=True)
    def _get_config_filename(self, kind):
        """Return the full path of the config file named 'kind'."""
        config_dir = self.etc_dir
        return os.path.join(config_dir, kind)
    def _ensure_dir(self, dir_path):
        # Create with 0755 so the ipsec process can traverse the tree.
        if not os.path.isdir(dir_path):
            os.makedirs(dir_path, 0o755)
    def ensure_config_dir(self, vpnservice):
        """Create config directory if it does not exist."""
        self._ensure_dir(self.config_dir)
        for subdir in self.CONFIG_DIRS:
            dir_path = os.path.join(self.config_dir, subdir)
            self._ensure_dir(dir_path)
    def _gen_config_content(self, template_file, vpnservice):
        """Render a config file body from its Jinja template."""
        template = _get_template(template_file)
        return template.render(
            {'vpnservice': vpnservice,
             'state_path': cfg.CONF.state_path})
    @abc.abstractmethod
    def get_status(self):
        """Subclass hook: return raw status output of the ipsec process."""
        pass
    @property
    def status(self):
        """OpenStack status constant derived from process liveness."""
        if self.active:
            return constants.ACTIVE
        return constants.DOWN
    @property
    def active(self):
        """Check if the process is active or not."""
        if not self.namespace:
            return False
        try:
            status = self.get_status()
            self._update_connection_status(status)
        except RuntimeError:
            # Status command failed; treat the process as down.
            return False
        return True
    def update(self):
        """Update Status based on vpnservice configuration."""
        if self.vpnservice and not self.vpnservice['admin_state_up']:
            self.disable()
        else:
            self.enable()
        if plugin_utils.in_pending_status(self.vpnservice['status']):
            self.updated_pending_status = True
        self.vpnservice['status'] = self.status
        for ipsec_site_conn in self.vpnservice['ipsec_site_connections']:
            if plugin_utils.in_pending_status(ipsec_site_conn['status']):
                conn_id = ipsec_site_conn['id']
                conn_status = self.connection_status.get(conn_id)
                if not conn_status:
                    continue
                conn_status['updated_pending_status'] = True
                ipsec_site_conn['status'] = conn_status['status']
    def enable(self):
        """Enabling the process."""
        try:
            self.ensure_configs()
            if self.active:
                self.restart()
            else:
                self.start()
        except RuntimeError:
            LOG.exception(
                _("Failed to enable vpn process on router %s"),
                self.id)
    def disable(self):
        """Disabling the process."""
        try:
            if self.active:
                self.stop()
            self.remove_config()
        except RuntimeError:
            LOG.exception(
                _("Failed to disable vpn process on router %s"),
                self.id)
    @abc.abstractmethod
    def restart(self):
        """Restart process."""
    @abc.abstractmethod
    def start(self):
        """Start process."""
    @abc.abstractmethod
    def stop(self):
        """Stop process."""
    def _update_connection_status(self, status_output):
        """Parse 'whack --status' output into self.connection_status."""
        for line in status_output.split('\n'):
            # Lines look like: NNN "<conn uuid>" ... erouted;
            m = re.search('\d\d\d "([a-f0-9\-]+).* (unrouted|erouted);', line)
            if not m:
                continue
            connection_id = m.group(1)
            status = m.group(2)
            if not self.connection_status.get(connection_id):
                self.connection_status[connection_id] = {
                    'status': None,
                    'updated_pending_status': False
                }
            self.connection_status[
                connection_id]['status'] = STATUS_MAP[status]
class OpenSwanProcess(BaseSwanProcess):
    """OpenSwan Process manager class.

    This process class uses three commands
    (1) ipsec pluto: IPsec IKE keying daemon
    (2) ipsec addconn: Adds new ipsec addconn
    (3) ipsec whack: control interface for IPSEC keying daemon
    """
    def __init__(self, conf, root_helper, process_id,
                 vpnservice, namespace):
        super(OpenSwanProcess, self).__init__(
            conf, root_helper, process_id,
            vpnservice, namespace)
        self.secrets_file = os.path.join(
            self.etc_dir, 'ipsec.secrets')
        self.config_file = os.path.join(
            self.etc_dir, 'ipsec.conf')
        # Control socket base used by pluto/whack for this router.
        self.pid_path = os.path.join(
            self.config_dir, 'var', 'run', 'pluto')
    def _execute(self, cmd, check_exit_code=True):
        """Execute command on namespace."""
        ip_wrapper = ip_lib.IPWrapper(self.root_helper, self.namespace)
        return ip_wrapper.netns.execute(
            cmd,
            check_exit_code=check_exit_code)
    def ensure_configs(self):
        """Generate config files which are needed for OpenSwan.

        If there is no directory, this function will create
        dirs.
        """
        self.ensure_config_dir(self.vpnservice)
        self.ensure_config_file(
            'ipsec.conf',
            self.conf.openswan.ipsec_config_template,
            self.vpnservice)
        self.ensure_config_file(
            'ipsec.secrets',
            self.conf.openswan.ipsec_secret_template,
            self.vpnservice)
    def get_status(self):
        """Return raw 'ipsec whack --status' output for this pluto."""
        return self._execute([self.binary,
                              'whack',
                              '--ctlbase',
                              self.pid_path,
                              '--status'])
    def restart(self):
        """Restart the process."""
        self.stop()
        self.start()
        return
    def _get_nexthop(self, address):
        """Return the next-hop for address, or the address if directly
        reachable.

        Parses 'ip route get' output; when 'via' appears, the gateway is
        the third whitespace-separated token.
        """
        routes = self._execute(
            ['ip', 'route', 'get', address])
        if routes.find('via') >= 0:
            return routes.split(' ')[2]
        return address
    def _virtual_privates(self):
        """Returns line of virtual_privates.

        virtual_private contains the networks
        that are allowed as subnet for the remote client.
        """
        virtual_privates = []
        nets = [self.vpnservice['subnet']['cidr']]
        for ipsec_site_conn in self.vpnservice['ipsec_site_connections']:
            nets += ipsec_site_conn['peer_cidrs']
        for net in nets:
            # Format is %v4:<cidr> or %v6:<cidr> per OpenSwan syntax.
            version = netaddr.IPNetwork(net).version
            virtual_privates.append('%%v%s:%s' % (version, net))
        return ','.join(virtual_privates)
    def start(self):
        """Start the process.

        Note: if there is not namespace yet,
        just do nothing, and wait next event.
        """
        if not self.namespace:
            return
        virtual_private = self._virtual_privates()
        #start pluto IKE keying daemon
        self._execute([self.binary,
                       'pluto',
                       '--ctlbase', self.pid_path,
                       '--ipsecdir', self.etc_dir,
                       '--use-netkey',
                       '--uniqueids',
                       '--nat_traversal',
                       '--secretsfile', self.secrets_file,
                       '--virtual_private', virtual_private
                       ])
        #add connections
        for ipsec_site_conn in self.vpnservice['ipsec_site_connections']:
            nexthop = self._get_nexthop(ipsec_site_conn['peer_address'])
            self._execute([self.binary,
                           'addconn',
                           '--ctlbase', '%s.ctl' % self.pid_path,
                           '--defaultroutenexthop', nexthop,
                           '--config', self.config_file,
                           ipsec_site_conn['id']
                           ])
        #TODO(nati) fix this when openswan is fixed
        #Due to openswan bug, this command always exit with 3
        #start whack ipsec keying daemon
        self._execute([self.binary,
                       'whack',
                       '--ctlbase', self.pid_path,
                       '--listen',
                       ], check_exit_code=False)
        for ipsec_site_conn in self.vpnservice['ipsec_site_connections']:
            if not ipsec_site_conn['initiator'] == 'start':
                continue
            #initiate ipsec connection
            self._execute([self.binary,
                           'whack',
                           '--ctlbase', self.pid_path,
                           '--name', ipsec_site_conn['id'],
                           '--asynchronous',
                           '--initiate'
                           ])
    def disconnect(self):
        """Terminate each known connection via whack (best effort)."""
        if not self.namespace:
            return
        if not self.vpnservice:
            return
        for conn_id in self.connection_status:
            self._execute([self.binary,
                           'whack',
                           '--ctlbase', self.pid_path,
                           '--name', '%s/0x1' % conn_id,
                           '--terminate'
                           ])
    def stop(self):
        #Stop process using whack
        #Note this will also stop pluto
        self.disconnect()
        self._execute([self.binary,
                       'whack',
                       '--ctlbase', self.pid_path,
                       '--shutdown',
                       ])
        #clean connection_status info
        self.connection_status = {}
class IPsecVpnDriverApi(rpc_compat.RpcProxy):
    """IPSecVpnDriver RPC api."""

    IPSEC_PLUGIN_VERSION = '1.0'

    def get_vpn_services_on_host(self, context, host):
        """Get list of vpnservices.

        The vpnservices including related ipsec_site_connection,
        ikepolicy and ipsecpolicy on this host
        """
        msg = self.make_msg('get_vpn_services_on_host', host=host)
        return self.call(context, msg,
                         version=self.IPSEC_PLUGIN_VERSION,
                         topic=self.topic)

    def update_status(self, context, status):
        """Update local status.

        This method call updates status attribute of
        VPNServices.
        """
        msg = self.make_msg('update_status', status=status)
        return self.cast(context, msg,
                         version=self.IPSEC_PLUGIN_VERSION,
                         topic=self.topic)
@six.add_metaclass(abc.ABCMeta)
class IPsecDriver(device_drivers.DeviceDriver):
"""VPN Device Driver for IPSec.
This class is designed for use with L3-agent now.
However this driver will be used with another agent in future.
so the use of "Router" is kept minimul now.
Insted of router_id, we are using process_id in this code.
"""
# history
# 1.0 Initial version
RPC_API_VERSION = '1.0'
# TODO(ihrachys): we can't use RpcCallback here due to inheritance
# issues
target = messaging.Target(version=RPC_API_VERSION)
    def __init__(self, agent, host):
        # The agent supplies config, root helper and iptables hooks.
        self.agent = agent
        self.conf = self.agent.conf
        self.root_helper = self.agent.root_helper
        self.host = host
        self.conn = rpc_compat.create_connection(new=True)
        self.context = context.get_admin_context_without_session()
        self.topic = topics.IPSEC_AGENT_TOPIC
        # Host-scoped topic so the plugin can target this agent.
        node_topic = '%s.%s' % (self.topic, self.host)
        # router/process id -> swan process manager
        self.processes = {}
        # process id -> last reported status dict
        self.process_status_cache = {}
        self.endpoints = [self]
        self.conn.create_consumer(node_topic, self.endpoints, fanout=False)
        self.conn.consume_in_threads()
        self.agent_rpc = IPsecVpnDriverApi(topics.IPSEC_DRIVER_TOPIC, '1.0')
        # Periodically push status changes up to the plugin.
        self.process_status_cache_check = loopingcall.FixedIntervalLoopingCall(
            self.report_status, self.context)
        self.process_status_cache_check.start(
            interval=self.conf.ipsec.ipsec_status_check_interval)
def _update_nat(self, vpnservice, func):
"""Setting up nat rule in iptables.
We need to setup nat rule for ipsec packet.
:param vpnservice: vpnservices
:param func: self.add_nat_rule or self.remove_nat_rule
"""
local_cidr = vpnservice['subnet']['cidr']
router_id = vpnservice['router_id']
for ipsec_site_connection in vpnservice['ipsec_site_connections']:
for peer_cidr in ipsec_site_connection['peer_cidrs']:
func(
router_id,
'POSTROUTING',
'-s %s -d %s -m policy '
'--dir out --pol ipsec '
'-j ACCEPT ' % (local_cidr, peer_cidr),
top=True)
self.agent.iptables_apply(router_id)
    def vpnservice_updated(self, context, **kwargs):
        """Vpnservice updated rpc handler

        VPN Service Driver will call this method
        when vpnservices updated.
        Then this method start sync with server.
        """
        # Full resync; no router list since no routers were added/removed.
        self.sync(context, [])
    @abc.abstractmethod
    def create_process(self, process_id, vpnservice, namespace):
        """Subclass hook: build the swan process manager for a router."""
        pass
def ensure_process(self, process_id, vpnservice=None):
"""Ensuring process.
If the process doesn't exist, it will create process
and store it in self.processs
"""
process = self.processes.get(process_id)
if not process or not process.namespace:
namespace = self.agent.get_namespace(process_id)
process = self.create_process(
process_id,
vpnservice,
namespace)
self.processes[process_id] = process
elif vpnservice:
process.update_vpnservice(vpnservice)
return process
    def create_router(self, process_id):
        """Handling create router event.

        Agent calls this method, when the process namespace
        is ready.
        """
        if process_id in self.processes:
            # In case of vpnservice is created
            # before router's namespace
            process = self.processes[process_id]
            self._update_nat(process.vpnservice, self.agent.add_nat_rule)
            process.enable()
def destroy_router(self, process_id):
"""Handling destroy_router event.
Agent calls this method, when the process namespace
is deleted.
"""
if process_id in self.processes:
process = self.processes[process_id]
process.disable()
vpnservice = process.vpnservice
if vpnservice:
self._update_nat(vpnservice, self.agent.remove_nat_rule)
del self.processes[process_id]
def get_process_status_cache(self, process):
if not self.process_status_cache.get(process.id):
self.process_status_cache[process.id] = {
'status': None,
'id': process.vpnservice['id'],
'updated_pending_status': False,
'ipsec_site_connections': {}}
return self.process_status_cache[process.id]
def is_status_updated(self, process, previous_status):
if process.updated_pending_status:
return True
if process.status != previous_status['status']:
return True
if (process.connection_status !=
previous_status['ipsec_site_connections']):
return True
def unset_updated_pending_status(self, process):
process.updated_pending_status = False
for connection_status in process.connection_status.values():
connection_status['updated_pending_status'] = False
def copy_process_status(self, process):
return {
'id': process.vpnservice['id'],
'status': process.status,
'updated_pending_status': process.updated_pending_status,
'ipsec_site_connections': copy.deepcopy(process.connection_status)
}
def update_downed_connections(self, process_id, new_status):
"""Update info to be reported, if connections just went down.
If there is no longer any information for a connection, because it
has been removed (e.g. due to an admin down of VPN service or IPSec
connection), but there was previous status information for the
connection, mark the connection as down for reporting purposes.
"""
if process_id in self.process_status_cache:
for conn in self.process_status_cache[process_id][IPSEC_CONNS]:
if conn not in new_status[IPSEC_CONNS]:
new_status[IPSEC_CONNS][conn] = {
'status': constants.DOWN,
'updated_pending_status': True
}
    def report_status(self, context):
        """Report the status of all known processes to the server.

        For each tracked process whose state differs from the cached
        snapshot: build a fresh status report (marking vanished
        connections DOWN), refresh the cache, and finally push every
        changed report to the server in a single RPC call.
        """
        status_changed_vpn_services = []
        for process in self.processes.values():
            previous_status = self.get_process_status_cache(process)
            if self.is_status_updated(process, previous_status):
                new_status = self.copy_process_status(process)
                self.update_downed_connections(process.id, new_status)
                status_changed_vpn_services.append(new_status)
                self.process_status_cache[process.id] = (
                    self.copy_process_status(process))
                # We need to unset the updated_pending status after it
                # is reported to the server side
                self.unset_updated_pending_status(process)
        if status_changed_vpn_services:
            self.agent_rpc.update_status(
                context,
                status_changed_vpn_services)
    @lockutils.synchronized('vpn-agent', 'neutron-')
    def sync(self, context, routers):
        """Sync process state with the server side.

        :param context: context object for RPC call
        :param routers: Router objects which were created in this sync event

        There are many failure cases that should be considered,
        including the following:
        1) Agent class restarted
        2) Failure on process creation
        3) VpnService is deleted during agent down
        4) RPC failure
        In order to handle these failure cases,
        this driver takes a simple sync strategy.
        """
        vpnservices = self.agent_rpc.get_vpn_services_on_host(
            context, self.host)
        router_ids = [vpnservice['router_id'] for vpnservice in vpnservices]
        # Ensure the ipsec process is enabled for every service the
        # server reports, and (re)apply its NAT rules.
        for vpnservice in vpnservices:
            process = self.ensure_process(vpnservice['router_id'],
                                          vpnservice=vpnservice)
            self._update_nat(vpnservice, self.agent.add_nat_rule)
            process.update()
        # Delete any IPSec processes that are
        # associated with routers, but are not running the VPN service.
        for router in routers:
            # We are using the router id as the process_id
            process_id = router['id']
            if process_id not in router_ids:
                # NOTE(review): the returned process is unused here;
                # ensure_process appears to be called only for its side
                # effect before destroy_router — confirm.
                process = self.ensure_process(process_id)
                self.destroy_router(process_id)
        # Delete any IPSec processes running
        # VPN that do not have an associated router.
        process_ids = [process_id
                       for process_id in self.processes
                       if process_id not in router_ids]
        for process_id in process_ids:
            self.destroy_router(process_id)
        self.report_status(context)
class OpenSwanDriver(IPsecDriver):
    """IPsec driver variant that manages OpenSwan processes."""

    def create_process(self, process_id, vpnservice, namespace):
        """Build an OpenSwanProcess for the given router namespace."""
        process = OpenSwanProcess(self.conf, self.root_helper, process_id,
                                  vpnservice, namespace)
        return process

View File

@ -1,64 +0,0 @@
# Configuration for {{vpnservice.name}}
config setup
nat_traversal=yes
listen={{vpnservice.external_ip}}
conn %default
ikelifetime=480m
keylife=60m
keyingtries=%forever
{% for ipsec_site_connection in vpnservice.ipsec_site_connections if ipsec_site_connection.admin_state_up
%}conn {{ipsec_site_connection.id}}
# NOTE: a default route is required for %defaultroute to work...
left={{vpnservice.external_ip}}
leftid={{vpnservice.external_ip}}
auto={{ipsec_site_connection.initiator}}
# NOTE:REQUIRED
# [subnet]
leftsubnet={{vpnservice.subnet.cidr}}
# leftsubnet=networkA/netmaskA, networkB/netmaskB (IKEv2 only)
leftnexthop=%defaultroute
######################
# ipsec_site_connections
######################
# [peer_address]
right={{ipsec_site_connection.peer_address}}
# [peer_id]
rightid={{ipsec_site_connection.peer_id}}
# [peer_cidrs]
rightsubnets={ {{ipsec_site_connection['peer_cidrs']|join(' ')}} }
# rightsubnet=networkA/netmaskA, networkB/netmaskB (IKEv2 only)
rightnexthop=%defaultroute
# [mtu]
# NOTE: it looks like [mtu] is not supported in the strongswan driver,
# so it is ignored for now
# [dpd_action]
dpdaction={{ipsec_site_connection.dpd_action}}
# [dpd_interval]
dpddelay={{ipsec_site_connection.dpd_interval}}
# [dpd_timeout]
dpdtimeout={{ipsec_site_connection.dpd_timeout}}
# [auth_mode]
authby=secret
######################
# IKEPolicy params
######################
#ike version
ikev2={{ipsec_site_connection.ikepolicy.ike_version}}
# [encryption_algorithm]-[auth_algorithm]-[pfs]
ike={{ipsec_site_connection.ikepolicy.encryption_algorithm}}-{{ipsec_site_connection.ikepolicy.auth_algorithm}};{{ipsec_site_connection.ikepolicy.pfs}}
# [lifetime_value]
ikelifetime={{ipsec_site_connection.ikepolicy.lifetime_value}}s
# NOTE: it looks like lifetime_units=kilobytes cannot be enforced (the units could be seconds, hours, days, ...)
##########################
# IPsecPolicys params
##########################
# [transform_protocol]
auth={{ipsec_site_connection.ipsecpolicy.transform_protocol}}
# [encryption_algorithm]-[auth_algorithm]-[pfs]
phase2alg={{ipsec_site_connection.ipsecpolicy.encryption_algorithm}}-{{ipsec_site_connection.ipsecpolicy.auth_algorithm}};{{ipsec_site_connection.ipsecpolicy.pfs}}
# [encapsulation_mode]
type={{ipsec_site_connection.ipsecpolicy.encapsulation_mode}}
# [lifetime_value]
lifetime={{ipsec_site_connection.ipsecpolicy.lifetime_value}}s
# lifebytes=100000 if lifetime_units=kilobytes (IKEv2 only)
{% endfor %}

View File

@ -1,3 +0,0 @@
# Configuration for {{vpnservice.name}} {% for ipsec_site_connection in vpnservice.ipsec_site_connections %}
{{vpnservice.external_ip}} {{ipsec_site_connection.peer_id}} : PSK "{{ipsec_site_connection.psk}}"
{% endfor %}

View File

@ -1,107 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Swaminathan Vasudevan, Hewlett-Packard
from neutron.db.vpn import vpn_db
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants
from neutron.services import service_base
LOG = logging.getLogger(__name__)
class VPNPlugin(vpn_db.VPNPluginDb):
    """Implementation of the VPN Service Plugin.

    This class manages the workflow of VPNaaS requests/responses.
    Most DB-related work is implemented in the class
    vpn_db.VPNPluginDb.
    """
    # Extensions this service plugin advertises support for.
    supported_extension_aliases = ["vpnaas", "service-type"]
class VPNDriverPlugin(VPNPlugin, vpn_db.VPNPluginRpcDbMixin):
    """VpnPlugin which supports VPN Service Drivers.

    Each CRUD operation first performs the DB work (via the parent
    class), then notifies the selected service driver so it can push
    the change down to the backend/agent.
    """
    #TODO(nati) handle ikepolicy and ipsecpolicy update usecase
    def __init__(self):
        super(VPNDriverPlugin, self).__init__()
        # Load the service driver from neutron.conf.
        drivers, default_provider = service_base.load_drivers(
            constants.VPN, self)
        LOG.info(_("VPN plugin using service driver: %s"), default_provider)
        self.ipsec_driver = drivers[default_provider]
    def _get_driver_for_vpnservice(self, vpnservice):
        # Only a single driver is supported for now; the argument is
        # accepted for a future service-type framework (see TODO below).
        return self.ipsec_driver
    def _get_driver_for_ipsec_site_connection(self, context,
                                              ipsec_site_connection):
        #TODO(nati) get vpnservice when we support service type framework
        vpnservice = None
        return self._get_driver_for_vpnservice(vpnservice)
    def create_ipsec_site_connection(self, context, ipsec_site_connection):
        # DB create first, then driver notification.
        ipsec_site_connection = super(
            VPNDriverPlugin, self).create_ipsec_site_connection(
                context, ipsec_site_connection)
        driver = self._get_driver_for_ipsec_site_connection(
            context, ipsec_site_connection)
        driver.create_ipsec_site_connection(context, ipsec_site_connection)
        return ipsec_site_connection
    def delete_ipsec_site_connection(self, context, ipsec_conn_id):
        # Fetch the record before the DB delete so the driver still
        # receives the full connection data.
        ipsec_site_connection = self.get_ipsec_site_connection(
            context, ipsec_conn_id)
        super(VPNDriverPlugin, self).delete_ipsec_site_connection(
            context, ipsec_conn_id)
        driver = self._get_driver_for_ipsec_site_connection(
            context, ipsec_site_connection)
        driver.delete_ipsec_site_connection(context, ipsec_site_connection)
    def update_ipsec_site_connection(
            self, context,
            ipsec_conn_id, ipsec_site_connection):
        # Capture the pre-update state so the driver sees old and new.
        old_ipsec_site_connection = self.get_ipsec_site_connection(
            context, ipsec_conn_id)
        ipsec_site_connection = super(
            VPNDriverPlugin, self).update_ipsec_site_connection(
                context,
                ipsec_conn_id,
                ipsec_site_connection)
        driver = self._get_driver_for_ipsec_site_connection(
            context, ipsec_site_connection)
        driver.update_ipsec_site_connection(
            context, old_ipsec_site_connection, ipsec_site_connection)
        return ipsec_site_connection
    def update_vpnservice(self, context, vpnservice_id, vpnservice):
        old_vpn_service = self.get_vpnservice(context, vpnservice_id)
        new_vpn_service = super(
            VPNDriverPlugin, self).update_vpnservice(context, vpnservice_id,
                                                     vpnservice)
        driver = self._get_driver_for_vpnservice(old_vpn_service)
        driver.update_vpnservice(context, old_vpn_service, new_vpn_service)
        return new_vpn_service
    def delete_vpnservice(self, context, vpnservice_id):
        vpnservice = self._get_vpnservice(context, vpnservice_id)
        super(VPNDriverPlugin, self).delete_vpnservice(context, vpnservice_id)
        driver = self._get_driver_for_vpnservice(vpnservice)
        driver.delete_vpnservice(context, vpnservice)

View File

@ -1,92 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013, Nachi Ueno, NTT I3, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import six
from neutron.common import rpc_compat
from neutron import manager
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants
LOG = logging.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class VpnDriver(object):
    """Abstract base class for VPN service drivers.

    A service driver receives CRUD notifications from the VPN service
    plugin and propagates them to the backend/agents.
    """

    def __init__(self, service_plugin):
        # Plugin reference used by subclasses for DB access.
        self.service_plugin = service_plugin
    @property
    def service_type(self):
        """Service type handled by this driver; overridden by subclasses."""
        pass
    @abc.abstractmethod
    def create_vpnservice(self, context, vpnservice):
        """Notify the backend that a vpnservice was created."""
        pass
    @abc.abstractmethod
    def update_vpnservice(
        self, context, old_vpnservice, vpnservice):
        """Notify the backend that a vpnservice was updated."""
        pass
    @abc.abstractmethod
    def delete_vpnservice(self, context, vpnservice):
        """Notify the backend that a vpnservice was deleted."""
        pass
class BaseIPsecVpnAgentApi(rpc_compat.RpcProxy):
    """Base class for IPSec API to agent."""
    def __init__(self, to_agent_topic, topic, default_version):
        # Topic on which the target agents listen for casts.
        self.to_agent_topic = to_agent_topic
        super(BaseIPsecVpnAgentApi, self).__init__(topic, default_version)
    def _agent_notification(self, context, method, router_id,
                            version=None, **kwargs):
        """Notify the agent(s) of an update.

        Finds the L3 agents hosting *router_id* and casts *method* with
        *kwargs* to each of them on a host-specific topic.
        """
        admin_context = context.is_admin and context or context.elevated()
        plugin = manager.NeutronManager.get_service_plugins().get(
            constants.L3_ROUTER_NAT)
        if not version:
            version = self.RPC_API_VERSION
        l3_agents = plugin.get_l3_agents_hosting_routers(
            admin_context, [router_id],
            admin_state_up=True,
            active=True)
        for l3_agent in l3_agents:
            LOG.debug(_('Notify agent at %(topic)s.%(host)s the message '
                        '%(method)s %(args)s'),
                      {'topic': self.to_agent_topic,
                       'host': l3_agent.host,
                       'method': method,
                       'args': kwargs})
            self.cast(
                context, self.make_msg(method, **kwargs),
                version=version,
                topic='%s.%s' % (self.to_agent_topic, l3_agent.host))
    def vpnservice_updated(self, context, router_id, **kwargs):
        """Send update event of vpnservices."""
        self._agent_notification(context, 'vpnservice_updated', router_id,
                                 **kwargs)

View File

@ -1,239 +0,0 @@
# Copyright 2014 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Paul Michali, Cisco Systems, Inc.
import sqlalchemy as sa
from sqlalchemy.orm import exc as sql_exc
from neutron.common import exceptions
from neutron.db import model_base
from neutron.db import models_v2
from neutron.db.vpn import vpn_db
from neutron.openstack.common.db import exception as db_exc
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
# Note: Artificially limit these to reduce mapping table size and performance
# Tunnel can be 0..7FFFFFFF, IKE policy can be 1..10000, IPSec policy can be
# 1..31 characters long.
MAX_CSR_TUNNELS = 10000
MAX_CSR_IKE_POLICIES = 2000
MAX_CSR_IPSEC_POLICIES = 2000
TUNNEL = 'Tunnel'
IKE_POLICY = 'IKE Policy'
IPSEC_POLICY = 'IPSec Policy'
MAPPING_LIMITS = {TUNNEL: (0, MAX_CSR_TUNNELS),
IKE_POLICY: (1, MAX_CSR_IKE_POLICIES),
IPSEC_POLICY: (1, MAX_CSR_IPSEC_POLICIES)}
class CsrInternalError(exceptions.NeutronException):
    """Raised when the Cisco CSR ID mapping table is inconsistent."""
    message = _("Fatal - %(reason)s")
class IdentifierMap(model_base.BASEV2, models_v2.HasTenant):
    """Maps OpenStack IDs to compatible numbers for Cisco CSR."""
    __tablename__ = 'cisco_csr_identifier_map'
    # One row per IPSec site connection; removed with the connection.
    ipsec_site_conn_id = sa.Column(sa.String(64),
                                   sa.ForeignKey('ipsec_site_connections.id',
                                                 ondelete="CASCADE"),
                                   primary_key=True)
    # Small numeric IDs reserved for the CSR (see MAPPING_LIMITS).
    csr_tunnel_id = sa.Column(sa.Integer, nullable=False)
    csr_ike_policy_id = sa.Column(sa.Integer, nullable=False)
    csr_ipsec_policy_id = sa.Column(sa.Integer, nullable=False)
def get_next_available_id(session, table_field, id_type):
    """Find first unused id for the specified field in IdentifierMap table.

    As entries are removed, find the first "hole" and return that as the
    next available ID. To improve performance, artificially limit
    the number of entries to a smaller range. Currently, these IDs are
    globally unique. Could enhance in the future to be unique per router
    (CSR).

    :param session: DB session used to query the mapping table
    :param table_field: IdentifierMap column holding the IDs in use
    :param id_type: one of TUNNEL / IKE_POLICY / IPSEC_POLICY
    :raises IndexError: when every ID in the allowed range is in use
    """
    min_value = MAPPING_LIMITS[id_type][0]
    max_value = MAPPING_LIMITS[id_type][1]
    rows = session.query(table_field).order_by(table_field)
    used_ids = set(row[0] for row in rows)
    # Allowed range is [min_value, min_value + max_value): 0..9999 for
    # tunnels, 1..2000 for IKE and IPSec policies.
    all_ids = set(range(min_value, max_value + min_value))
    available_ids = all_ids - used_ids
    if not available_ids:
        msg = _("No available Cisco CSR %(type)s IDs from "
                "%(min)d..%(max)d") % {'type': id_type,
                                       'min': min_value,
                                       'max': max_value}
        LOG.error(msg)
        raise IndexError(msg)
    # set.pop() would return an arbitrary member; use min() so the first
    # "hole" is reused, as the docstring promises.
    return min(available_ids)
def get_next_available_tunnel_id(session):
    """Find first available tunnel ID from 0..MAX_CSR_TUNNELS-1."""
    return get_next_available_id(session, IdentifierMap.csr_tunnel_id,
                                 TUNNEL)
def get_next_available_ike_policy_id(session):
    """Find first available IKE Policy ID from 1..MAX_CSR_IKE_POLICIES."""
    return get_next_available_id(session, IdentifierMap.csr_ike_policy_id,
                                 IKE_POLICY)
def get_next_available_ipsec_policy_id(session):
    """Find first available IPSec Policy ID from 1..MAX_CSR_IPSEC_POLICIES."""
    return get_next_available_id(session, IdentifierMap.csr_ipsec_policy_id,
                                 IPSEC_POLICY)
def find_conn_with_policy(policy_field, policy_id, conn_id, session):
    """Return ID of another connection (if any) that uses same policy ID.

    Looks for an IPSec site connection other than *conn_id* whose
    *policy_field* (IKE or IPSec policy column) equals *policy_id*;
    returns None when there is no such connection.
    """
    qry = session.query(vpn_db.IPsecSiteConnection.id)
    # 'filter_request' is not a SQLAlchemy Query method and would raise
    # AttributeError; Query.filter() is the correct API.
    match = qry.filter(
        policy_field == policy_id,
        vpn_db.IPsecSiteConnection.id != conn_id).first()
    if match:
        return match[0]
def find_connection_using_ike_policy(ike_policy_id, conn_id, session):
    """Return ID of another connection that uses same IKE policy ID."""
    return find_conn_with_policy(vpn_db.IPsecSiteConnection.ikepolicy_id,
                                 ike_policy_id, conn_id, session)
def find_connection_using_ipsec_policy(ipsec_policy_id, conn_id, session):
    """Return ID of another connection that uses same IPSec policy ID."""
    return find_conn_with_policy(vpn_db.IPsecSiteConnection.ipsecpolicy_id,
                                 ipsec_policy_id, conn_id, session)
def lookup_policy(policy_type, policy_field, conn_id, session):
    """Obtain specified policy's mapping from other connection.

    :param policy_type: label used in the error message (IKE/IPSec)
    :param policy_field: IdentifierMap column to read the CSR ID from
    :param conn_id: connection whose mapping row is consulted
    :raises CsrInternalError: when no mapping row exists for *conn_id*
    """
    try:
        return session.query(policy_field).filter_by(
            ipsec_site_conn_id=conn_id).one()[0]
    except sql_exc.NoResultFound:
        msg = _("Database inconsistency between IPSec connection and "
                "Cisco CSR mapping table (%s)") % policy_type
        raise CsrInternalError(reason=msg)
def lookup_ike_policy_id_for(conn_id, session):
    """Obtain existing Cisco CSR IKE policy ID from another connection."""
    return lookup_policy(IKE_POLICY, IdentifierMap.csr_ike_policy_id,
                         conn_id, session)
def lookup_ipsec_policy_id_for(conn_id, session):
    """Obtain existing Cisco CSR IPSec policy ID from another connection."""
    return lookup_policy(IPSEC_POLICY, IdentifierMap.csr_ipsec_policy_id,
                         conn_id, session)
def determine_csr_policy_id(policy_type, conn_policy_field, map_policy_field,
                            policy_id, conn_id, session):
    """Use existing or reserve a new policy ID for Cisco CSR use.

    TODO(pcm) FUTURE: Once device driver adds support for IKE/IPSec policy
    ID sharing, add call to find_conn_with_policy() to find used ID and
    then call lookup_policy() to find the current mapping for that ID.

    NOTE: conn_policy_field and conn_id are currently unused; they are
    kept for the policy-sharing support described in the TODO above.
    """
    csr_id = get_next_available_id(session, map_policy_field, policy_type)
    LOG.debug(_("Reserved new CSR ID %(csr_id)d for %(policy)s "
                "ID %(policy_id)s"), {'csr_id': csr_id,
                                      'policy': policy_type,
                                      'policy_id': policy_id})
    return csr_id
def determine_csr_ike_policy_id(ike_policy_id, conn_id, session):
    """Use existing, or reserve a new IKE policy ID for Cisco CSR."""
    return determine_csr_policy_id(IKE_POLICY,
                                   vpn_db.IPsecSiteConnection.ikepolicy_id,
                                   IdentifierMap.csr_ike_policy_id,
                                   ike_policy_id, conn_id, session)
def determine_csr_ipsec_policy_id(ipsec_policy_id, conn_id, session):
    """Use existing, or reserve a new IPSec policy ID for Cisco CSR."""
    return determine_csr_policy_id(IPSEC_POLICY,
                                   vpn_db.IPsecSiteConnection.ipsecpolicy_id,
                                   IdentifierMap.csr_ipsec_policy_id,
                                   ipsec_policy_id, conn_id, session)
def get_tunnel_mapping_for(conn_id, session):
    """Return (tunnel ID, IKE policy ID, IPSec policy ID) for a connection.

    :raises CsrInternalError: when no mapping row exists for *conn_id*
    """
    try:
        entry = session.query(IdentifierMap).filter_by(
            ipsec_site_conn_id=conn_id).one()
        LOG.debug(_("Mappings for IPSec connection %(conn)s - "
                    "tunnel=%(tunnel)s ike_policy=%(csr_ike)d "
                    "ipsec_policy=%(csr_ipsec)d"),
                  {'conn': conn_id, 'tunnel': entry.csr_tunnel_id,
                   'csr_ike': entry.csr_ike_policy_id,
                   'csr_ipsec': entry.csr_ipsec_policy_id})
        return (entry.csr_tunnel_id, entry.csr_ike_policy_id,
                entry.csr_ipsec_policy_id)
    except sql_exc.NoResultFound:
        msg = _("Existing entry for IPSec connection %s not found in Cisco "
                "CSR mapping table") % conn_id
        raise CsrInternalError(reason=msg)
def create_tunnel_mapping(context, conn_info):
    """Create Cisco CSR IDs, using mapping table and OpenStack UUIDs.

    Reserves a tunnel ID, an IKE policy ID, and an IPSec policy ID for
    the connection in a single DB transaction, and persists the row.

    :param conn_info: IPSec site connection dict (id, ikepolicy_id,
        ipsecpolicy_id, tenant_id)
    :raises CsrInternalError: on a duplicate mapping row
    """
    conn_id = conn_info['id']
    ike_policy_id = conn_info['ikepolicy_id']
    ipsec_policy_id = conn_info['ipsecpolicy_id']
    tenant_id = conn_info['tenant_id']
    with context.session.begin():
        csr_tunnel_id = get_next_available_tunnel_id(context.session)
        csr_ike_id = determine_csr_ike_policy_id(ike_policy_id, conn_id,
                                                 context.session)
        csr_ipsec_id = determine_csr_ipsec_policy_id(ipsec_policy_id, conn_id,
                                                     context.session)
        map_entry = IdentifierMap(tenant_id=tenant_id,
                                  ipsec_site_conn_id=conn_id,
                                  csr_tunnel_id=csr_tunnel_id,
                                  csr_ike_policy_id=csr_ike_id,
                                  csr_ipsec_policy_id=csr_ipsec_id)
        try:
            context.session.add(map_entry)
            # Force committing to database so a duplicate is detected here
            context.session.flush()
        except db_exc.DBDuplicateEntry:
            msg = _("Attempt to create duplicate entry in Cisco CSR "
                    "mapping table for connection %s") % conn_id
            raise CsrInternalError(reason=msg)
        LOG.info(_("Mapped connection %(conn_id)s to Tunnel%(tunnel_id)d "
                   "using IKE policy ID %(ike_id)d and IPSec policy "
                   "ID %(ipsec_id)d"),
                 {'conn_id': conn_id, 'tunnel_id': csr_tunnel_id,
                  'ike_id': csr_ike_id, 'ipsec_id': csr_ipsec_id})
def delete_tunnel_mapping(context, conn_info):
    """Remove the Cisco CSR mapping row for a deleted IPSec connection."""
    conn_id = conn_info['id']
    with context.session.begin():
        (context.session.query(IdentifierMap).
         filter_by(ipsec_site_conn_id=conn_id).delete())
    LOG.info(_("Removed mapping for connection %s"), conn_id)

View File

@ -1,245 +0,0 @@
# Copyright 2014 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from netaddr import core as net_exc
from neutron.common import exceptions
from neutron.common import rpc_compat
from neutron.openstack.common import excutils
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants
from neutron.services.vpn.common import topics
from neutron.services.vpn import service_drivers
from neutron.services.vpn.service_drivers import cisco_csr_db as csr_id_map
LOG = logging.getLogger(__name__)
IPSEC = 'ipsec'
BASE_IPSEC_VERSION = '1.0'
LIFETIME_LIMITS = {'IKE Policy': {'min': 60, 'max': 86400},
'IPSec Policy': {'min': 120, 'max': 2592000}}
MIN_CSR_MTU = 1500
MAX_CSR_MTU = 9192
class CsrValidationFailure(exceptions.BadRequest):
    """Raised when a resource attribute is outside Cisco CSR limits."""
    message = _("Cisco CSR does not support %(resource)s attribute %(key)s "
                "with value '%(value)s'")
class CiscoCsrIPsecVpnDriverCallBack(rpc_compat.RpcCallback):
    """Handler for agent to plugin RPC messaging."""
    # history
    #   1.0 Initial version
    RPC_API_VERSION = BASE_IPSEC_VERSION
    def __init__(self, driver):
        super(CiscoCsrIPsecVpnDriverCallBack, self).__init__()
        self.driver = driver
    def get_vpn_services_on_host(self, context, host=None):
        """Returns info on the vpnservices on the host."""
        plugin = self.driver.service_plugin
        vpnservices = plugin._get_agent_hosting_vpn_services(
            context, host)
        return [self.driver._make_vpnservice_dict(vpnservice, context)
                for vpnservice in vpnservices]
    def update_status(self, context, status):
        """Update status of all vpnservices."""
        plugin = self.driver.service_plugin
        plugin.update_status_by_agent(context, status)
class CiscoCsrIPsecVpnAgentApi(service_drivers.BaseIPsecVpnAgentApi,
                               rpc_compat.RpcCallback):
    """API and handler for Cisco IPSec plugin to agent RPC messaging."""
    RPC_API_VERSION = BASE_IPSEC_VERSION
    def __init__(self, topic, default_version):
        # Casts go to the Cisco-specific agent topic.
        super(CiscoCsrIPsecVpnAgentApi, self).__init__(
            topics.CISCO_IPSEC_AGENT_TOPIC, topic, default_version)
class CiscoCsrIPsecVPNDriver(service_drivers.VpnDriver):
    """Cisco CSR VPN Service Driver class for IPsec.

    Validates requests against CSR capabilities, maintains the CSR ID
    mapping table, and notifies the Cisco IPSec agent of changes.
    """
    def __init__(self, service_plugin):
        super(CiscoCsrIPsecVPNDriver, self).__init__(service_plugin)
        # RPC server side (agent -> driver) ...
        self.endpoints = [CiscoCsrIPsecVpnDriverCallBack(self)]
        self.conn = rpc_compat.create_connection(new=True)
        self.conn.create_consumer(
            topics.CISCO_IPSEC_DRIVER_TOPIC, self.endpoints, fanout=False)
        self.conn.consume_in_threads()
        # ... and client side (driver -> agent).
        self.agent_rpc = CiscoCsrIPsecVpnAgentApi(
            topics.CISCO_IPSEC_AGENT_TOPIC, BASE_IPSEC_VERSION)
    @property
    def service_type(self):
        return IPSEC
    def validate_lifetime(self, for_policy, policy_info):
        """Ensure lifetime in secs and value is supported, based on policy."""
        units = policy_info['lifetime']['units']
        if units != 'seconds':
            raise CsrValidationFailure(resource=for_policy,
                                       key='lifetime:units',
                                       value=units)
        value = policy_info['lifetime']['value']
        if (value < LIFETIME_LIMITS[for_policy]['min'] or
            value > LIFETIME_LIMITS[for_policy]['max']):
            raise CsrValidationFailure(resource=for_policy,
                                       key='lifetime:value',
                                       value=value)
    def validate_ike_version(self, policy_info):
        """Ensure IKE policy is v1 for current REST API."""
        version = policy_info['ike_version']
        if version != 'v1':
            raise CsrValidationFailure(resource='IKE Policy',
                                       key='ike_version',
                                       value=version)
    def validate_mtu(self, conn_info):
        """Ensure the MTU value is supported (MIN_CSR_MTU..MAX_CSR_MTU)."""
        mtu = conn_info['mtu']
        if mtu < MIN_CSR_MTU or mtu > MAX_CSR_MTU:
            raise CsrValidationFailure(resource='IPSec Connection',
                                       key='mtu',
                                       value=mtu)
    def validate_public_ip_present(self, vpn_service):
        """Ensure there is one gateway IP specified for the router used."""
        gw_port = vpn_service.router.gw_port
        if not gw_port or len(gw_port.fixed_ips) != 1:
            raise CsrValidationFailure(resource='IPSec Connection',
                                       key='router:gw_port:ip_address',
                                       value='missing')
    def validate_peer_id(self, ipsec_conn):
        """Ensure that an IP address is specified for peer ID."""
        # TODO(pcm) Should we check peer_address too?
        peer_id = ipsec_conn['peer_id']
        try:
            netaddr.IPAddress(peer_id)
        except net_exc.AddrFormatError:
            raise CsrValidationFailure(resource='IPSec Connection',
                                       key='peer_id', value=peer_id)
    def validate_ipsec_connection(self, context, ipsec_conn, vpn_service):
        """Validate attributes w.r.t. Cisco CSR capabilities."""
        ike_policy = self.service_plugin.get_ikepolicy(
            context, ipsec_conn['ikepolicy_id'])
        ipsec_policy = self.service_plugin.get_ipsecpolicy(
            context, ipsec_conn['ipsecpolicy_id'])
        self.validate_lifetime('IKE Policy', ike_policy)
        self.validate_lifetime('IPSec Policy', ipsec_policy)
        self.validate_ike_version(ike_policy)
        self.validate_mtu(ipsec_conn)
        self.validate_public_ip_present(vpn_service)
        self.validate_peer_id(ipsec_conn)
        LOG.debug(_("IPSec connection %s validated for Cisco CSR"),
                  ipsec_conn['id'])
    def create_ipsec_site_connection(self, context, ipsec_site_connection):
        """Validate, map CSR IDs, then notify the agent of the new conn."""
        vpnservice = self.service_plugin._get_vpnservice(
            context, ipsec_site_connection['vpnservice_id'])
        try:
            self.validate_ipsec_connection(context, ipsec_site_connection,
                                           vpnservice)
        except CsrValidationFailure:
            # Mark the connection in ERROR before re-raising the failure.
            with excutils.save_and_reraise_exception():
                self.service_plugin.update_ipsec_site_conn_status(
                    context, ipsec_site_connection['id'], constants.ERROR)
        csr_id_map.create_tunnel_mapping(context, ipsec_site_connection)
        self.agent_rpc.vpnservice_updated(context, vpnservice['router_id'],
                                          reason='ipsec-conn-create')
    def update_ipsec_site_connection(
        self, context, old_ipsec_site_connection, ipsec_site_connection):
        vpnservice = self.service_plugin._get_vpnservice(
            context, ipsec_site_connection['vpnservice_id'])
        self.agent_rpc.vpnservice_updated(
            context, vpnservice['router_id'],
            reason='ipsec-conn-update')
    def delete_ipsec_site_connection(self, context, ipsec_site_connection):
        vpnservice = self.service_plugin._get_vpnservice(
            context, ipsec_site_connection['vpnservice_id'])
        self.agent_rpc.vpnservice_updated(context, vpnservice['router_id'],
                                          reason='ipsec-conn-delete')
    # IKE/IPSec policy and vpnservice-create notifications are no-ops for
    # this driver.
    def create_ikepolicy(self, context, ikepolicy):
        pass
    def delete_ikepolicy(self, context, ikepolicy):
        pass
    def update_ikepolicy(self, context, old_ikepolicy, ikepolicy):
        pass
    def create_ipsecpolicy(self, context, ipsecpolicy):
        pass
    def delete_ipsecpolicy(self, context, ipsecpolicy):
        pass
    def update_ipsecpolicy(self, context, old_ipsec_policy, ipsecpolicy):
        pass
    def create_vpnservice(self, context, vpnservice):
        pass
    def update_vpnservice(self, context, old_vpnservice, vpnservice):
        self.agent_rpc.vpnservice_updated(context, vpnservice['router_id'],
                                          reason='vpn-service-update')
    def delete_vpnservice(self, context, vpnservice):
        self.agent_rpc.vpnservice_updated(context, vpnservice['router_id'],
                                          reason='vpn-service-delete')
    def get_cisco_connection_mappings(self, conn_id, context):
        """Obtain persisted mappings for IDs related to connection."""
        tunnel_id, ike_id, ipsec_id = csr_id_map.get_tunnel_mapping_for(
            conn_id, context.session)
        return {'site_conn_id': u'Tunnel%d' % tunnel_id,
                'ike_policy_id': u'%d' % ike_id,
                'ipsec_policy_id': u'%s' % ipsec_id}
    def _make_vpnservice_dict(self, vpnservice, context):
        """Collect all info on service, including Cisco info per IPSec conn."""
        vpnservice_dict = dict(vpnservice)
        vpnservice_dict['ipsec_conns'] = []
        vpnservice_dict['subnet'] = dict(
            vpnservice.subnet)
        vpnservice_dict['external_ip'] = vpnservice.router.gw_port[
            'fixed_ips'][0]['ip_address']
        for ipsec_conn in vpnservice.ipsec_site_connections:
            ipsec_conn_dict = dict(ipsec_conn)
            ipsec_conn_dict['ike_policy'] = dict(ipsec_conn.ikepolicy)
            ipsec_conn_dict['ipsec_policy'] = dict(ipsec_conn.ipsecpolicy)
            ipsec_conn_dict['peer_cidrs'] = [
                peer_cidr.cidr for peer_cidr in ipsec_conn.peer_cidrs]
            # Attach the CSR-specific tunnel/policy ID mappings.
            ipsec_conn_dict['cisco'] = self.get_cisco_connection_mappings(
                ipsec_conn['id'], context)
            vpnservice_dict['ipsec_conns'].append(ipsec_conn_dict)
        return vpnservice_dict

View File

@ -1,156 +0,0 @@
# vim: tabstop=10 shiftwidth=4 softtabstop=4
#
# Copyright 2013, Nachi Ueno, NTT I3, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from neutron.common import rpc_compat
from neutron.openstack.common import log as logging
from neutron.services.vpn.common import topics
from neutron.services.vpn import service_drivers
LOG = logging.getLogger(__name__)
IPSEC = 'ipsec'
BASE_IPSEC_VERSION = '1.0'
class IPsecVpnDriverCallBack(rpc_compat.RpcCallback):
    """Callback for IPSecVpnDriver rpc (agent -> plugin)."""
    # history
    #   1.0 Initial version
    RPC_API_VERSION = BASE_IPSEC_VERSION
    def __init__(self, driver):
        super(IPsecVpnDriverCallBack, self).__init__()
        self.driver = driver
    def get_vpn_services_on_host(self, context, host=None):
        """Returns the vpnservices on the host."""
        plugin = self.driver.service_plugin
        vpnservices = plugin._get_agent_hosting_vpn_services(
            context, host)
        return [self.driver._make_vpnservice_dict(vpnservice)
                for vpnservice in vpnservices]
    def update_status(self, context, status):
        """Update status of vpnservices."""
        plugin = self.driver.service_plugin
        plugin.update_status_by_agent(context, status)
class IPsecVpnAgentApi(service_drivers.BaseIPsecVpnAgentApi,
                       rpc_compat.RpcCallback):
    """Agent RPC API for IPsecVPNAgent."""
    RPC_API_VERSION = BASE_IPSEC_VERSION
    def __init__(self, topic, default_version):
        # Casts go to the generic IPSec agent topic.
        super(IPsecVpnAgentApi, self).__init__(
            topics.IPSEC_AGENT_TOPIC, topic, default_version)
class IPsecVPNDriver(service_drivers.VpnDriver):
    """VPN Service Driver class for IPsec.

    Relays vpnservice/connection changes to the IPSec agent over RPC.
    """
    def __init__(self, service_plugin):
        super(IPsecVPNDriver, self).__init__(service_plugin)
        # RPC server side (agent -> driver) ...
        self.endpoints = [IPsecVpnDriverCallBack(self)]
        self.conn = rpc_compat.create_connection(new=True)
        self.conn.create_consumer(
            topics.IPSEC_DRIVER_TOPIC, self.endpoints, fanout=False)
        self.conn.consume_in_threads()
        # ... and client side (driver -> agent).
        self.agent_rpc = IPsecVpnAgentApi(
            topics.IPSEC_AGENT_TOPIC, BASE_IPSEC_VERSION)
    @property
    def service_type(self):
        return IPSEC
    def create_ipsec_site_connection(self, context, ipsec_site_connection):
        vpnservice = self.service_plugin._get_vpnservice(
            context, ipsec_site_connection['vpnservice_id'])
        self.agent_rpc.vpnservice_updated(context, vpnservice['router_id'])
    def update_ipsec_site_connection(
        self, context, old_ipsec_site_connection, ipsec_site_connection):
        vpnservice = self.service_plugin._get_vpnservice(
            context, ipsec_site_connection['vpnservice_id'])
        self.agent_rpc.vpnservice_updated(context, vpnservice['router_id'])
    def delete_ipsec_site_connection(self, context, ipsec_site_connection):
        vpnservice = self.service_plugin._get_vpnservice(
            context, ipsec_site_connection['vpnservice_id'])
        self.agent_rpc.vpnservice_updated(context, vpnservice['router_id'])
    # Policy and vpnservice-create notifications are no-ops for this
    # driver; the agent picks the policies up with the connection data.
    def create_ikepolicy(self, context, ikepolicy):
        pass
    def delete_ikepolicy(self, context, ikepolicy):
        pass
    def update_ikepolicy(self, context, old_ikepolicy, ikepolicy):
        pass
    def create_ipsecpolicy(self, context, ipsecpolicy):
        pass
    def delete_ipsecpolicy(self, context, ipsecpolicy):
        pass
    def update_ipsecpolicy(self, context, old_ipsec_policy, ipsecpolicy):
        pass
    def create_vpnservice(self, context, vpnservice):
        pass
    def update_vpnservice(self, context, old_vpnservice, vpnservice):
        self.agent_rpc.vpnservice_updated(context, vpnservice['router_id'])
    def delete_vpnservice(self, context, vpnservice):
        self.agent_rpc.vpnservice_updated(context, vpnservice['router_id'])
    def _make_vpnservice_dict(self, vpnservice):
        """Convert vpnservice information for the vpn agent.

        Also converts parameter names for the vpn agent driver.
        """
        vpnservice_dict = dict(vpnservice)
        vpnservice_dict['ipsec_site_connections'] = []
        vpnservice_dict['subnet'] = dict(
            vpnservice.subnet)
        vpnservice_dict['external_ip'] = vpnservice.router.gw_port[
            'fixed_ips'][0]['ip_address']
        for ipsec_site_connection in vpnservice.ipsec_site_connections:
            ipsec_site_connection_dict = dict(ipsec_site_connection)
            try:
                netaddr.IPAddress(ipsec_site_connection['peer_id'])
            except netaddr.core.AddrFormatError:
                # NOTE(review): the '@' prefix (FQDN-style peer id) is
                # applied to the DB object AFTER the dict copy above was
                # taken, so the copied dict appears to miss the prefix —
                # verify whether this is intended.
                ipsec_site_connection['peer_id'] = (
                    '@' + ipsec_site_connection['peer_id'])
            ipsec_site_connection_dict['ikepolicy'] = dict(
                ipsec_site_connection.ikepolicy)
            ipsec_site_connection_dict['ipsecpolicy'] = dict(
                ipsec_site_connection.ipsecpolicy)
            vpnservice_dict['ipsec_site_connections'].append(
                ipsec_site_connection_dict)
            peer_cidrs = [
                peer_cidr.cidr
                for peer_cidr in ipsec_site_connection.peer_cidrs]
            ipsec_site_connection_dict['peer_cidrs'] = peer_cidrs
        return vpnservice_dict