diff --git a/neutron/plugins/bigswitch/README b/neutron/plugins/bigswitch/README deleted file mode 100644 index 43f157d12..000000000 --- a/neutron/plugins/bigswitch/README +++ /dev/null @@ -1,14 +0,0 @@ -# Neuron REST Proxy Plug-in for Big Switch and FloodLight Controllers - -This module provides a generic neutron plugin 'NeutronRestProxy' that -translates neutron function calls to authenticated REST requests (JSON supported) -to a set of redundant external network controllers. - -It also keeps a local persistent store of neutron state that has been -setup using that API. - -Currently the FloodLight Openflow Controller or the Big Switch Networks Controller -can be configured as external network controllers for this plugin. - -For more details on this plugin, please refer to the following link: -http://www.openflowhub.org/display/floodlightcontroller/Neutron+REST+Proxy+Plugin diff --git a/neutron/plugins/bigswitch/__init__.py b/neutron/plugins/bigswitch/__init__.py deleted file mode 100644 index 2a2421616..000000000 --- a/neutron/plugins/bigswitch/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2012 Big Switch Networks, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# diff --git a/neutron/plugins/bigswitch/agent/__init__.py b/neutron/plugins/bigswitch/agent/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/neutron/plugins/bigswitch/agent/restproxy_agent.py b/neutron/plugins/bigswitch/agent/restproxy_agent.py deleted file mode 100644 index 97aa7d0e3..000000000 --- a/neutron/plugins/bigswitch/agent/restproxy_agent.py +++ /dev/null @@ -1,181 +0,0 @@ -# Copyright 2014 Big Switch Networks, Inc. -# All Rights Reserved. -# -# Copyright 2011 VMware, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# @author: Kevin Benton, kevin.benton@bigswitch.com - -import sys -import time - -import eventlet -eventlet.monkey_patch() - -from oslo.config import cfg - -from neutron.agent.linux import ovs_lib -from neutron.agent.linux import utils -from neutron.agent import rpc as agent_rpc -from neutron.agent import securitygroups_rpc as sg_rpc -from neutron.common import config -from neutron.common import rpc_compat -from neutron.common import topics -from neutron import context as q_context -from neutron.extensions import securitygroup as ext_sg -from neutron.openstack.common import excutils -from neutron.openstack.common import log -from neutron.plugins.bigswitch import config as pl_config - -LOG = log.getLogger(__name__) - - -class IVSBridge(ovs_lib.OVSBridge): - ''' - This class does not provide parity with OVS using IVS. - It's only the bare minimum necessary to use IVS with this agent. 
- ''' - def run_vsctl(self, args, check_error=False): - full_args = ["ivs-ctl"] + args - try: - return utils.execute(full_args, root_helper=self.root_helper) - except Exception as e: - with excutils.save_and_reraise_exception() as ctxt: - LOG.error(_("Unable to execute %(cmd)s. " - "Exception: %(exception)s"), - {'cmd': full_args, 'exception': e}) - if not check_error: - ctxt.reraise = False - - def get_vif_port_set(self): - port_names = self.get_port_name_list() - edge_ports = set(port_names) - return edge_ports - - def get_vif_port_by_id(self, port_id): - # IVS in nova uses hybrid method with last 14 chars of UUID - name = 'qvo%s' % port_id[:14] - if name in self.get_vif_port_set(): - return name - return False - - -class PluginApi(agent_rpc.PluginApi, - sg_rpc.SecurityGroupServerRpcApiMixin): - pass - - -class SecurityGroupAgent(sg_rpc.SecurityGroupAgentRpcMixin): - def __init__(self, context, plugin_rpc, root_helper): - self.context = context - self.plugin_rpc = plugin_rpc - self.root_helper = root_helper - self.init_firewall() - - -class RestProxyAgent(rpc_compat.RpcCallback, - sg_rpc.SecurityGroupAgentRpcCallbackMixin): - - RPC_API_VERSION = '1.1' - - def __init__(self, integ_br, polling_interval, root_helper, vs='ovs'): - super(RestProxyAgent, self).__init__() - self.polling_interval = polling_interval - self._setup_rpc() - self.sg_agent = SecurityGroupAgent(self.context, - self.plugin_rpc, - root_helper) - if vs == 'ivs': - self.int_br = IVSBridge(integ_br, root_helper) - else: - self.int_br = ovs_lib.OVSBridge(integ_br, root_helper) - - def _setup_rpc(self): - self.topic = topics.AGENT - self.plugin_rpc = PluginApi(topics.PLUGIN) - self.context = q_context.get_admin_context_without_session() - self.endpoints = [self] - consumers = [[topics.PORT, topics.UPDATE], - [topics.SECURITY_GROUP, topics.UPDATE]] - self.connection = agent_rpc.create_consumers(self.endpoints, - self.topic, - consumers) - - def port_update(self, context, **kwargs): - LOG.debug(_("Port 
update received")) - port = kwargs.get('port') - vif_port = self.int_br.get_vif_port_by_id(port['id']) - if not vif_port: - LOG.debug(_("Port %s is not present on this host."), port['id']) - return - - LOG.debug(_("Port %s found. Refreshing firewall."), port['id']) - if ext_sg.SECURITYGROUPS in port: - self.sg_agent.refresh_firewall() - - def _update_ports(self, registered_ports): - ports = self.int_br.get_vif_port_set() - if ports == registered_ports: - return - added = ports - registered_ports - removed = registered_ports - ports - return {'current': ports, - 'added': added, - 'removed': removed} - - def _process_devices_filter(self, port_info): - if 'added' in port_info: - self.sg_agent.prepare_devices_filter(port_info['added']) - if 'removed' in port_info: - self.sg_agent.remove_devices_filter(port_info['removed']) - - def daemon_loop(self): - ports = set() - - while True: - start = time.time() - try: - port_info = self._update_ports(ports) - if port_info: - LOG.debug(_("Agent loop has new device")) - self._process_devices_filter(port_info) - ports = port_info['current'] - except Exception: - LOG.exception(_("Error in agent event loop")) - - elapsed = max(time.time() - start, 0) - if (elapsed < self.polling_interval): - time.sleep(self.polling_interval - elapsed) - else: - LOG.debug(_("Loop iteration exceeded interval " - "(%(polling_interval)s vs. 
%(elapsed)s)!"), - {'polling_interval': self.polling_interval, - 'elapsed': elapsed}) - - -def main(): - config.init(sys.argv[1:]) - config.setup_logging(cfg.CONF) - pl_config.register_config() - - integ_br = cfg.CONF.RESTPROXYAGENT.integration_bridge - polling_interval = cfg.CONF.RESTPROXYAGENT.polling_interval - root_helper = cfg.CONF.AGENT.root_helper - bsnagent = RestProxyAgent(integ_br, polling_interval, root_helper, - cfg.CONF.RESTPROXYAGENT.virtual_switch_type) - bsnagent.daemon_loop() - sys.exit(0) - -if __name__ == "__main__": - main() diff --git a/neutron/plugins/bigswitch/config.py b/neutron/plugins/bigswitch/config.py deleted file mode 100644 index 4646319c9..000000000 --- a/neutron/plugins/bigswitch/config.py +++ /dev/null @@ -1,123 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# Copyright 2014 Big Switch Networks, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Mandeep Dhami, Big Switch Networks, Inc. -# @author: Sumit Naiksatam, sumitnaiksatam@gmail.com, Big Switch Networks, Inc. -# @author: Kevin Benton, Big Switch Networks, Inc. 
- -""" -This module manages configuration options -""" - -from oslo.config import cfg - -from neutron.agent.common import config as agconfig -from neutron.common import utils -from neutron.extensions import portbindings - -restproxy_opts = [ - cfg.ListOpt('servers', default=['localhost:8800'], - help=_("A comma separated list of Big Switch or Floodlight " - "servers and port numbers. The plugin proxies the " - "requests to the Big Switch/Floodlight server, " - "which performs the networking configuration. Only one" - "server is needed per deployment, but you may wish to" - "deploy multiple servers to support failover.")), - cfg.StrOpt('server_auth', secret=True, - help=_("The username and password for authenticating against " - " the Big Switch or Floodlight controller.")), - cfg.BoolOpt('server_ssl', default=True, - help=_("If True, Use SSL when connecting to the Big Switch or " - "Floodlight controller.")), - cfg.BoolOpt('ssl_sticky', default=True, - help=_("Trust and store the first certificate received for " - "each controller address and use it to validate future " - "connections to that address.")), - cfg.BoolOpt('no_ssl_validation', default=False, - help=_("Disables SSL certificate validation for controllers")), - cfg.BoolOpt('cache_connections', default=True, - help=_("Re-use HTTP/HTTPS connections to the controller.")), - cfg.StrOpt('ssl_cert_directory', - default='/etc/neutron/plugins/bigswitch/ssl', - help=_("Directory containing ca_certs and host_certs " - "certificate directories.")), - cfg.BoolOpt('sync_data', default=False, - help=_("Sync data on connect")), - cfg.BoolOpt('auto_sync_on_failure', default=True, - help=_("If neutron fails to create a resource because " - "the backend controller doesn't know of a dependency, " - "the plugin automatically triggers a full data " - "synchronization to the controller.")), - cfg.IntOpt('consistency_interval', default=60, - help=_("Time between verifications that the backend controller " - "database is 
consistent with Neutron. (0 to disable)")), - cfg.IntOpt('server_timeout', default=10, - help=_("Maximum number of seconds to wait for proxy request " - "to connect and complete.")), - cfg.IntOpt('thread_pool_size', default=4, - help=_("Maximum number of threads to spawn to handle large " - "volumes of port creations.")), - cfg.StrOpt('neutron_id', default='neutron-' + utils.get_hostname(), - deprecated_name='quantum_id', - help=_("User defined identifier for this Neutron deployment")), - cfg.BoolOpt('add_meta_server_route', default=True, - help=_("Flag to decide if a route to the metadata server " - "should be injected into the VM")), -] -router_opts = [ - cfg.MultiStrOpt('tenant_default_router_rule', default=['*:any:any:permit'], - help=_("The default router rules installed in new tenant " - "routers. Repeat the config option for each rule. " - "Format is :::" - " Use an * to specify default for all tenants.")), - cfg.IntOpt('max_router_rules', default=200, - help=_("Maximum number of router rules")), -] -nova_opts = [ - cfg.StrOpt('vif_type', default='ovs', - help=_("Virtual interface type to configure on " - "Nova compute nodes")), -] - -# Each VIF Type can have a list of nova host IDs that are fixed to that type -for i in portbindings.VIF_TYPES: - opt = cfg.ListOpt('node_override_vif_' + i, default=[], - help=_("Nova compute nodes to manually set VIF " - "type to %s") % i) - nova_opts.append(opt) - -# Add the vif types for reference later -nova_opts.append(cfg.ListOpt('vif_types', - default=portbindings.VIF_TYPES, - help=_('List of allowed vif_type values.'))) - -agent_opts = [ - cfg.StrOpt('integration_bridge', default='br-int', - help=_('Name of integration bridge on compute ' - 'nodes used for security group insertion.')), - cfg.IntOpt('polling_interval', default=5, - help=_('Seconds between agent checks for port changes')), - cfg.StrOpt('virtual_switch_type', default='ovs', - help=_('Virtual switch type.')) -] - - -def register_config(): - 
cfg.CONF.register_opts(restproxy_opts, "RESTPROXY") - cfg.CONF.register_opts(router_opts, "ROUTER") - cfg.CONF.register_opts(nova_opts, "NOVA") - cfg.CONF.register_opts(agent_opts, "RESTPROXYAGENT") - agconfig.register_root_helper(cfg.CONF) diff --git a/neutron/plugins/bigswitch/db/__init__.py b/neutron/plugins/bigswitch/db/__init__.py deleted file mode 100644 index c05daecf8..000000000 --- a/neutron/plugins/bigswitch/db/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2013 Big Switch Networks, Inc. -# All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Kevin Benton, Big Switch Networks, Inc. diff --git a/neutron/plugins/bigswitch/db/consistency_db.py b/neutron/plugins/bigswitch/db/consistency_db.py deleted file mode 100644 index cd89a2690..000000000 --- a/neutron/plugins/bigswitch/db/consistency_db.py +++ /dev/null @@ -1,56 +0,0 @@ -# Copyright 2014, Big Switch Networks -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -import sqlalchemy as sa - -from neutron.db import api as db -from neutron.db import model_base -from neutron.openstack.common import log as logging - -LOG = logging.getLogger(__name__) - -''' -A simple table to store the latest consistency hash -received from a server in case neutron gets restarted. -''' - - -class ConsistencyHash(model_base.BASEV2): - ''' - For now we only support one global state so the - hash_id will always be '1' - ''' - __tablename__ = 'consistencyhashes' - hash_id = sa.Column(sa.String(255), - primary_key=True) - hash = sa.Column(sa.String(255), nullable=False) - - -def get_consistency_hash(hash_id='1'): - session = db.get_session() - with session.begin(subtransactions=True): - query = session.query(ConsistencyHash) - res = query.filter_by(hash_id=hash_id).first() - if not res: - return False - return res.hash - - -def put_consistency_hash(hash, hash_id='1'): - session = db.get_session() - with session.begin(subtransactions=True): - conhash = ConsistencyHash(hash_id=hash_id, hash=hash) - session.merge(conhash) - LOG.debug(_("Consistency hash for group %(hash_id)s updated " - "to %(hash)s"), {'hash_id': hash_id, 'hash': hash}) diff --git a/neutron/plugins/bigswitch/db/porttracker_db.py b/neutron/plugins/bigswitch/db/porttracker_db.py deleted file mode 100644 index 7966c7c7d..000000000 --- a/neutron/plugins/bigswitch/db/porttracker_db.py +++ /dev/null @@ -1,53 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2013, Big Switch Networks -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from neutron.api.v2 import attributes -from neutron.openstack.common import log as logging - -LOG = logging.getLogger(__name__) - - -def get_port_hostid(context, port_id): - # REVISIT(kevinbenton): this is a workaround to avoid portbindings_db - # relational table generation until one of the functions is called. - from neutron.db import portbindings_db - with context.session.begin(subtransactions=True): - query = context.session.query(portbindings_db.PortBindingPort) - res = query.filter_by(port_id=port_id).first() - if not res: - return False - return res.host - - -def put_port_hostid(context, port_id, host): - # REVISIT(kevinbenton): this is a workaround to avoid portbindings_db - # relational table generation until one of the functions is called. 
- from neutron.db import portbindings_db - if not attributes.is_attr_set(host): - LOG.warning(_("No host_id in port request to track port location.")) - return - if port_id == '': - LOG.warning(_("Received an empty port ID for host_id '%s'"), host) - return - if host == '': - LOG.debug(_("Received an empty host_id for port '%s'"), port_id) - return - LOG.debug(_("Logging port %(port)s on host_id %(host)s"), - {'port': port_id, 'host': host}) - with context.session.begin(subtransactions=True): - location = portbindings_db.PortBindingPort(port_id=port_id, host=host) - context.session.merge(location) diff --git a/neutron/plugins/bigswitch/extensions/__init__.py b/neutron/plugins/bigswitch/extensions/__init__.py deleted file mode 100644 index c05daecf8..000000000 --- a/neutron/plugins/bigswitch/extensions/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2013 Big Switch Networks, Inc. -# All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Kevin Benton, Big Switch Networks, Inc. diff --git a/neutron/plugins/bigswitch/extensions/routerrule.py b/neutron/plugins/bigswitch/extensions/routerrule.py deleted file mode 100644 index 2563d113d..000000000 --- a/neutron/plugins/bigswitch/extensions/routerrule.py +++ /dev/null @@ -1,144 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2013 Big Switch Networks, Inc. 
-# All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Kevin Benton, Big Switch Networks, Inc. - -from neutron.api.v2 import attributes as attr -from neutron.common import exceptions as qexception -from neutron.openstack.common import log as logging - - -LOG = logging.getLogger(__name__) - - -# Router Rules Exceptions -class InvalidRouterRules(qexception.InvalidInput): - message = _("Invalid format for router rules: %(rule)s, %(reason)s") - - -class RulesExhausted(qexception.BadRequest): - message = _("Unable to complete rules update for %(router_id)s. 
" - "The number of rules exceeds the maximum %(quota)s.") - - -def convert_to_valid_router_rules(data): - """ - Validates and converts router rules to the appropriate data structure - Example argument = [{'source': 'any', 'destination': 'any', - 'action':'deny'}, - {'source': '1.1.1.1/32', 'destination': 'external', - 'action':'permit', - 'nexthops': ['1.1.1.254', '1.1.1.253']} - ] - """ - V4ANY = '0.0.0.0/0' - CIDRALL = ['any', 'external'] - if not isinstance(data, list): - emsg = _("Invalid data format for router rule: '%s'") % data - LOG.debug(emsg) - raise qexception.InvalidInput(error_message=emsg) - _validate_uniquerules(data) - rules = [] - expected_keys = ['source', 'destination', 'action'] - for rule in data: - rule['nexthops'] = rule.get('nexthops', []) - if not isinstance(rule['nexthops'], list): - rule['nexthops'] = rule['nexthops'].split('+') - - src = V4ANY if rule['source'] in CIDRALL else rule['source'] - dst = V4ANY if rule['destination'] in CIDRALL else rule['destination'] - - errors = [attr._verify_dict_keys(expected_keys, rule, False), - attr._validate_subnet(dst), - attr._validate_subnet(src), - _validate_nexthops(rule['nexthops']), - _validate_action(rule['action'])] - errors = [m for m in errors if m] - if errors: - LOG.debug(errors) - raise qexception.InvalidInput(error_message=errors) - rules.append(rule) - return rules - - -def _validate_nexthops(nexthops): - seen = [] - for ip in nexthops: - msg = attr._validate_ip_address(ip) - if ip in seen: - msg = _("Duplicate nexthop in rule '%s'") % ip - seen.append(ip) - if msg: - return msg - - -def _validate_action(action): - if action not in ['permit', 'deny']: - return _("Action must be either permit or deny." 
- " '%s' was provided") % action - - -def _validate_uniquerules(rules): - pairs = [] - for r in rules: - if 'source' not in r or 'destination' not in r: - continue - pairs.append((r['source'], r['destination'])) - - if len(set(pairs)) != len(pairs): - error = _("Duplicate router rules (src,dst) found '%s'") % pairs - LOG.debug(error) - raise qexception.InvalidInput(error_message=error) - - -class Routerrule(object): - - @classmethod - def get_name(cls): - return "Neutron Router Rule" - - @classmethod - def get_alias(cls): - return "router_rules" - - @classmethod - def get_description(cls): - return "Router rule configuration for L3 router" - - @classmethod - def get_namespace(cls): - return "http://docs.openstack.org/ext/neutron/routerrules/api/v1.0" - - @classmethod - def get_updated(cls): - return "2013-05-23T10:00:00-00:00" - - def get_extended_resources(self, version): - if version == "2.0": - return EXTENDED_ATTRIBUTES_2_0 - else: - return {} - -# Attribute Map -EXTENDED_ATTRIBUTES_2_0 = { - 'routers': { - 'router_rules': {'allow_post': False, 'allow_put': True, - 'convert_to': convert_to_valid_router_rules, - 'is_visible': True, - 'default': attr.ATTR_NOT_SPECIFIED}, - } -} diff --git a/neutron/plugins/bigswitch/plugin.py b/neutron/plugins/bigswitch/plugin.py deleted file mode 100644 index c13c45b65..000000000 --- a/neutron/plugins/bigswitch/plugin.py +++ /dev/null @@ -1,1115 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# Copyright 2012 Big Switch Networks, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Mandeep Dhami, Big Switch Networks, Inc. -# @author: Sumit Naiksatam, sumitnaiksatam@gmail.com, Big Switch Networks, Inc. - -""" -Neutron REST Proxy Plug-in for Big Switch and FloodLight Controllers. - -NeutronRestProxy provides a generic neutron plugin that translates all plugin -function calls to equivalent authenticated REST calls to a set of redundant -external network controllers. It also keeps persistent store for all neutron -state to allow for re-sync of the external controller(s), if required. - -The local state on the plugin also allows for local response and fast-fail -semantics where it can be determined based on the local persistent store. - -Network controller specific code is decoupled from this plugin and expected -to reside on the controller itself (via the REST interface). - -This allows for: - - independent authentication and redundancy schemes between neutron and the - network controller - - independent upgrade/development cycles between neutron and the controller - as it limits the proxy code upgrade requirement to neutron release cycle - and the controller specific code upgrade requirement to controller code - - ability to sync the controller with neutron for independent recovery/reset - -External REST API used by proxy is the same API as defined for neutron (JSON -subset) with some additional parameters (gateway on network-create and macaddr -on port-attach) on an additional PUT to do a bulk dump of all persistent data. 
-""" - -import copy -import httplib -import re - -import eventlet -from oslo.config import cfg -from sqlalchemy.orm import exc as sqlexc - -from neutron.agent import securitygroups_rpc as sg_rpc -from neutron.api import extensions as neutron_extensions -from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api -from neutron.common import constants as const -from neutron.common import exceptions -from neutron.common import rpc_compat -from neutron.common import topics -from neutron.common import utils -from neutron import context as qcontext -from neutron.db import agents_db -from neutron.db import agentschedulers_db -from neutron.db import allowedaddresspairs_db as addr_pair_db -from neutron.db import api as db -from neutron.db import db_base_plugin_v2 -from neutron.db import dhcp_rpc_base -from neutron.db import external_net_db -from neutron.db import extradhcpopt_db -from neutron.db import l3_db -from neutron.db import models_v2 -from neutron.db import securitygroups_db as sg_db -from neutron.db import securitygroups_rpc_base as sg_rpc_base -from neutron.extensions import allowedaddresspairs as addr_pair -from neutron.extensions import external_net -from neutron.extensions import extra_dhcp_opt as edo_ext -from neutron.extensions import l3 -from neutron.extensions import portbindings -from neutron import manager -from neutron.openstack.common import excutils -from neutron.openstack.common import importutils -from neutron.openstack.common import log as logging -from neutron.plugins.bigswitch import config as pl_config -from neutron.plugins.bigswitch.db import porttracker_db -from neutron.plugins.bigswitch import extensions -from neutron.plugins.bigswitch import routerrule_db -from neutron.plugins.bigswitch import servermanager -from neutron.plugins.bigswitch import version - -LOG = logging.getLogger(__name__) - -SYNTAX_ERROR_MESSAGE = _('Syntax error in server config file, aborting plugin') -METADATA_SERVER_IP = '169.254.169.254' - - -class 
AgentNotifierApi(rpc_compat.RpcProxy, - sg_rpc.SecurityGroupAgentRpcApiMixin): - - BASE_RPC_API_VERSION = '1.1' - - def __init__(self, topic): - super(AgentNotifierApi, self).__init__( - topic=topic, default_version=self.BASE_RPC_API_VERSION) - self.topic_port_update = topics.get_topic_name( - topic, topics.PORT, topics.UPDATE) - - def port_update(self, context, port): - self.fanout_cast(context, - self.make_msg('port_update', - port=port), - topic=self.topic_port_update) - - -class RestProxyCallbacks(rpc_compat.RpcCallback, - sg_rpc_base.SecurityGroupServerRpcCallbackMixin, - dhcp_rpc_base.DhcpRpcCallbackMixin): - - RPC_API_VERSION = '1.1' - - def get_port_from_device(self, device): - port_id = re.sub(r"^tap", "", device) - port = self.get_port_and_sgs(port_id) - if port: - port['device'] = device - return port - - def get_port_and_sgs(self, port_id): - """Get port from database with security group info.""" - - LOG.debug(_("get_port_and_sgs() called for port_id %s"), port_id) - session = db.get_session() - sg_binding_port = sg_db.SecurityGroupPortBinding.port_id - - with session.begin(subtransactions=True): - query = session.query( - models_v2.Port, - sg_db.SecurityGroupPortBinding.security_group_id - ) - query = query.outerjoin(sg_db.SecurityGroupPortBinding, - models_v2.Port.id == sg_binding_port) - query = query.filter(models_v2.Port.id.startswith(port_id)) - port_and_sgs = query.all() - if not port_and_sgs: - return - port = port_and_sgs[0][0] - plugin = manager.NeutronManager.get_plugin() - port_dict = plugin._make_port_dict(port) - port_dict['security_groups'] = [ - sg_id for port_, sg_id in port_and_sgs if sg_id] - port_dict['security_group_rules'] = [] - port_dict['security_group_source_groups'] = [] - port_dict['fixed_ips'] = [ip['ip_address'] - for ip in port['fixed_ips']] - return port_dict - - -class NeutronRestProxyV2Base(db_base_plugin_v2.NeutronDbPluginV2, - external_net_db.External_net_db_mixin, - routerrule_db.RouterRule_db_mixin): - - 
supported_extension_aliases = ["binding"] - servers = None - - def _get_all_data(self, get_ports=True, get_floating_ips=True, - get_routers=True): - admin_context = qcontext.get_admin_context() - networks = [] - # this method is used by the ML2 driver so it can't directly invoke - # the self.get_(ports|networks) methods - plugin = manager.NeutronManager.get_plugin() - all_networks = plugin.get_networks(admin_context) or [] - for net in all_networks: - mapped_network = self._get_mapped_network_with_subnets(net) - flips_n_ports = mapped_network - if get_floating_ips: - flips_n_ports = self._get_network_with_floatingips( - mapped_network) - - if get_ports: - ports = [] - net_filter = {'network_id': [net.get('id')]} - net_ports = plugin.get_ports(admin_context, - filters=net_filter) or [] - for port in net_ports: - mapped_port = self._map_state_and_status(port) - mapped_port['attachment'] = { - 'id': port.get('device_id'), - 'mac': port.get('mac_address'), - } - mapped_port = self._extend_port_dict_binding(admin_context, - mapped_port) - ports.append(mapped_port) - flips_n_ports['ports'] = ports - - if flips_n_ports: - networks.append(flips_n_ports) - - data = {'networks': networks} - - if get_routers: - routers = [] - all_routers = self.get_routers(admin_context) or [] - for router in all_routers: - interfaces = [] - mapped_router = self._map_state_and_status(router) - router_filter = { - 'device_owner': [const.DEVICE_OWNER_ROUTER_INTF], - 'device_id': [router.get('id')] - } - router_ports = self.get_ports(admin_context, - filters=router_filter) or [] - for port in router_ports: - net_id = port.get('network_id') - subnet_id = port['fixed_ips'][0]['subnet_id'] - intf_details = self._get_router_intf_details(admin_context, - net_id, - subnet_id) - interfaces.append(intf_details) - mapped_router['interfaces'] = interfaces - - routers.append(mapped_router) - - data.update({'routers': routers}) - return data - - def _send_all_data(self, send_ports=True, 
send_floating_ips=True, - send_routers=True, timeout=None, - triggered_by_tenant=None): - """Pushes all data to network ctrl (networks/ports, ports/attachments). - - This gives the controller an option to re-sync it's persistent store - with neutron's current view of that data. - """ - data = self._get_all_data(send_ports, send_floating_ips, send_routers) - data['triggered_by_tenant'] = triggered_by_tenant - errstr = _("Unable to update remote topology: %s") - return self.servers.rest_action('PUT', servermanager.TOPOLOGY_PATH, - data, errstr, timeout=timeout) - - def _get_network_with_floatingips(self, network, context=None): - if context is None: - context = qcontext.get_admin_context() - - net_id = network['id'] - net_filter = {'floating_network_id': [net_id]} - fl_ips = self.get_floatingips(context, - filters=net_filter) or [] - network['floatingips'] = fl_ips - - return network - - def _get_all_subnets_json_for_network(self, net_id, context=None): - if context is None: - context = qcontext.get_admin_context() - # start a sub-transaction to avoid breaking parent transactions - with context.session.begin(subtransactions=True): - subnets = self._get_subnets_by_network(context, - net_id) - subnets_details = [] - if subnets: - for subnet in subnets: - subnet_dict = self._make_subnet_dict(subnet) - mapped_subnet = self._map_state_and_status(subnet_dict) - subnets_details.append(mapped_subnet) - - return subnets_details - - def _get_mapped_network_with_subnets(self, network, context=None): - # if context is not provided, admin context is used - if context is None: - context = qcontext.get_admin_context() - network = self._map_state_and_status(network) - subnets = self._get_all_subnets_json_for_network(network['id'], - context) - network['subnets'] = subnets - for subnet in (subnets or []): - if subnet['gateway_ip']: - # FIX: For backward compatibility with wire protocol - network['gateway'] = subnet['gateway_ip'] - break - else: - network['gateway'] = '' - 
network[external_net.EXTERNAL] = self._network_is_external( - context, network['id']) - # include ML2 segmentation types - network['segmentation_types'] = getattr(self, "segmentation_types", "") - return network - - def _send_create_network(self, network, context=None): - tenant_id = network['tenant_id'] - mapped_network = self._get_mapped_network_with_subnets(network, - context) - self.servers.rest_create_network(tenant_id, mapped_network) - - def _send_update_network(self, network, context=None): - net_id = network['id'] - tenant_id = network['tenant_id'] - mapped_network = self._get_mapped_network_with_subnets(network, - context) - net_fl_ips = self._get_network_with_floatingips(mapped_network, - context) - self.servers.rest_update_network(tenant_id, net_id, net_fl_ips) - - def _send_delete_network(self, network, context=None): - net_id = network['id'] - tenant_id = network['tenant_id'] - self.servers.rest_delete_network(tenant_id, net_id) - - def _map_state_and_status(self, resource): - resource = copy.copy(resource) - - resource['state'] = ('UP' if resource.pop('admin_state_up', - True) else 'DOWN') - resource.pop('status', None) - - return resource - - def _warn_on_state_status(self, resource): - if resource.get('admin_state_up', True) is False: - LOG.warning(_("Setting admin_state_up=False is not supported " - "in this plugin version. Ignoring setting for " - "resource: %s"), resource) - - if 'status' in resource: - if resource['status'] != const.NET_STATUS_ACTIVE: - LOG.warning(_("Operational status is internally set by the " - "plugin. 
Ignoring setting status=%s."), - resource['status']) - - def _get_router_intf_details(self, context, intf_id, subnet_id): - - # we will use the network id as interface's id - net_id = intf_id - network = self.get_network(context, net_id) - subnet = self.get_subnet(context, subnet_id) - mapped_network = self._get_mapped_network_with_subnets(network) - mapped_subnet = self._map_state_and_status(subnet) - - data = { - 'id': intf_id, - "network": mapped_network, - "subnet": mapped_subnet - } - - return data - - def _extend_port_dict_binding(self, context, port): - cfg_vif_type = cfg.CONF.NOVA.vif_type.lower() - if not cfg_vif_type in (portbindings.VIF_TYPE_OVS, - portbindings.VIF_TYPE_IVS): - LOG.warning(_("Unrecognized vif_type in configuration " - "[%s]. Defaulting to ovs."), - cfg_vif_type) - cfg_vif_type = portbindings.VIF_TYPE_OVS - # In ML2, the host_id is already populated - if portbindings.HOST_ID in port: - hostid = port[portbindings.HOST_ID] - else: - hostid = porttracker_db.get_port_hostid(context, port['id']) - if hostid: - port[portbindings.HOST_ID] = hostid - override = self._check_hostvif_override(hostid) - if override: - cfg_vif_type = override - port[portbindings.VIF_TYPE] = cfg_vif_type - - port[portbindings.VIF_DETAILS] = { - # TODO(rkukura): Replace with new VIF security details - portbindings.CAP_PORT_FILTER: - 'security-group' in self.supported_extension_aliases, - portbindings.OVS_HYBRID_PLUG: True - } - return port - - def _check_hostvif_override(self, hostid): - for v in cfg.CONF.NOVA.vif_types: - if hostid in getattr(cfg.CONF.NOVA, "node_override_vif_" + v, []): - return v - return False - - def _get_port_net_tenantid(self, context, port): - net = super(NeutronRestProxyV2Base, - self).get_network(context, port["network_id"]) - return net['tenant_id'] - - def async_port_create(self, tenant_id, net_id, port): - try: - self.servers.rest_create_port(tenant_id, net_id, port) - except servermanager.RemoteRestError as e: - # 404 should never be 
received on a port create unless - # there are inconsistencies between the data in neutron - # and the data in the backend. - # Run a sync to get it consistent. - if (cfg.CONF.RESTPROXY.auto_sync_on_failure and - e.status == httplib.NOT_FOUND and - servermanager.NXNETWORK in e.reason): - LOG.error(_("Iconsistency with backend controller " - "triggering full synchronization.")) - # args depend on if we are operating in ML2 driver - # or as the full plugin - topoargs = self.servers.get_topo_function_args - self._send_all_data( - send_ports=topoargs['get_ports'], - send_floating_ips=topoargs['get_floating_ips'], - send_routers=topoargs['get_routers'], - triggered_by_tenant=tenant_id - ) - # If the full sync worked, the port will be created - # on the controller so it can be safely marked as active - else: - # Any errors that don't result in a successful auto-sync - # require that the port be placed into the error state. - LOG.error( - _("NeutronRestProxyV2: Unable to create port: %s"), e) - try: - self._set_port_status(port['id'], const.PORT_STATUS_ERROR) - except exceptions.PortNotFound: - # If port is already gone from DB and there was an error - # creating on the backend, everything is already consistent - pass - return - new_status = (const.PORT_STATUS_ACTIVE if port['state'] == 'UP' - else const.PORT_STATUS_DOWN) - try: - self._set_port_status(port['id'], new_status) - except exceptions.PortNotFound: - # This port was deleted before the create made it to the controller - # so it now needs to be deleted since the normal delete request - # would have deleted an non-existent port. 
- self.servers.rest_delete_port(tenant_id, net_id, port['id']) - - # NOTE(kevinbenton): workaround for eventlet/mysql deadlock - @utils.synchronized('bsn-port-barrier') - def _set_port_status(self, port_id, status): - session = db.get_session() - try: - port = session.query(models_v2.Port).filter_by(id=port_id).one() - port['status'] = status - session.flush() - except sqlexc.NoResultFound: - raise exceptions.PortNotFound(port_id=port_id) - - -class NeutronRestProxyV2(NeutronRestProxyV2Base, - addr_pair_db.AllowedAddressPairsMixin, - extradhcpopt_db.ExtraDhcpOptMixin, - agentschedulers_db.DhcpAgentSchedulerDbMixin, - sg_rpc_base.SecurityGroupServerRpcMixin): - - _supported_extension_aliases = ["external-net", "router", "binding", - "router_rules", "extra_dhcp_opt", "quotas", - "dhcp_agent_scheduler", "agent", - "security-group", "allowed-address-pairs"] - - @property - def supported_extension_aliases(self): - if not hasattr(self, '_aliases'): - aliases = self._supported_extension_aliases[:] - sg_rpc.disable_security_group_extension_by_config(aliases) - self._aliases = aliases - return self._aliases - - def __init__(self): - super(NeutronRestProxyV2, self).__init__() - LOG.info(_('NeutronRestProxy: Starting plugin. 
Version=%s'), - version.version_string_with_vcs()) - pl_config.register_config() - self.evpool = eventlet.GreenPool(cfg.CONF.RESTPROXY.thread_pool_size) - - # Include the Big Switch Extensions path in the api_extensions - neutron_extensions.append_api_extensions_path(extensions.__path__) - - self.add_meta_server_route = cfg.CONF.RESTPROXY.add_meta_server_route - - # init network ctrl connections - self.servers = servermanager.ServerPool() - self.servers.get_topo_function = self._get_all_data - self.servers.get_topo_function_args = {'get_ports': True, - 'get_floating_ips': True, - 'get_routers': True} - - self.network_scheduler = importutils.import_object( - cfg.CONF.network_scheduler_driver - ) - - # setup rpc for security and DHCP agents - self._setup_rpc() - - if cfg.CONF.RESTPROXY.sync_data: - self._send_all_data() - - LOG.debug(_("NeutronRestProxyV2: initialization done")) - - def _setup_rpc(self): - self.conn = rpc_compat.create_connection(new=True) - self.topic = topics.PLUGIN - self.notifier = AgentNotifierApi(topics.AGENT) - # init dhcp agent support - self._dhcp_agent_notifier = dhcp_rpc_agent_api.DhcpAgentNotifyAPI() - self.agent_notifiers[const.AGENT_TYPE_DHCP] = ( - self._dhcp_agent_notifier - ) - self.endpoints = [RestProxyCallbacks(), - agents_db.AgentExtRpcCallback()] - self.conn.create_consumer(self.topic, self.endpoints, - fanout=False) - # Consume from all consumers in threads - self.conn.consume_in_threads() - - def create_network(self, context, network): - """Create a network. - - Network represents an L2 network segment which can have a set of - subnets and ports associated with it. - - :param context: neutron api request context - :param network: dictionary describing the network - - :returns: a sequence of mappings with the following signature: - { - "id": UUID representing the network. - "name": Human-readable name identifying the network. - "tenant_id": Owner of network. NOTE: only admin user can specify - a tenant_id other than its own. 
- "admin_state_up": Sets admin state of network. - if down, network does not forward packets. - "status": Indicates whether network is currently operational - (values are "ACTIVE", "DOWN", "BUILD", and "ERROR") - "subnets": Subnets associated with this network. - } - - :raises: RemoteRestError - """ - LOG.debug(_("NeutronRestProxyV2: create_network() called")) - - self._warn_on_state_status(network['network']) - - with context.session.begin(subtransactions=True): - self._ensure_default_security_group( - context, - network['network']["tenant_id"] - ) - # create network in DB - new_net = super(NeutronRestProxyV2, self).create_network(context, - network) - self._process_l3_create(context, new_net, network['network']) - # create network on the network controller - self._send_create_network(new_net, context) - - # return created network - return new_net - - def update_network(self, context, net_id, network): - """Updates the properties of a particular Virtual Network. - - :param context: neutron api request context - :param net_id: uuid of the network to update - :param network: dictionary describing the updates - - :returns: a sequence of mappings with the following signature: - { - "id": UUID representing the network. - "name": Human-readable name identifying the network. - "tenant_id": Owner of network. NOTE: only admin user can - specify a tenant_id other than its own. - "admin_state_up": Sets admin state of network. - if down, network does not forward packets. - "status": Indicates whether network is currently operational - (values are "ACTIVE", "DOWN", "BUILD", and "ERROR") - "subnets": Subnets associated with this network. 
- } - - :raises: exceptions.NetworkNotFound - :raises: RemoteRestError - """ - LOG.debug(_("NeutronRestProxyV2.update_network() called")) - - self._warn_on_state_status(network['network']) - - session = context.session - with session.begin(subtransactions=True): - new_net = super(NeutronRestProxyV2, self).update_network( - context, net_id, network) - self._process_l3_update(context, new_net, network['network']) - - # update network on network controller - self._send_update_network(new_net, context) - return new_net - - # NOTE(kevinbenton): workaround for eventlet/mysql deadlock - @utils.synchronized('bsn-port-barrier') - def delete_network(self, context, net_id): - """Delete a network. - :param context: neutron api request context - :param id: UUID representing the network to delete. - - :returns: None - - :raises: exceptions.NetworkInUse - :raises: exceptions.NetworkNotFound - :raises: RemoteRestError - """ - LOG.debug(_("NeutronRestProxyV2: delete_network() called")) - - # Validate args - orig_net = super(NeutronRestProxyV2, self).get_network(context, net_id) - with context.session.begin(subtransactions=True): - self._process_l3_delete(context, net_id) - ret_val = super(NeutronRestProxyV2, self).delete_network(context, - net_id) - self._send_delete_network(orig_net, context) - return ret_val - - def create_port(self, context, port): - """Create a port, which is a connection point of a device - (e.g., a VM NIC) to attach to a L2 Neutron network. - :param context: neutron api request context - :param port: dictionary describing the port - - :returns: - { - "id": uuid represeting the port. - "network_id": uuid of network. - "tenant_id": tenant_id - "mac_address": mac address to use on this port. - "admin_state_up": Sets admin state of port. if down, port - does not forward packets. 
- "status": dicates whether port is currently operational - (limit values to "ACTIVE", "DOWN", "BUILD", and "ERROR") - "fixed_ips": list of subnet ID"s and IP addresses to be used on - this port - "device_id": identifies the device (e.g., virtual server) using - this port. - } - - :raises: exceptions.NetworkNotFound - :raises: exceptions.StateInvalid - :raises: RemoteRestError - """ - LOG.debug(_("NeutronRestProxyV2: create_port() called")) - - # Update DB in new session so exceptions rollback changes - with context.session.begin(subtransactions=True): - self._ensure_default_security_group_on_port(context, port) - sgids = self._get_security_groups_on_port(context, port) - # non-router port status is set to pending. it is then updated - # after the async rest call completes. router ports are synchronous - if port['port']['device_owner'] == l3_db.DEVICE_OWNER_ROUTER_INTF: - port['port']['status'] = const.PORT_STATUS_ACTIVE - else: - port['port']['status'] = const.PORT_STATUS_BUILD - dhcp_opts = port['port'].get(edo_ext.EXTRADHCPOPTS, []) - new_port = super(NeutronRestProxyV2, self).create_port(context, - port) - self._process_port_create_security_group(context, new_port, sgids) - if (portbindings.HOST_ID in port['port'] - and 'id' in new_port): - host_id = port['port'][portbindings.HOST_ID] - porttracker_db.put_port_hostid(context, new_port['id'], - host_id) - new_port[addr_pair.ADDRESS_PAIRS] = ( - self._process_create_allowed_address_pairs( - context, new_port, - port['port'].get(addr_pair.ADDRESS_PAIRS))) - self._process_port_create_extra_dhcp_opts(context, new_port, - dhcp_opts) - new_port = self._extend_port_dict_binding(context, new_port) - net = super(NeutronRestProxyV2, - self).get_network(context, new_port["network_id"]) - if self.add_meta_server_route: - if new_port['device_owner'] == const.DEVICE_OWNER_DHCP: - destination = METADATA_SERVER_IP + '/32' - self._add_host_route(context, destination, new_port) - - # create on network ctrl - mapped_port = 
self._map_state_and_status(new_port) - # ports have to be created synchronously when creating a router - # port since adding router interfaces is a multi-call process - if mapped_port['device_owner'] == l3_db.DEVICE_OWNER_ROUTER_INTF: - self.servers.rest_create_port(net["tenant_id"], - new_port["network_id"], - mapped_port) - else: - self.evpool.spawn_n(self.async_port_create, net["tenant_id"], - new_port["network_id"], mapped_port) - self.notify_security_groups_member_updated(context, new_port) - return new_port - - def get_port(self, context, id, fields=None): - with context.session.begin(subtransactions=True): - port = super(NeutronRestProxyV2, self).get_port(context, id, - fields) - self._extend_port_dict_binding(context, port) - return self._fields(port, fields) - - def get_ports(self, context, filters=None, fields=None): - with context.session.begin(subtransactions=True): - ports = super(NeutronRestProxyV2, self).get_ports(context, filters, - fields) - for port in ports: - self._extend_port_dict_binding(context, port) - return [self._fields(port, fields) for port in ports] - - def update_port(self, context, port_id, port): - """Update values of a port. - - :param context: neutron api request context - :param id: UUID representing the port to update. - :param port: dictionary with keys indicating fields to update. - - :returns: a mapping sequence with the following signature: - { - "id": uuid represeting the port. - "network_id": uuid of network. - "tenant_id": tenant_id - "mac_address": mac address to use on this port. - "admin_state_up": sets admin state of port. if down, port - does not forward packets. - "status": dicates whether port is currently operational - (limit values to "ACTIVE", "DOWN", "BUILD", and "ERROR") - "fixed_ips": list of subnet ID's and IP addresses to be used on - this port - "device_id": identifies the device (e.g., virtual server) using - this port. 
- } - - :raises: exceptions.StateInvalid - :raises: exceptions.PortNotFound - :raises: RemoteRestError - """ - LOG.debug(_("NeutronRestProxyV2: update_port() called")) - - self._warn_on_state_status(port['port']) - - # Validate Args - orig_port = super(NeutronRestProxyV2, self).get_port(context, port_id) - with context.session.begin(subtransactions=True): - # Update DB - new_port = super(NeutronRestProxyV2, - self).update_port(context, port_id, port) - ctrl_update_required = False - if addr_pair.ADDRESS_PAIRS in port['port']: - ctrl_update_required |= ( - self.update_address_pairs_on_port(context, port_id, port, - orig_port, new_port)) - self._update_extra_dhcp_opts_on_port(context, port_id, port, - new_port) - old_host_id = porttracker_db.get_port_hostid(context, - orig_port['id']) - if (portbindings.HOST_ID in port['port'] - and 'id' in new_port): - host_id = port['port'][portbindings.HOST_ID] - porttracker_db.put_port_hostid(context, new_port['id'], - host_id) - if old_host_id != host_id: - ctrl_update_required = True - - if (new_port.get("device_id") != orig_port.get("device_id") and - orig_port.get("device_id")): - ctrl_update_required = True - - if ctrl_update_required: - # tenant_id must come from network in case network is shared - net_tenant_id = self._get_port_net_tenantid(context, new_port) - new_port = self._extend_port_dict_binding(context, new_port) - mapped_port = self._map_state_and_status(new_port) - self.servers.rest_update_port(net_tenant_id, - new_port["network_id"], - mapped_port) - agent_update_required = self.update_security_group_on_port( - context, port_id, port, orig_port, new_port) - agent_update_required |= self.is_security_group_member_updated( - context, orig_port, new_port) - - # return new_port - return new_port - - # NOTE(kevinbenton): workaround for eventlet/mysql deadlock - @utils.synchronized('bsn-port-barrier') - def delete_port(self, context, port_id, l3_port_check=True): - """Delete a port. 
- :param context: neutron api request context - :param id: UUID representing the port to delete. - - :raises: exceptions.PortInUse - :raises: exceptions.PortNotFound - :raises: exceptions.NetworkNotFound - :raises: RemoteRestError - """ - LOG.debug(_("NeutronRestProxyV2: delete_port() called")) - - # if needed, check to see if this is a port owned by - # and l3-router. If so, we should prevent deletion. - if l3_port_check: - self.prevent_l3_port_deletion(context, port_id) - with context.session.begin(subtransactions=True): - self.disassociate_floatingips(context, port_id) - self._delete_port_security_group_bindings(context, port_id) - port = super(NeutronRestProxyV2, self).get_port(context, port_id) - # Tenant ID must come from network in case the network is shared - tenid = self._get_port_net_tenantid(context, port) - self._delete_port(context, port_id) - self.servers.rest_delete_port(tenid, port['network_id'], port_id) - - def create_subnet(self, context, subnet): - LOG.debug(_("NeutronRestProxyV2: create_subnet() called")) - - self._warn_on_state_status(subnet['subnet']) - - with context.session.begin(subtransactions=True): - # create subnet in DB - new_subnet = super(NeutronRestProxyV2, - self).create_subnet(context, subnet) - net_id = new_subnet['network_id'] - orig_net = super(NeutronRestProxyV2, - self).get_network(context, net_id) - # update network on network controller - self._send_update_network(orig_net, context) - return new_subnet - - def update_subnet(self, context, id, subnet): - LOG.debug(_("NeutronRestProxyV2: update_subnet() called")) - - self._warn_on_state_status(subnet['subnet']) - - with context.session.begin(subtransactions=True): - # update subnet in DB - new_subnet = super(NeutronRestProxyV2, - self).update_subnet(context, id, subnet) - net_id = new_subnet['network_id'] - orig_net = super(NeutronRestProxyV2, - self).get_network(context, net_id) - # update network on network controller - self._send_update_network(orig_net, context) - return 
new_subnet - - # NOTE(kevinbenton): workaround for eventlet/mysql deadlock - @utils.synchronized('bsn-port-barrier') - def delete_subnet(self, context, id): - LOG.debug(_("NeutronRestProxyV2: delete_subnet() called")) - orig_subnet = super(NeutronRestProxyV2, self).get_subnet(context, id) - net_id = orig_subnet['network_id'] - with context.session.begin(subtransactions=True): - # delete subnet in DB - super(NeutronRestProxyV2, self).delete_subnet(context, id) - orig_net = super(NeutronRestProxyV2, self).get_network(context, - net_id) - # update network on network controller - exception will rollback - self._send_update_network(orig_net, context) - - def _get_tenant_default_router_rules(self, tenant): - rules = cfg.CONF.ROUTER.tenant_default_router_rule - defaultset = [] - tenantset = [] - for rule in rules: - items = rule.split(':') - if len(items) == 5: - (tenantid, source, destination, action, nexthops) = items - elif len(items) == 4: - (tenantid, source, destination, action) = items - nexthops = '' - else: - continue - parsedrule = {'source': source, - 'destination': destination, 'action': action, - 'nexthops': nexthops.split(',')} - if parsedrule['nexthops'][0] == '': - parsedrule['nexthops'] = [] - if tenantid == '*': - defaultset.append(parsedrule) - if tenantid == tenant: - tenantset.append(parsedrule) - if tenantset: - return tenantset - return defaultset - - def create_router(self, context, router): - LOG.debug(_("NeutronRestProxyV2: create_router() called")) - - self._warn_on_state_status(router['router']) - - tenant_id = self._get_tenant_id_for_create(context, router["router"]) - - # set default router rules - rules = self._get_tenant_default_router_rules(tenant_id) - router['router']['router_rules'] = rules - - with context.session.begin(subtransactions=True): - # create router in DB - new_router = super(NeutronRestProxyV2, self).create_router(context, - router) - mapped_router = self._map_state_and_status(new_router) - 
self.servers.rest_create_router(tenant_id, mapped_router) - - # return created router - return new_router - - def update_router(self, context, router_id, router): - - LOG.debug(_("NeutronRestProxyV2.update_router() called")) - - self._warn_on_state_status(router['router']) - - orig_router = super(NeutronRestProxyV2, self).get_router(context, - router_id) - tenant_id = orig_router["tenant_id"] - with context.session.begin(subtransactions=True): - new_router = super(NeutronRestProxyV2, - self).update_router(context, router_id, router) - router = self._map_state_and_status(new_router) - - # update router on network controller - self.servers.rest_update_router(tenant_id, router, router_id) - - # return updated router - return new_router - - # NOTE(kevinbenton): workaround for eventlet/mysql deadlock. - # delete_router ends up calling _delete_port instead of delete_port. - @utils.synchronized('bsn-port-barrier') - def delete_router(self, context, router_id): - LOG.debug(_("NeutronRestProxyV2: delete_router() called")) - - with context.session.begin(subtransactions=True): - orig_router = self._get_router(context, router_id) - tenant_id = orig_router["tenant_id"] - - # Ensure that the router is not used - router_filter = {'router_id': [router_id]} - fips = self.get_floatingips_count(context.elevated(), - filters=router_filter) - if fips: - raise l3.RouterInUse(router_id=router_id) - - device_owner = l3_db.DEVICE_OWNER_ROUTER_INTF - device_filter = {'device_id': [router_id], - 'device_owner': [device_owner]} - ports = self.get_ports_count(context.elevated(), - filters=device_filter) - if ports: - raise l3.RouterInUse(router_id=router_id) - ret_val = super(NeutronRestProxyV2, - self).delete_router(context, router_id) - - # delete from network ctrl - self.servers.rest_delete_router(tenant_id, router_id) - return ret_val - - def add_router_interface(self, context, router_id, interface_info): - - LOG.debug(_("NeutronRestProxyV2: add_router_interface() called")) - - # Validate 
args - router = self._get_router(context, router_id) - tenant_id = router['tenant_id'] - - with context.session.begin(subtransactions=True): - # create interface in DB - new_intf_info = super(NeutronRestProxyV2, - self).add_router_interface(context, - router_id, - interface_info) - port = self._get_port(context, new_intf_info['port_id']) - net_id = port['network_id'] - subnet_id = new_intf_info['subnet_id'] - # we will use the port's network id as interface's id - interface_id = net_id - intf_details = self._get_router_intf_details(context, - interface_id, - subnet_id) - - # create interface on the network controller - self.servers.rest_add_router_interface(tenant_id, router_id, - intf_details) - return new_intf_info - - def remove_router_interface(self, context, router_id, interface_info): - - LOG.debug(_("NeutronRestProxyV2: remove_router_interface() called")) - - # Validate args - router = self._get_router(context, router_id) - tenant_id = router['tenant_id'] - - # we will first get the interface identifier before deleting in the DB - if not interface_info: - msg = _("Either subnet_id or port_id must be specified") - raise exceptions.BadRequest(resource='router', msg=msg) - if 'port_id' in interface_info: - port = self._get_port(context, interface_info['port_id']) - interface_id = port['network_id'] - elif 'subnet_id' in interface_info: - subnet = self._get_subnet(context, interface_info['subnet_id']) - interface_id = subnet['network_id'] - else: - msg = _("Either subnet_id or port_id must be specified") - raise exceptions.BadRequest(resource='router', msg=msg) - - with context.session.begin(subtransactions=True): - # remove router in DB - del_ret = super(NeutronRestProxyV2, - self).remove_router_interface(context, - router_id, - interface_info) - - # create router on the network controller - self.servers.rest_remove_router_interface(tenant_id, router_id, - interface_id) - return del_ret - - def create_floatingip(self, context, floatingip): - 
LOG.debug(_("NeutronRestProxyV2: create_floatingip() called")) - - with context.session.begin(subtransactions=True): - # create floatingip in DB - new_fl_ip = super(NeutronRestProxyV2, - self).create_floatingip(context, floatingip) - - # create floatingip on the network controller - try: - if 'floatingip' in self.servers.get_capabilities(): - self.servers.rest_create_floatingip( - new_fl_ip['tenant_id'], new_fl_ip) - else: - self._send_floatingip_update(context) - except servermanager.RemoteRestError as e: - with excutils.save_and_reraise_exception(): - LOG.error( - _("NeutronRestProxyV2: Unable to create remote " - "floating IP: %s"), e) - # return created floating IP - return new_fl_ip - - def update_floatingip(self, context, id, floatingip): - LOG.debug(_("NeutronRestProxyV2: update_floatingip() called")) - - with context.session.begin(subtransactions=True): - # update floatingip in DB - new_fl_ip = super(NeutronRestProxyV2, - self).update_floatingip(context, id, floatingip) - - # update network on network controller - if 'floatingip' in self.servers.get_capabilities(): - self.servers.rest_update_floatingip(new_fl_ip['tenant_id'], - new_fl_ip, id) - else: - self._send_floatingip_update(context) - return new_fl_ip - - def delete_floatingip(self, context, id): - LOG.debug(_("NeutronRestProxyV2: delete_floatingip() called")) - - with context.session.begin(subtransactions=True): - # delete floating IP in DB - old_fip = super(NeutronRestProxyV2, self).get_floatingip(context, - id) - super(NeutronRestProxyV2, self).delete_floatingip(context, id) - - # update network on network controller - if 'floatingip' in self.servers.get_capabilities(): - self.servers.rest_delete_floatingip(old_fip['tenant_id'], id) - else: - self._send_floatingip_update(context) - - def disassociate_floatingips(self, context, port_id): - LOG.debug(_("NeutronRestProxyV2: diassociate_floatingips() called")) - super(NeutronRestProxyV2, self).disassociate_floatingips(context, - port_id) - 
self._send_floatingip_update(context) - - # overriding method from l3_db as original method calls - # self.delete_floatingip() which in turn calls self.delete_port() which - # is locked with 'bsn-port-barrier' - def delete_disassociated_floatingips(self, context, network_id): - query = self._model_query(context, l3_db.FloatingIP) - query = query.filter_by(floating_network_id=network_id, - fixed_port_id=None, - router_id=None) - for fip in query: - context.session.delete(fip) - self._delete_port(context.elevated(), fip['floating_port_id']) - - def _send_floatingip_update(self, context): - try: - ext_net_id = self.get_external_network_id(context) - if ext_net_id: - # Use the elevated state of the context for the ext_net query - admin_context = context.elevated() - ext_net = super(NeutronRestProxyV2, - self).get_network(admin_context, ext_net_id) - # update external network on network controller - self._send_update_network(ext_net, admin_context) - except exceptions.TooManyExternalNetworks: - # get_external_network can raise errors when multiple external - # networks are detected, which isn't supported by the Plugin - LOG.error(_("NeutronRestProxyV2: too many external networks")) - - def _add_host_route(self, context, destination, port): - subnet = {} - for fixed_ip in port['fixed_ips']: - subnet_id = fixed_ip['subnet_id'] - nexthop = fixed_ip['ip_address'] - subnet['host_routes'] = [{'destination': destination, - 'nexthop': nexthop}] - updated_subnet = self.update_subnet(context, - subnet_id, - {'subnet': subnet}) - payload = {'subnet': updated_subnet} - self._dhcp_agent_notifier.notify(context, payload, - 'subnet.update.end') - LOG.debug(_("Adding host route: ")) - LOG.debug(_("Destination:%(dst)s nexthop:%(next)s"), - {'dst': destination, 'next': nexthop}) diff --git a/neutron/plugins/bigswitch/routerrule_db.py b/neutron/plugins/bigswitch/routerrule_db.py deleted file mode 100644 index e947a1f9d..000000000 --- a/neutron/plugins/bigswitch/routerrule_db.py +++ 
/dev/null @@ -1,148 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2013, Big Switch Networks -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo.config import cfg -import sqlalchemy as sa -from sqlalchemy import orm - -from neutron.db import l3_db -from neutron.db import model_base -from neutron.openstack.common import log as logging -from neutron.plugins.bigswitch.extensions import routerrule - - -LOG = logging.getLogger(__name__) - - -class RouterRule(model_base.BASEV2): - id = sa.Column(sa.Integer, primary_key=True) - source = sa.Column(sa.String(64), nullable=False) - destination = sa.Column(sa.String(64), nullable=False) - nexthops = orm.relationship('NextHop', cascade='all,delete') - action = sa.Column(sa.String(10), nullable=False) - router_id = sa.Column(sa.String(36), - sa.ForeignKey('routers.id', - ondelete="CASCADE")) - - -class NextHop(model_base.BASEV2): - rule_id = sa.Column(sa.Integer, - sa.ForeignKey('routerrules.id', - ondelete="CASCADE"), - primary_key=True) - nexthop = sa.Column(sa.String(64), nullable=False, primary_key=True) - - -class RouterRule_db_mixin(l3_db.L3_NAT_db_mixin): - """ Mixin class to support route rule configuration on a router""" - def update_router(self, context, id, router): - r = router['router'] - with context.session.begin(subtransactions=True): - router_db = self._get_router(context, id) - if 'router_rules' in r: - self._update_router_rules(context, - router_db, - 
r['router_rules']) - updated = super(RouterRule_db_mixin, self).update_router( - context, id, router) - updated['router_rules'] = self._get_router_rules_by_router_id( - context, id) - - return updated - - def create_router(self, context, router): - r = router['router'] - with context.session.begin(subtransactions=True): - router_db = super(RouterRule_db_mixin, self).create_router( - context, router) - if 'router_rules' in r: - self._update_router_rules(context, - router_db, - r['router_rules']) - else: - LOG.debug(_('No rules in router')) - router_db['router_rules'] = self._get_router_rules_by_router_id( - context, router_db['id']) - - return router_db - - def _update_router_rules(self, context, router, rules): - if len(rules) > cfg.CONF.ROUTER.max_router_rules: - raise routerrule.RulesExhausted( - router_id=router['id'], - quota=cfg.CONF.ROUTER.max_router_rules) - del_context = context.session.query(RouterRule) - del_context.filter_by(router_id=router['id']).delete() - context.session.expunge_all() - LOG.debug(_('Updating router rules to %s'), rules) - for rule in rules: - router_rule = RouterRule( - router_id=router['id'], - destination=rule['destination'], - source=rule['source'], - action=rule['action']) - router_rule.nexthops = [NextHop(nexthop=hop) - for hop in rule['nexthops']] - context.session.add(router_rule) - context.session.flush() - - def _make_router_rule_list(self, router_rules): - ruleslist = [] - for rule in router_rules: - hops = [hop['nexthop'] for hop in rule['nexthops']] - ruleslist.append({'id': rule['id'], - 'destination': rule['destination'], - 'source': rule['source'], - 'action': rule['action'], - 'nexthops': hops}) - return ruleslist - - def _get_router_rules_by_router_id(self, context, id): - query = context.session.query(RouterRule) - router_rules = query.filter_by(router_id=id).all() - return self._make_router_rule_list(router_rules) - - def get_router(self, context, id, fields=None): - with 
context.session.begin(subtransactions=True): - router = super(RouterRule_db_mixin, self).get_router( - context, id, fields) - router['router_rules'] = self._get_router_rules_by_router_id( - context, id) - return router - - def get_routers(self, context, filters=None, fields=None, - sorts=None, limit=None, marker=None, - page_reverse=False): - with context.session.begin(subtransactions=True): - routers = super(RouterRule_db_mixin, self).get_routers( - context, filters, fields, sorts=sorts, limit=limit, - marker=marker, page_reverse=page_reverse) - for router in routers: - router['router_rules'] = self._get_router_rules_by_router_id( - context, router['id']) - return routers - - def get_sync_data(self, context, router_ids=None, active=None): - """Query routers and their related floating_ips, interfaces.""" - with context.session.begin(subtransactions=True): - routers = super(RouterRule_db_mixin, - self).get_sync_data(context, router_ids, - active=active) - for router in routers: - router['router_rules'] = self._get_router_rules_by_router_id( - context, router['id']) - return routers diff --git a/neutron/plugins/bigswitch/servermanager.py b/neutron/plugins/bigswitch/servermanager.py deleted file mode 100644 index caaa10133..000000000 --- a/neutron/plugins/bigswitch/servermanager.py +++ /dev/null @@ -1,595 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# Copyright 2014 Big Switch Networks, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# -# @author: Mandeep Dhami, Big Switch Networks, Inc. -# @author: Sumit Naiksatam, sumitnaiksatam@gmail.com, Big Switch Networks, Inc. -# @author: Kevin Benton, Big Switch Networks, Inc. - -""" -This module manages the HTTP and HTTPS connections to the backend controllers. - -The main class it provides for external use is ServerPool which manages a set -of ServerProxy objects that correspond to individual backend controllers. - -The following functionality is handled by this module: -- Translation of rest_* function calls to HTTP/HTTPS calls to the controllers -- Automatic failover between controllers -- SSL Certificate enforcement -- HTTP Authentication - -""" -import base64 -import httplib -import os -import socket -import ssl - -import eventlet -from oslo.config import cfg - -from neutron.common import exceptions -from neutron.common import utils -from neutron.openstack.common import excutils -from neutron.openstack.common import jsonutils as json -from neutron.openstack.common import log as logging -from neutron.plugins.bigswitch.db import consistency_db as cdb - -LOG = logging.getLogger(__name__) - -# The following are used to invoke the API on the external controller -CAPABILITIES_PATH = "/capabilities" -NET_RESOURCE_PATH = "/tenants/%s/networks" -PORT_RESOURCE_PATH = "/tenants/%s/networks/%s/ports" -ROUTER_RESOURCE_PATH = "/tenants/%s/routers" -ROUTER_INTF_OP_PATH = "/tenants/%s/routers/%s/interfaces" -NETWORKS_PATH = "/tenants/%s/networks/%s" -FLOATINGIPS_PATH = "/tenants/%s/floatingips/%s" -PORTS_PATH = "/tenants/%s/networks/%s/ports/%s" -ATTACHMENT_PATH = "/tenants/%s/networks/%s/ports/%s/attachment" -ROUTERS_PATH = "/tenants/%s/routers/%s" -ROUTER_INTF_PATH = "/tenants/%s/routers/%s/interfaces/%s" -TOPOLOGY_PATH = "/topology" -HEALTH_PATH = "/health" -SUCCESS_CODES = range(200, 207) -FAILURE_CODES = [0, 301, 302, 303, 400, 401, 403, 404, 500, 501, 502, 503, - 504, 505] -BASE_URI = '/networkService/v1.1' -ORCHESTRATION_SERVICE_ID = 'Neutron v2.0' 
-HASH_MATCH_HEADER = 'X-BSN-BVS-HASH-MATCH' -# error messages -NXNETWORK = 'NXVNS' - - -class RemoteRestError(exceptions.NeutronException): - message = _("Error in REST call to remote network " - "controller: %(reason)s") - status = None - - def __init__(self, **kwargs): - self.status = kwargs.pop('status', None) - self.reason = kwargs.get('reason') - super(RemoteRestError, self).__init__(**kwargs) - - -class ServerProxy(object): - """REST server proxy to a network controller.""" - - def __init__(self, server, port, ssl, auth, neutron_id, timeout, - base_uri, name, mypool, combined_cert): - self.server = server - self.port = port - self.ssl = ssl - self.base_uri = base_uri - self.timeout = timeout - self.name = name - self.success_codes = SUCCESS_CODES - self.auth = None - self.neutron_id = neutron_id - self.failed = False - self.capabilities = [] - # enable server to reference parent pool - self.mypool = mypool - # cache connection here to avoid a SSL handshake for every connection - self.currentconn = None - if auth: - self.auth = 'Basic ' + base64.encodestring(auth).strip() - self.combined_cert = combined_cert - - def get_capabilities(self): - try: - body = self.rest_call('GET', CAPABILITIES_PATH)[2] - self.capabilities = json.loads(body) - except Exception: - LOG.exception(_("Couldn't retrieve capabilities. 
" - "Newer API calls won't be supported.")) - LOG.info(_("The following capabilities were received " - "for %(server)s: %(cap)s"), {'server': self.server, - 'cap': self.capabilities}) - return self.capabilities - - def rest_call(self, action, resource, data='', headers={}, timeout=False, - reconnect=False): - uri = self.base_uri + resource - body = json.dumps(data) - if not headers: - headers = {} - headers['Content-type'] = 'application/json' - headers['Accept'] = 'application/json' - headers['NeutronProxy-Agent'] = self.name - headers['Instance-ID'] = self.neutron_id - headers['Orchestration-Service-ID'] = ORCHESTRATION_SERVICE_ID - headers[HASH_MATCH_HEADER] = self.mypool.consistency_hash or '' - if 'keep-alive' in self.capabilities: - headers['Connection'] = 'keep-alive' - else: - reconnect = True - if self.auth: - headers['Authorization'] = self.auth - - LOG.debug(_("ServerProxy: server=%(server)s, port=%(port)d, " - "ssl=%(ssl)r"), - {'server': self.server, 'port': self.port, 'ssl': self.ssl}) - LOG.debug(_("ServerProxy: resource=%(resource)s, data=%(data)r, " - "headers=%(headers)r, action=%(action)s"), - {'resource': resource, 'data': data, 'headers': headers, - 'action': action}) - - # unspecified timeout is False because a timeout can be specified as - # None to indicate no timeout. 
- if timeout is False: - timeout = self.timeout - - if timeout != self.timeout: - # need a new connection if timeout has changed - reconnect = True - - if not self.currentconn or reconnect: - if self.currentconn: - self.currentconn.close() - if self.ssl: - self.currentconn = HTTPSConnectionWithValidation( - self.server, self.port, timeout=timeout) - if self.currentconn is None: - LOG.error(_('ServerProxy: Could not establish HTTPS ' - 'connection')) - return 0, None, None, None - self.currentconn.combined_cert = self.combined_cert - else: - self.currentconn = httplib.HTTPConnection( - self.server, self.port, timeout=timeout) - if self.currentconn is None: - LOG.error(_('ServerProxy: Could not establish HTTP ' - 'connection')) - return 0, None, None, None - - try: - self.currentconn.request(action, uri, body, headers) - response = self.currentconn.getresponse() - newhash = response.getheader(HASH_MATCH_HEADER) - if newhash: - self._put_consistency_hash(newhash) - respstr = response.read() - respdata = respstr - if response.status in self.success_codes: - try: - respdata = json.loads(respstr) - except ValueError: - # response was not JSON, ignore the exception - pass - ret = (response.status, response.reason, respstr, respdata) - except httplib.HTTPException: - # If we were using a cached connection, try again with a new one. 
- with excutils.save_and_reraise_exception() as ctxt: - self.currentconn.close() - if reconnect: - # if reconnect is true, this was on a fresh connection so - # reraise since this server seems to be broken - ctxt.reraise = True - else: - # if reconnect is false, it was a cached connection so - # try one more time before re-raising - ctxt.reraise = False - return self.rest_call(action, resource, data, headers, - timeout=timeout, reconnect=True) - except (socket.timeout, socket.error) as e: - self.currentconn.close() - LOG.error(_('ServerProxy: %(action)s failure, %(e)r'), - {'action': action, 'e': e}) - ret = 0, None, None, None - LOG.debug(_("ServerProxy: status=%(status)d, reason=%(reason)r, " - "ret=%(ret)s, data=%(data)r"), {'status': ret[0], - 'reason': ret[1], - 'ret': ret[2], - 'data': ret[3]}) - return ret - - def _put_consistency_hash(self, newhash): - self.mypool.consistency_hash = newhash - cdb.put_consistency_hash(newhash) - - -class ServerPool(object): - - def __init__(self, timeout=False, - base_uri=BASE_URI, name='NeutronRestProxy'): - LOG.debug(_("ServerPool: initializing")) - # 'servers' is the list of network controller REST end-points - # (used in order specified till one succeeds, and it is sticky - # till next failure). Use 'server_auth' to encode api-key - servers = cfg.CONF.RESTPROXY.servers - self.auth = cfg.CONF.RESTPROXY.server_auth - self.ssl = cfg.CONF.RESTPROXY.server_ssl - self.neutron_id = cfg.CONF.RESTPROXY.neutron_id - self.base_uri = base_uri - self.name = name - self.timeout = cfg.CONF.RESTPROXY.server_timeout - self.always_reconnect = not cfg.CONF.RESTPROXY.cache_connections - default_port = 8000 - if timeout is not False: - self.timeout = timeout - - # Function to use to retrieve topology for consistency syncs. - # Needs to be set by module that uses the servermanager. - self.get_topo_function = None - self.get_topo_function_args = {} - - # Hash to send to backend with request as expected previous - # state to verify consistency. 
- self.consistency_hash = cdb.get_consistency_hash() - - if not servers: - raise cfg.Error(_('Servers not defined. Aborting server manager.')) - servers = [s if len(s.rsplit(':', 1)) == 2 - else "%s:%d" % (s, default_port) - for s in servers] - if any((len(spl) != 2 or not spl[1].isdigit()) - for spl in [sp.rsplit(':', 1) - for sp in servers]): - raise cfg.Error(_('Servers must be defined as :. ' - 'Configuration was %s') % servers) - self.servers = [ - self.server_proxy_for(server, int(port)) - for server, port in (s.rsplit(':', 1) for s in servers) - ] - eventlet.spawn(self._consistency_watchdog, - cfg.CONF.RESTPROXY.consistency_interval) - LOG.debug(_("ServerPool: initialization done")) - - def get_capabilities(self): - # lookup on first try - try: - return self.capabilities - except AttributeError: - # each server should return a list of capabilities it supports - # e.g. ['floatingip'] - capabilities = [set(server.get_capabilities()) - for server in self.servers] - # Pool only supports what all of the servers support - self.capabilities = set.intersection(*capabilities) - return self.capabilities - - def server_proxy_for(self, server, port): - combined_cert = self._get_combined_cert_for_server(server, port) - return ServerProxy(server, port, self.ssl, self.auth, self.neutron_id, - self.timeout, self.base_uri, self.name, mypool=self, - combined_cert=combined_cert) - - def _get_combined_cert_for_server(self, server, port): - # The ssl library requires a combined file with all trusted certs - # so we make one containing the trusted CAs and the corresponding - # host cert for this server - combined_cert = None - if self.ssl and not cfg.CONF.RESTPROXY.no_ssl_validation: - base_ssl = cfg.CONF.RESTPROXY.ssl_cert_directory - host_dir = os.path.join(base_ssl, 'host_certs') - ca_dir = os.path.join(base_ssl, 'ca_certs') - combined_dir = os.path.join(base_ssl, 'combined') - combined_cert = os.path.join(combined_dir, '%s.pem' % server) - if not os.path.exists(base_ssl): - 
raise cfg.Error(_('ssl_cert_directory [%s] does not exist. ' - 'Create it or disable ssl.') % base_ssl) - for automake in [combined_dir, ca_dir, host_dir]: - if not os.path.exists(automake): - os.makedirs(automake) - - # get all CA certs - certs = self._get_ca_cert_paths(ca_dir) - - # check for a host specific cert - hcert, exists = self._get_host_cert_path(host_dir, server) - if exists: - certs.append(hcert) - elif cfg.CONF.RESTPROXY.ssl_sticky: - self._fetch_and_store_cert(server, port, hcert) - certs.append(hcert) - if not certs: - raise cfg.Error(_('No certificates were found to verify ' - 'controller %s') % (server)) - self._combine_certs_to_file(certs, combined_cert) - return combined_cert - - def _combine_certs_to_file(self, certs, cfile): - ''' - Concatenates the contents of each certificate in a list of - certificate paths to one combined location for use with ssl - sockets. - ''' - with open(cfile, 'w') as combined: - for c in certs: - with open(c, 'r') as cert_handle: - combined.write(cert_handle.read()) - - def _get_host_cert_path(self, host_dir, server): - ''' - returns full path and boolean indicating existence - ''' - hcert = os.path.join(host_dir, '%s.pem' % server) - if os.path.exists(hcert): - return hcert, True - return hcert, False - - def _get_ca_cert_paths(self, ca_dir): - certs = [os.path.join(root, name) - for name in [ - name for (root, dirs, files) in os.walk(ca_dir) - for name in files - ] - if name.endswith('.pem')] - return certs - - def _fetch_and_store_cert(self, server, port, path): - ''' - Grabs a certificate from a server and writes it to - a given path. - ''' - try: - cert = ssl.get_server_certificate((server, port)) - except Exception as e: - raise cfg.Error(_('Could not retrieve initial ' - 'certificate from controller %(server)s. 
' - 'Error details: %(error)s') % - {'server': server, 'error': str(e)}) - - LOG.warning(_("Storing to certificate for host %(server)s " - "at %(path)s") % {'server': server, - 'path': path}) - self._file_put_contents(path, cert) - - return cert - - def _file_put_contents(self, path, contents): - # Simple method to write to file. - # Created for easy Mocking - with open(path, 'w') as handle: - handle.write(contents) - - def server_failure(self, resp, ignore_codes=[]): - """Define failure codes as required. - - Note: We assume 301-303 is a failure, and try the next server in - the server pool. - """ - return (resp[0] in FAILURE_CODES and resp[0] not in ignore_codes) - - def action_success(self, resp): - """Defining success codes as required. - - Note: We assume any valid 2xx as being successful response. - """ - return resp[0] in SUCCESS_CODES - - @utils.synchronized('bsn-rest-call') - def rest_call(self, action, resource, data, headers, ignore_codes, - timeout=False): - good_first = sorted(self.servers, key=lambda x: x.failed) - first_response = None - for active_server in good_first: - ret = active_server.rest_call(action, resource, data, headers, - timeout, - reconnect=self.always_reconnect) - # If inconsistent, do a full synchronization - if ret[0] == httplib.CONFLICT: - if not self.get_topo_function: - raise cfg.Error(_('Server requires synchronization, ' - 'but no topology function was defined.')) - data = self.get_topo_function(**self.get_topo_function_args) - active_server.rest_call('PUT', TOPOLOGY_PATH, data, - timeout=None) - # Store the first response as the error to be bubbled up to the - # user since it was a good server. Subsequent servers will most - # likely be cluster slaves and won't have a useful error for the - # user (e.g. 
302 redirect to master) - if not first_response: - first_response = ret - if not self.server_failure(ret, ignore_codes): - active_server.failed = False - return ret - else: - LOG.error(_('ServerProxy: %(action)s failure for servers: ' - '%(server)r Response: %(response)s'), - {'action': action, - 'server': (active_server.server, - active_server.port), - 'response': ret[3]}) - LOG.error(_("ServerProxy: Error details: status=%(status)d, " - "reason=%(reason)r, ret=%(ret)s, data=%(data)r"), - {'status': ret[0], 'reason': ret[1], 'ret': ret[2], - 'data': ret[3]}) - active_server.failed = True - - # All servers failed, reset server list and try again next time - LOG.error(_('ServerProxy: %(action)s failure for all servers: ' - '%(server)r'), - {'action': action, - 'server': tuple((s.server, - s.port) for s in self.servers)}) - return first_response - - def rest_action(self, action, resource, data='', errstr='%s', - ignore_codes=[], headers={}, timeout=False): - """ - Wrapper for rest_call that verifies success and raises a - RemoteRestError on failure with a provided error string - By default, 404 errors on DELETE calls are ignored because - they already do not exist on the backend. 
- """ - if not ignore_codes and action == 'DELETE': - ignore_codes = [404] - resp = self.rest_call(action, resource, data, headers, ignore_codes, - timeout) - if self.server_failure(resp, ignore_codes): - LOG.error(errstr, resp[2]) - raise RemoteRestError(reason=resp[2], status=resp[0]) - if resp[0] in ignore_codes: - LOG.warning(_("NeutronRestProxyV2: Received and ignored error " - "code %(code)s on %(action)s action to resource " - "%(resource)s"), - {'code': resp[2], 'action': action, - 'resource': resource}) - return resp - - def rest_create_router(self, tenant_id, router): - resource = ROUTER_RESOURCE_PATH % tenant_id - data = {"router": router} - errstr = _("Unable to create remote router: %s") - self.rest_action('POST', resource, data, errstr) - - def rest_update_router(self, tenant_id, router, router_id): - resource = ROUTERS_PATH % (tenant_id, router_id) - data = {"router": router} - errstr = _("Unable to update remote router: %s") - self.rest_action('PUT', resource, data, errstr) - - def rest_delete_router(self, tenant_id, router_id): - resource = ROUTERS_PATH % (tenant_id, router_id) - errstr = _("Unable to delete remote router: %s") - self.rest_action('DELETE', resource, errstr=errstr) - - def rest_add_router_interface(self, tenant_id, router_id, intf_details): - resource = ROUTER_INTF_OP_PATH % (tenant_id, router_id) - data = {"interface": intf_details} - errstr = _("Unable to add router interface: %s") - self.rest_action('POST', resource, data, errstr) - - def rest_remove_router_interface(self, tenant_id, router_id, interface_id): - resource = ROUTER_INTF_PATH % (tenant_id, router_id, interface_id) - errstr = _("Unable to delete remote intf: %s") - self.rest_action('DELETE', resource, errstr=errstr) - - def rest_create_network(self, tenant_id, network): - resource = NET_RESOURCE_PATH % tenant_id - data = {"network": network} - errstr = _("Unable to create remote network: %s") - self.rest_action('POST', resource, data, errstr) - - def 
rest_update_network(self, tenant_id, net_id, network): - resource = NETWORKS_PATH % (tenant_id, net_id) - data = {"network": network} - errstr = _("Unable to update remote network: %s") - self.rest_action('PUT', resource, data, errstr) - - def rest_delete_network(self, tenant_id, net_id): - resource = NETWORKS_PATH % (tenant_id, net_id) - errstr = _("Unable to update remote network: %s") - self.rest_action('DELETE', resource, errstr=errstr) - - def rest_create_port(self, tenant_id, net_id, port): - resource = ATTACHMENT_PATH % (tenant_id, net_id, port["id"]) - data = {"port": port} - device_id = port.get("device_id") - if not port["mac_address"] or not device_id: - # controller only cares about ports attached to devices - LOG.warning(_("No device MAC attached to port %s. " - "Skipping notification to controller."), port["id"]) - return - data["attachment"] = {"id": device_id, - "mac": port["mac_address"]} - errstr = _("Unable to create remote port: %s") - self.rest_action('PUT', resource, data, errstr) - - def rest_delete_port(self, tenant_id, network_id, port_id): - resource = ATTACHMENT_PATH % (tenant_id, network_id, port_id) - errstr = _("Unable to delete remote port: %s") - self.rest_action('DELETE', resource, errstr=errstr) - - def rest_update_port(self, tenant_id, net_id, port): - # Controller has no update operation for the port endpoint - # the create PUT method will replace - self.rest_create_port(tenant_id, net_id, port) - - def rest_create_floatingip(self, tenant_id, floatingip): - resource = FLOATINGIPS_PATH % (tenant_id, floatingip['id']) - errstr = _("Unable to create floating IP: %s") - self.rest_action('PUT', resource, errstr=errstr) - - def rest_update_floatingip(self, tenant_id, floatingip, oldid): - resource = FLOATINGIPS_PATH % (tenant_id, oldid) - errstr = _("Unable to update floating IP: %s") - self.rest_action('PUT', resource, errstr=errstr) - - def rest_delete_floatingip(self, tenant_id, oldid): - resource = FLOATINGIPS_PATH % (tenant_id, 
oldid) - errstr = _("Unable to delete floating IP: %s") - self.rest_action('DELETE', resource, errstr=errstr) - - def _consistency_watchdog(self, polling_interval=60): - if 'consistency' not in self.get_capabilities(): - LOG.warning(_("Backend server(s) do not support automated " - "consitency checks.")) - return - if not polling_interval: - LOG.warning(_("Consistency watchdog disabled by polling interval " - "setting of %s."), polling_interval) - return - while True: - # If consistency is supported, all we have to do is make any - # rest call and the consistency header will be added. If it - # doesn't match, the backend will return a synchronization error - # that will be handled by the rest_action. - eventlet.sleep(polling_interval) - try: - self.rest_action('GET', HEALTH_PATH) - except Exception: - LOG.exception(_("Encountered an error checking controller " - "health.")) - - -class HTTPSConnectionWithValidation(httplib.HTTPSConnection): - - # If combined_cert is None, the connection will continue without - # any certificate validation. 
- combined_cert = None - - def connect(self): - try: - sock = socket.create_connection((self.host, self.port), - self.timeout, self.source_address) - except AttributeError: - # python 2.6 doesn't have the source_address attribute - sock = socket.create_connection((self.host, self.port), - self.timeout) - if self._tunnel_host: - self.sock = sock - self._tunnel() - - if self.combined_cert: - self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, - cert_reqs=ssl.CERT_REQUIRED, - ca_certs=self.combined_cert) - else: - self.sock = ssl.wrap_socket(sock, self.key_file, - self.cert_file, - cert_reqs=ssl.CERT_NONE) diff --git a/neutron/plugins/bigswitch/tests/__init__.py b/neutron/plugins/bigswitch/tests/__init__.py deleted file mode 100644 index 2a2421616..000000000 --- a/neutron/plugins/bigswitch/tests/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2012 Big Switch Networks, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# diff --git a/neutron/plugins/bigswitch/tests/test_server.py b/neutron/plugins/bigswitch/tests/test_server.py deleted file mode 100755 index ee0c2be3d..000000000 --- a/neutron/plugins/bigswitch/tests/test_server.py +++ /dev/null @@ -1,188 +0,0 @@ -#!/usr/bin/env python -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2012, Big Switch Networks, Inc. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Mandeep Dhami, Big Switch Networks, Inc. - -"""Test server mocking a REST based network ctrl. - -Used for NeutronRestProxy tests -""" -from __future__ import print_function - -import re - -from six import moves -from wsgiref import simple_server - -from neutron.openstack.common import jsonutils as json - - -class TestNetworkCtrl(object): - - def __init__(self, host='', port=8000, - default_status='404 Not Found', - default_response='404 Not Found', - debug=False): - self.host = host - self.port = port - self.default_status = default_status - self.default_response = default_response - self.debug = debug - self.debug_env = False - self.debug_resp = False - self.matches = [] - - def match(self, prior, method_regexp, uri_regexp, handler, data=None, - multi=True): - """Add to the list of exptected inputs. - - The incoming request is matched in the order of priority. For same - priority, match the oldest match request first. - - :param prior: intgere priority of this match (e.g. 100) - :param method_regexp: regexp to match method (e.g. 'PUT|POST') - :param uri_regexp: regexp to match uri (e.g. 
'/quantum/v?.?/') - :param handler: function with signature: - lambda(method, uri, body, **kwargs) : status, body - where - - method: HTTP method for this request - - uri: URI for this HTTP request - - body: body of this HTTP request - - kwargs are: - - data: data object that was in the match call - - node: TestNetworkCtrl object itself - - id: offset of the matching tuple - and return values is: - (status, body) where: - - status: HTTP resp status (e.g. '200 OK'). - If None, use default_status - - body: HTTP resp body. If None, use '' - """ - assert int(prior) == prior, 'Priority should an integer be >= 0' - assert prior >= 0, 'Priority should an integer be >= 0' - - lo, hi = 0, len(self.matches) - while lo < hi: - mid = (lo + hi) // 2 - if prior < self.matches[mid]: - hi = mid - else: - lo = mid + 1 - self.matches.insert(lo, (prior, method_regexp, uri_regexp, handler, - data, multi)) - - def remove_id(self, id_): - assert id_ >= 0, 'remove_id: id < 0' - assert id_ <= len(self.matches), 'remove_id: id > len()' - self.matches.pop(id_) - - def request_handler(self, method, uri, body): - retstatus = self.default_status - retbody = self.default_response - for i in moves.xrange(len(self.matches)): - (prior, method_regexp, uri_regexp, handler, data, multi) = \ - self.matches[i] - if re.match(method_regexp, method) and re.match(uri_regexp, uri): - kwargs = { - 'data': data, - 'node': self, - 'id': i, - } - retstatus, retbody = handler(method, uri, body, **kwargs) - if multi is False: - self.remove_id(i) - break - if retbody is None: - retbody = '' - return (retstatus, retbody) - - def server(self): - def app(environ, start_response): - uri = environ['PATH_INFO'] - method = environ['REQUEST_METHOD'] - headers = [('Content-type', 'text/json')] - content_len_str = environ['CONTENT_LENGTH'] - - content_len = 0 - request_data = None - if content_len_str: - content_len = int(content_len_str) - request_data = environ.get('wsgi.input').read(content_len) - if request_data: - try: 
- request_data = json.loads(request_data) - except Exception: - # OK for it not to be json! Ignore it - pass - - if self.debug: - print('\n') - if self.debug_env: - print('environ:') - for (key, value) in sorted(environ.iteritems()): - print(' %16s : %s' % (key, value)) - - print('%s %s' % (method, uri)) - if request_data: - print('%s' % - json.dumps(request_data, sort_keys=True, indent=4)) - - status, body = self.request_handler(method, uri, None) - body_data = None - if body: - try: - body_data = json.loads(body) - except Exception: - # OK for it not to be json! Ignore it - pass - - start_response(status, headers) - if self.debug: - if self.debug_env: - print('%s: %s' % ('Response', - json.dumps(body_data, sort_keys=True, indent=4))) - return body - return simple_server.make_server(self.host, self.port, app) - - def run(self): - print("Serving on port %d ..." % self.port) - try: - self.server().serve_forever() - except KeyboardInterrupt: - pass - - -if __name__ == "__main__": - import sys - - port = 8899 - if len(sys.argv) > 1: - port = int(sys.argv[1]) - - debug = False - if len(sys.argv) > 2: - if sys.argv[2].lower() in ['debug', 'true']: - debug = True - - ctrl = TestNetworkCtrl(port=port, - default_status='200 OK', - default_response='{"status":"200 OK"}', - debug=debug) - ctrl.match(100, 'GET', '/test', - lambda m, u, b, **k: ('200 OK', '["200 OK"]')) - ctrl.run() diff --git a/neutron/plugins/bigswitch/vcsversion.py b/neutron/plugins/bigswitch/vcsversion.py deleted file mode 100644 index 6ed5e2680..000000000 --- a/neutron/plugins/bigswitch/vcsversion.py +++ /dev/null @@ -1,27 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2013 Big Switch Networks, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Sumit Naiksatam, sumitnaiksatam@gmail.com -# -version_info = {'branch_nick': u'neutron/trunk', - 'revision_id': u'1', - 'revno': 0} - - -NEUTRONRESTPROXY_VERSION = ['2013', '1', None] - - -FINAL = False # This becomes true at Release Candidate time diff --git a/neutron/plugins/bigswitch/version.py b/neutron/plugins/bigswitch/version.py deleted file mode 100755 index 2069d0bc8..000000000 --- a/neutron/plugins/bigswitch/version.py +++ /dev/null @@ -1,53 +0,0 @@ -#!/usr/bin/env python -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2012 OpenStack Foundation -# Copyright 2012, Big Switch Networks, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# Based on openstack generic code -# @author: Mandeep Dhami, Big Switch Networks, Inc. 
- -"""Determine version of NeutronRestProxy plugin""" -from __future__ import print_function - -from neutron.plugins.bigswitch import vcsversion - - -YEAR, COUNT, REVISION = vcsversion.NEUTRONRESTPROXY_VERSION - - -def canonical_version_string(): - return '.'.join(filter(None, - vcsversion.NEUTRONRESTPROXY_VERSION)) - - -def version_string(): - if vcsversion.FINAL: - return canonical_version_string() - else: - return '%s-dev' % (canonical_version_string(),) - - -def vcs_version_string(): - return "%s:%s" % (vcsversion.version_info['branch_nick'], - vcsversion.version_info['revision_id']) - - -def version_string_with_vcs(): - return "%s-%s" % (canonical_version_string(), vcs_version_string()) - - -if __name__ == "__main__": - print(version_string_with_vcs()) diff --git a/neutron/plugins/brocade/NeutronPlugin.py b/neutron/plugins/brocade/NeutronPlugin.py deleted file mode 100644 index c633085d0..000000000 --- a/neutron/plugins/brocade/NeutronPlugin.py +++ /dev/null @@ -1,497 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2013 Brocade Communications System, Inc. -# All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# -# Authors: -# Shiv Haris (sharis@brocade.com) -# Varma Bhupatiraju (vbhupati@#brocade.com) -# -# (Some parts adapted from LinuxBridge Plugin) -# TODO(shiv) need support for security groups - - -"""Implentation of Brocade Neutron Plugin.""" - -from oslo.config import cfg - -from neutron.agent import securitygroups_rpc as sg_rpc -from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api -from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api -from neutron.common import constants as q_const -from neutron.common import rpc_compat -from neutron.common import topics -from neutron.common import utils -from neutron.db import agents_db -from neutron.db import agentschedulers_db -from neutron.db import api as db -from neutron.db import db_base_plugin_v2 -from neutron.db import dhcp_rpc_base -from neutron.db import external_net_db -from neutron.db import extraroute_db -from neutron.db import l3_agentschedulers_db -from neutron.db import l3_rpc_base -from neutron.db import portbindings_base -from neutron.db import securitygroups_rpc_base as sg_db_rpc -from neutron.extensions import portbindings -from neutron.extensions import securitygroup as ext_sg -from neutron.openstack.common import context -from neutron.openstack.common import importutils -from neutron.openstack.common import log as logging -from neutron.plugins.brocade.db import models as brocade_db -from neutron.plugins.brocade import vlanbm as vbm -from neutron.plugins.common import constants as svc_constants - - -LOG = logging.getLogger(__name__) -PLUGIN_VERSION = 0.88 -AGENT_OWNER_PREFIX = "network:" -NOS_DRIVER = 'neutron.plugins.brocade.nos.nosdriver.NOSdriver' - -SWITCH_OPTS = [cfg.StrOpt('address', default='', - help=_('The address of the host to SSH to')), - cfg.StrOpt('username', default='', - help=_('The SSH username to use')), - cfg.StrOpt('password', default='', secret=True, - help=_('The SSH password to use')), - cfg.StrOpt('ostype', default='NOS', - help=_('Currently unused')) - ] - 
-PHYSICAL_INTERFACE_OPTS = [cfg.StrOpt('physical_interface', default='eth0', - help=_('The network interface to use when creating' - 'a port')) - ] - -cfg.CONF.register_opts(SWITCH_OPTS, "SWITCH") -cfg.CONF.register_opts(PHYSICAL_INTERFACE_OPTS, "PHYSICAL_INTERFACE") - - -class BridgeRpcCallbacks(rpc_compat.RpcCallback, - dhcp_rpc_base.DhcpRpcCallbackMixin, - l3_rpc_base.L3RpcCallbackMixin, - sg_db_rpc.SecurityGroupServerRpcCallbackMixin): - """Agent callback.""" - - RPC_API_VERSION = '1.1' - # Device names start with "tap" - # history - # 1.1 Support Security Group RPC - TAP_PREFIX_LEN = 3 - - @classmethod - def get_port_from_device(cls, device): - """Get port from the brocade specific db.""" - - # TODO(shh) context is not being passed as - # an argument to this function; - # - # need to be fixed in: - # file: neutron/db/securtygroups_rpc_base.py - # function: securitygroup_rules_for_devices() - # which needs to pass context to us - - # Doing what other plugins are doing - session = db.get_session() - port = brocade_db.get_port_from_device( - session, device[cls.TAP_PREFIX_LEN:]) - - # TODO(shiv): need to extend the db model to include device owners - # make it appears that the device owner is of type network - if port: - port['device'] = device - port['device_owner'] = AGENT_OWNER_PREFIX - port['binding:vif_type'] = 'bridge' - return port - - def get_device_details(self, rpc_context, **kwargs): - """Agent requests device details.""" - - agent_id = kwargs.get('agent_id') - device = kwargs.get('device') - LOG.debug(_("Device %(device)s details requested from %(agent_id)s"), - {'device': device, 'agent_id': agent_id}) - port = brocade_db.get_port(rpc_context, device[self.TAP_PREFIX_LEN:]) - if port: - entry = {'device': device, - 'vlan_id': port.vlan_id, - 'network_id': port.network_id, - 'port_id': port.port_id, - 'physical_network': port.physical_interface, - 'admin_state_up': port.admin_state_up - } - - else: - entry = {'device': device} - LOG.debug(_("%s can not 
be found in database"), device) - return entry - - def update_device_down(self, rpc_context, **kwargs): - """Device no longer exists on agent.""" - - device = kwargs.get('device') - port = self.get_port_from_device(device) - if port: - entry = {'device': device, - 'exists': True} - # Set port status to DOWN - port_id = port['port_id'] - brocade_db.update_port_state(rpc_context, port_id, False) - else: - entry = {'device': device, - 'exists': False} - LOG.debug(_("%s can not be found in database"), device) - return entry - - -class AgentNotifierApi(rpc_compat.RpcProxy, - sg_rpc.SecurityGroupAgentRpcApiMixin): - """Agent side of the linux bridge rpc API. - - API version history: - 1.0 - Initial version. - 1.1 - Added get_active_networks_info, create_dhcp_port, - and update_dhcp_port methods. - - """ - - BASE_RPC_API_VERSION = '1.1' - - def __init__(self, topic): - super(AgentNotifierApi, self).__init__( - topic=topic, default_version=self.BASE_RPC_API_VERSION) - self.topic = topic - self.topic_network_delete = topics.get_topic_name(topic, - topics.NETWORK, - topics.DELETE) - self.topic_port_update = topics.get_topic_name(topic, - topics.PORT, - topics.UPDATE) - - def network_delete(self, context, network_id): - self.fanout_cast(context, - self.make_msg('network_delete', - network_id=network_id), - topic=self.topic_network_delete) - - def port_update(self, context, port, physical_network, vlan_id): - self.fanout_cast(context, - self.make_msg('port_update', - port=port, - physical_network=physical_network, - vlan_id=vlan_id), - topic=self.topic_port_update) - - -class BrocadePluginV2(db_base_plugin_v2.NeutronDbPluginV2, - external_net_db.External_net_db_mixin, - extraroute_db.ExtraRoute_db_mixin, - sg_db_rpc.SecurityGroupServerRpcMixin, - l3_agentschedulers_db.L3AgentSchedulerDbMixin, - agentschedulers_db.DhcpAgentSchedulerDbMixin, - portbindings_base.PortBindingBaseMixin): - """BrocadePluginV2 is a Neutron plugin. 
- - Provides L2 Virtual Network functionality using VDX. Upper - layer driver class that interfaces to NETCONF layer below. - - """ - - def __init__(self): - """Initialize Brocade Plugin. - - Specify switch address and db configuration. - """ - - super(BrocadePluginV2, self).__init__() - self.supported_extension_aliases = ["binding", "security-group", - "external-net", "router", - "extraroute", "agent", - "l3_agent_scheduler", - "dhcp_agent_scheduler"] - - self.physical_interface = (cfg.CONF.PHYSICAL_INTERFACE. - physical_interface) - self.base_binding_dict = self._get_base_binding_dict() - portbindings_base.register_port_dict_function() - self.ctxt = context.get_admin_context() - self.ctxt.session = db.get_session() - self._vlan_bitmap = vbm.VlanBitmap(self.ctxt) - self._setup_rpc() - self.network_scheduler = importutils.import_object( - cfg.CONF.network_scheduler_driver - ) - self.router_scheduler = importutils.import_object( - cfg.CONF.router_scheduler_driver - ) - self.brocade_init() - - def brocade_init(self): - """Brocade specific initialization.""" - - self._switch = {'address': cfg.CONF.SWITCH.address, - 'username': cfg.CONF.SWITCH.username, - 'password': cfg.CONF.SWITCH.password - } - self._driver = importutils.import_object(NOS_DRIVER) - - def _setup_rpc(self): - # RPC support - self.service_topics = {svc_constants.CORE: topics.PLUGIN, - svc_constants.L3_ROUTER_NAT: topics.L3PLUGIN} - self.rpc_context = context.RequestContext('neutron', 'neutron', - is_admin=False) - self.conn = rpc_compat.create_connection(new=True) - self.endpoints = [BridgeRpcCallbacks(), - agents_db.AgentExtRpcCallback()] - for svc_topic in self.service_topics.values(): - self.conn.create_consumer(svc_topic, self.endpoints, fanout=False) - # Consume from all consumers in threads - self.conn.consume_in_threads() - self.notifier = AgentNotifierApi(topics.AGENT) - self.agent_notifiers[q_const.AGENT_TYPE_DHCP] = ( - dhcp_rpc_agent_api.DhcpAgentNotifyAPI() - ) - 
self.agent_notifiers[q_const.AGENT_TYPE_L3] = ( - l3_rpc_agent_api.L3AgentNotifyAPI() - ) - - def create_network(self, context, network): - """Create network. - - This call to create network translates to creation of port-profile on - the physical switch. - """ - - with context.session.begin(subtransactions=True): - net = super(BrocadePluginV2, self).create_network(context, network) - net_uuid = net['id'] - vlan_id = self._vlan_bitmap.get_next_vlan(None) - switch = self._switch - try: - self._driver.create_network(switch['address'], - switch['username'], - switch['password'], - vlan_id) - except Exception: - # Proper formatting - LOG.exception(_("Brocade NOS driver error")) - LOG.debug(_("Returning the allocated vlan (%d) to the pool"), - vlan_id) - self._vlan_bitmap.release_vlan(int(vlan_id)) - raise Exception(_("Brocade plugin raised exception, " - "check logs")) - - brocade_db.create_network(context, net_uuid, vlan_id) - self._process_l3_create(context, net, network['network']) - - LOG.info(_("Allocated vlan (%d) from the pool"), vlan_id) - return net - - def delete_network(self, context, net_id): - """Delete network. - - This call to delete the network translates to removing the - port-profile on the physical switch. 
- """ - - with context.session.begin(subtransactions=True): - self._process_l3_delete(context, net_id) - result = super(BrocadePluginV2, self).delete_network(context, - net_id) - # we must delete all ports in db first (foreign key constraint) - # there is no need to delete port in the driver (its a no-op) - # (actually: note there is no such call to the driver) - bports = brocade_db.get_ports(context, net_id) - for bport in bports: - brocade_db.delete_port(context, bport['port_id']) - - # find the vlan for this network - net = brocade_db.get_network(context, net_id) - vlan_id = net['vlan'] - - # Tell hw to do remove PP - switch = self._switch - try: - self._driver.delete_network(switch['address'], - switch['username'], - switch['password'], - vlan_id) - except Exception: - # Proper formatting - LOG.exception(_("Brocade NOS driver error")) - raise Exception(_("Brocade plugin raised exception, " - "check logs")) - - # now ok to delete the network - brocade_db.delete_network(context, net_id) - - # relinquish vlan in bitmap - self._vlan_bitmap.release_vlan(int(vlan_id)) - return result - - def update_network(self, context, id, network): - - session = context.session - with session.begin(subtransactions=True): - net = super(BrocadePluginV2, self).update_network(context, id, - network) - self._process_l3_update(context, net, network['network']) - return net - - def create_port(self, context, port): - """Create logical port on the switch.""" - - tenant_id = port['port']['tenant_id'] - network_id = port['port']['network_id'] - admin_state_up = port['port']['admin_state_up'] - - physical_interface = self.physical_interface - - with context.session.begin(subtransactions=True): - bnet = brocade_db.get_network(context, network_id) - vlan_id = bnet['vlan'] - - neutron_port = super(BrocadePluginV2, self).create_port(context, - port) - self._process_portbindings_create_and_update(context, - port['port'], - neutron_port) - interface_mac = neutron_port['mac_address'] - port_id = 
neutron_port['id'] - - switch = self._switch - - # convert mac format: xx:xx:xx:xx:xx:xx -> xxxx.xxxx.xxxx - mac = self.mac_reformat_62to34(interface_mac) - try: - self._driver.associate_mac_to_network(switch['address'], - switch['username'], - switch['password'], - vlan_id, - mac) - except Exception: - # Proper formatting - LOG.exception(_("Brocade NOS driver error")) - raise Exception(_("Brocade plugin raised exception, " - "check logs")) - - # save to brocade persistent db - brocade_db.create_port(context, port_id, network_id, - physical_interface, - vlan_id, tenant_id, admin_state_up) - - # apply any extensions - return neutron_port - - def delete_port(self, context, port_id): - with context.session.begin(subtransactions=True): - neutron_port = self.get_port(context, port_id) - interface_mac = neutron_port['mac_address'] - # convert mac format: xx:xx:xx:xx:xx:xx -> xxxx.xxxx.xxxx - mac = self.mac_reformat_62to34(interface_mac) - - brocade_port = brocade_db.get_port(context, port_id) - vlan_id = brocade_port['vlan_id'] - - switch = self._switch - try: - self._driver.dissociate_mac_from_network(switch['address'], - switch['username'], - switch['password'], - vlan_id, - mac) - except Exception: - LOG.exception(_("Brocade NOS driver error")) - raise Exception( - _("Brocade plugin raised exception, check logs")) - - super(BrocadePluginV2, self).delete_port(context, port_id) - brocade_db.delete_port(context, port_id) - - def update_port(self, context, port_id, port): - original_port = self.get_port(context, port_id) - session = context.session - port_updated = False - with session.begin(subtransactions=True): - # delete the port binding and read it with the new rules - if ext_sg.SECURITYGROUPS in port['port']: - port['port'][ext_sg.SECURITYGROUPS] = ( - self._get_security_groups_on_port(context, port)) - self._delete_port_security_group_bindings(context, port_id) - # process_port_create_security_group also needs port id - port['port']['id'] = port_id - 
self._process_port_create_security_group( - context, - port['port'], - port['port'][ext_sg.SECURITYGROUPS]) - port_updated = True - port_data = port['port'] - port = super(BrocadePluginV2, self).update_port( - context, port_id, port) - self._process_portbindings_create_and_update(context, - port_data, - port) - if original_port['admin_state_up'] != port['admin_state_up']: - port_updated = True - - if (original_port['fixed_ips'] != port['fixed_ips'] or - not utils.compare_elements( - original_port.get(ext_sg.SECURITYGROUPS), - port.get(ext_sg.SECURITYGROUPS))): - self.notifier.security_groups_member_updated( - context, port.get(ext_sg.SECURITYGROUPS)) - - if port_updated: - self._notify_port_updated(context, port) - - return port - - def _notify_port_updated(self, context, port): - port_id = port['id'] - bport = brocade_db.get_port(context, port_id) - self.notifier.port_update(context, port, - bport.physical_interface, - bport.vlan_id) - - def _get_base_binding_dict(self): - binding = { - portbindings.VIF_TYPE: portbindings.VIF_TYPE_BRIDGE, - portbindings.VIF_DETAILS: { - # TODO(rkukura): Replace with new VIF security details - portbindings.CAP_PORT_FILTER: - 'security-group' in self.supported_extension_aliases}} - return binding - - def get_plugin_version(self): - """Get version number of the plugin.""" - return PLUGIN_VERSION - - @staticmethod - def mac_reformat_62to34(interface_mac): - """Transform MAC address format. - - Transforms from 6 groups of 2 hexadecimal numbers delimited by ":" - to 3 groups of 4 hexadecimals numbers delimited by ".". - - :param interface_mac: MAC address in the format xx:xx:xx:xx:xx:xx - :type interface_mac: string - :returns: MAC address in the format xxxx.xxxx.xxxx - :rtype: string - """ - - mac = interface_mac.replace(":", "") - mac = mac[0:4] + "." + mac[4:8] + "." 
+ mac[8:12] - return mac diff --git a/neutron/plugins/brocade/README.md b/neutron/plugins/brocade/README.md deleted file mode 100644 index 82b3ad89d..000000000 --- a/neutron/plugins/brocade/README.md +++ /dev/null @@ -1,112 +0,0 @@ -Brocade Openstack Neutron Plugin -================================ - -* up-to-date version of these instructions are located at: - http://wiki.openstack.org/brocade-neutron-plugin - -* N.B.: Please see Prerequisites section regarding ncclient (netconf client library) - -* Supports VCS (Virtual Cluster of Switches) - - -Openstack Brocade Neutron Plugin implements the Neutron v2.0 API. - -This plugin is meant to orchestrate Brocade VCS switches running NOS, examples of these are: - - 1. VDX 67xx series of switches - 2. VDX 87xx series of switches - -Brocade Neutron plugin implements the Neutron v2.0 API. It uses NETCONF at the backend -to configure the Brocade switch. - - +------------+ +------------+ +-------------+ - | | | | | | - | | | | | Brocade | - | Openstack | v2.0 | Brocade | NETCONF | VCS Switch | - | Neutron +--------+ Neutron +----------+ | - | | | Plugin | | VDX 67xx | - | | | | | VDX 87xx | - | | | | | | - | | | | | | - +------------+ +------------+ +-------------+ - - -Directory Structure -=================== - -Normally you will have your Openstack directory structure as follows: - - /opt/stack/nova/ - /opt/stack/horizon/ - ... - /opt/stack/neutron/neutron/plugins/ - -Within this structure, Brocade plugin resides at: - - /opt/stack/neutron/neutron/plugins/brocade - - -Prerequsites -============ - -This plugin requires installation of the python netconf client (ncclient) library: - -ncclient v0.3.1 - Python library for NETCONF clients available at http://github.com/brocade/ncclient - - % git clone https://www.github.com/brocade/ncclient - % cd ncclient; sudo python ./setup.py install - - -Configuration -============= - -1. 
Specify to Neutron that you will be using the Brocade Plugin - this is done -by setting the parameter core_plugin in Neutron: - - core_plugin = neutron.plugins.brocade.NeutronPlugin.BrocadePluginV2 - -2. Physical switch configuration parameters and Brocade specific database configuration is specified in -the configuration file specified in the brocade.ini files: - - % cat /etc/neutron/plugins/brocade/brocade.ini - [SWITCH] - username = admin - password = password - address = - ostype = NOS - - [database] - connection = mysql://root:pass@localhost/brocade_neutron?charset=utf8 - - (please see list of more configuration parameters in the brocade.ini file) - -Running Setup.py -================ - -Running setup.py with appropriate permissions will copy the default configuration -file to /etc/neutron/plugins/brocade/brocade.ini. This file MUST be edited to -suit your setup/environment. - - % cd /opt/stack/neutron/neutron/plugins/brocade - % python setup.py - - -Devstack -======== - -Please see special notes for devstack at: -http://wiki.openstack.org/brocade-neutron-plugin - -In order to use Brocade Neutron Plugin, add the following lines in localrc, if localrc file doe - not exist create one: - -ENABLED_SERVICES=g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-net,n-cond,cinder,c-sch,c-api,c-vol,n-sch,n-novnc,n-xvnc,n-cauth,horizon,rabbit,neutron,q-svc,q-agt -Q_PLUGIN=brocade - -As part of running devstack/stack.sh, the configuration files is copied as: - - % cp /opt/stack/neutron/etc/neutron/plugins/brocade/brocade.ini /etc/neutron/plugins/brocade/brocade.ini - -(hence it is important to make any changes to the configuration in: -/opt/stack/neutron/etc/neutron/plugins/brocade/brocade.ini) - diff --git a/neutron/plugins/brocade/__init__.py b/neutron/plugins/brocade/__init__.py deleted file mode 100644 index c22f863e3..000000000 --- a/neutron/plugins/brocade/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2013 Brocade 
Communications System, Inc. -# All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. diff --git a/neutron/plugins/brocade/db/__init__.py b/neutron/plugins/brocade/db/__init__.py deleted file mode 100644 index c22f863e3..000000000 --- a/neutron/plugins/brocade/db/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2013 Brocade Communications System, Inc. -# All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. diff --git a/neutron/plugins/brocade/db/models.py b/neutron/plugins/brocade/db/models.py deleted file mode 100644 index d9b3663a1..000000000 --- a/neutron/plugins/brocade/db/models.py +++ /dev/null @@ -1,151 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2013 Brocade Communications System, Inc. -# All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# Authors: -# Shiv Haris (sharis@brocade.com) -# Varma Bhupatiraju (vbhupati@#brocade.com) - - -"""Brocade specific database schema/model.""" - -import sqlalchemy as sa - -from neutron.db import model_base -from neutron.db import models_v2 - - -class BrocadeNetwork(model_base.BASEV2, models_v2.HasId): - """Schema for brocade network.""" - - vlan = sa.Column(sa.String(10)) - - -class BrocadePort(model_base.BASEV2): - """Schema for brocade port.""" - - port_id = sa.Column(sa.String(36), primary_key=True, default="") - network_id = sa.Column(sa.String(36), - sa.ForeignKey("brocadenetworks.id"), - nullable=False) - admin_state_up = sa.Column(sa.Boolean, nullable=False) - physical_interface = sa.Column(sa.String(36)) - vlan_id = sa.Column(sa.String(36)) - tenant_id = sa.Column(sa.String(36)) - - -def create_network(context, net_id, vlan): - """Create a brocade specific network/port-profiles.""" - - session = context.session - with session.begin(subtransactions=True): - net = BrocadeNetwork(id=net_id, vlan=vlan) - session.add(net) - - return net - - -def delete_network(context, net_id): - """Delete a brocade specific network/port-profiles.""" - - session = context.session - with session.begin(subtransactions=True): - net = (session.query(BrocadeNetwork).filter_by(id=net_id).first()) - if net is not None: - session.delete(net) - - -def get_network(context, net_id, fields=None): - """Get brocade specific network, with vlan extension.""" - - session = context.session - return (session.query(BrocadeNetwork).filter_by(id=net_id).first()) - - -def 
get_networks(context, filters=None, fields=None): - """Get all brocade specific networks.""" - - session = context.session - try: - nets = session.query(BrocadeNetwork).all() - return nets - except sa.exc.SQLAlchemyError: - return None - - -def create_port(context, port_id, network_id, physical_interface, - vlan_id, tenant_id, admin_state_up): - """Create a brocade specific port, has policy like vlan.""" - - # port_id is truncated: since the linux-bridge tap device names are - # based on truncated port id, this enables port lookups using - # tap devices - port_id = port_id[0:11] - session = context.session - with session.begin(subtransactions=True): - port = BrocadePort(port_id=port_id, - network_id=network_id, - physical_interface=physical_interface, - vlan_id=vlan_id, - admin_state_up=admin_state_up, - tenant_id=tenant_id) - session.add(port) - return port - - -def get_port(context, port_id): - """get a brocade specific port.""" - - port_id = port_id[0:11] - session = context.session - port = (session.query(BrocadePort).filter_by(port_id=port_id).first()) - return port - - -def get_ports(context, network_id=None): - """get a brocade specific port.""" - - session = context.session - ports = (session.query(BrocadePort).filter_by(network_id=network_id).all()) - return ports - - -def delete_port(context, port_id): - """delete brocade specific port.""" - - port_id = port_id[0:11] - session = context.session - with session.begin(subtransactions=True): - port = (session.query(BrocadePort).filter_by(port_id=port_id).first()) - if port is not None: - session.delete(port) - - -def get_port_from_device(session, port_id): - """get port from the tap device.""" - - # device is same as truncated port_id - port = (session.query(BrocadePort).filter_by(port_id=port_id).first()) - return port - - -def update_port_state(context, port_id, admin_state_up): - """Update port attributes.""" - - port_id = port_id[0:11] - session = context.session - session.query(BrocadePort).filter_by( - 
port_id=port_id).update({'admin_state_up': admin_state_up}) diff --git a/neutron/plugins/brocade/nos/__init__.py b/neutron/plugins/brocade/nos/__init__.py deleted file mode 100644 index 9d4562b0d..000000000 --- a/neutron/plugins/brocade/nos/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright (c) 2013 Brocade Communications Systems, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. diff --git a/neutron/plugins/brocade/nos/fake_nosdriver.py b/neutron/plugins/brocade/nos/fake_nosdriver.py deleted file mode 100644 index 8984768d5..000000000 --- a/neutron/plugins/brocade/nos/fake_nosdriver.py +++ /dev/null @@ -1,117 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2013 Brocade Communications System, Inc. -# All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# -# Authors: -# Varma Bhupatiraju (vbhupati@#brocade.com) -# Shiv Haris (sharis@brocade.com) - - -"""FAKE DRIVER, for unit tests purposes. - -Brocade NOS Driver implements NETCONF over SSHv2 for -Neutron network life-cycle management. -""" - - -class NOSdriver(): - """NOS NETCONF interface driver for Neutron network. - - Fake: Handles life-cycle management of Neutron network, - leverages AMPP on NOS - (for use by unit tests, avoids touching any hardware) - """ - - def __init__(self): - pass - - def connect(self, host, username, password): - """Connect via SSH and initialize the NETCONF session.""" - pass - - def create_network(self, host, username, password, net_id): - """Creates a new virtual network.""" - pass - - def delete_network(self, host, username, password, net_id): - """Deletes a virtual network.""" - pass - - def associate_mac_to_network(self, host, username, password, - net_id, mac): - """Associates a MAC address to virtual network.""" - pass - - def dissociate_mac_from_network(self, host, username, password, - net_id, mac): - """Dissociates a MAC address from virtual network.""" - pass - - def create_vlan_interface(self, mgr, vlan_id): - """Configures a VLAN interface.""" - pass - - def delete_vlan_interface(self, mgr, vlan_id): - """Deletes a VLAN interface.""" - pass - - def get_port_profiles(self, mgr): - """Retrieves all port profiles.""" - pass - - def get_port_profile(self, mgr, name): - """Retrieves a port profile.""" - pass - - def create_port_profile(self, mgr, name): - """Creates a port profile.""" - pass - - def delete_port_profile(self, mgr, name): - """Deletes a port profile.""" - pass - - def activate_port_profile(self, mgr, name): - """Activates a port profile.""" - pass - - def deactivate_port_profile(self, mgr, name): - """Deactivates a port profile.""" - pass - - def associate_mac_to_port_profile(self, mgr, name, mac_address): - """Associates a MAC address to a port profile.""" - pass - - def dissociate_mac_from_port_profile(self, 
mgr, name, mac_address): - """Dissociates a MAC address from a port profile.""" - pass - - def create_vlan_profile_for_port_profile(self, mgr, name): - """Creates VLAN sub-profile for port profile.""" - pass - - def configure_l2_mode_for_vlan_profile(self, mgr, name): - """Configures L2 mode for VLAN sub-profile.""" - pass - - def configure_trunk_mode_for_vlan_profile(self, mgr, name): - """Configures trunk mode for VLAN sub-profile.""" - pass - - def configure_allowed_vlans_for_vlan_profile(self, mgr, name, vlan_id): - """Configures allowed VLANs for VLAN sub-profile.""" - pass diff --git a/neutron/plugins/brocade/nos/nctemplates.py b/neutron/plugins/brocade/nos/nctemplates.py deleted file mode 100644 index 48071dbcd..000000000 --- a/neutron/plugins/brocade/nos/nctemplates.py +++ /dev/null @@ -1,204 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright (c) 2013 Brocade Communications Systems, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# Authors: -# Varma Bhupatiraju (vbhupati@#brocade.com) -# Shiv Haris (sharis@brocade.com) - - -"""NOS NETCONF XML Configuration Command Templates. 
- -Interface Configuration Commands -""" - -# Create VLAN (vlan_id) -CREATE_VLAN_INTERFACE = """ - - - - - {vlan_id} - - - - -""" - -# Delete VLAN (vlan_id) -DELETE_VLAN_INTERFACE = """ - - - - - {vlan_id} - - - - -""" - -# -# AMPP Life-cycle Management Configuration Commands -# - -# Create AMPP port-profile (port_profile_name) -CREATE_PORT_PROFILE = """ - - - {name} - - -""" - -# Create VLAN sub-profile for port-profile (port_profile_name) -CREATE_VLAN_PROFILE_FOR_PORT_PROFILE = """ - - - {name} - - - -""" - -# Configure L2 mode for VLAN sub-profile (port_profile_name) -CONFIGURE_L2_MODE_FOR_VLAN_PROFILE = """ - - - {name} - - - - - -""" - -# Configure trunk mode for VLAN sub-profile (port_profile_name) -CONFIGURE_TRUNK_MODE_FOR_VLAN_PROFILE = """ - - - {name} - - - - trunk - - - - - -""" - -# Configure allowed VLANs for VLAN sub-profile -# (port_profile_name, allowed_vlan, native_vlan) -CONFIGURE_ALLOWED_VLANS_FOR_VLAN_PROFILE = """ - - - {name} - - - - - - {vlan_id} - - - - - - - -""" - -# Delete port-profile (port_profile_name) -DELETE_PORT_PROFILE = """ - - - {name} - - -""" - -# Activate port-profile (port_profile_name) -ACTIVATE_PORT_PROFILE = """ - - - - {name} - - - - -""" - -# Deactivate port-profile (port_profile_name) -DEACTIVATE_PORT_PROFILE = """ - - - - {name} - - - - -""" - -# Associate MAC address to port-profile (port_profile_name, mac_address) -ASSOCIATE_MAC_TO_PORT_PROFILE = """ - - - - {name} - - {mac_address} - - - - -""" - -# Dissociate MAC address from port-profile (port_profile_name, mac_address) -DISSOCIATE_MAC_FROM_PORT_PROFILE = """ - - - - {name} - - {mac_address} - - - - -""" - -# -# Custom RPC Commands -# - - -# -# Constants -# - -# Port profile naming convention for Neutron networks -OS_PORT_PROFILE_NAME = "openstack-profile-{id}" - -# Port profile filter expressions -PORT_PROFILE_XPATH_FILTER = "/port-profile" -PORT_PROFILE_NAME_XPATH_FILTER = "/port-profile[name='{name}']" diff --git a/neutron/plugins/brocade/nos/nosdriver.py 
b/neutron/plugins/brocade/nos/nosdriver.py deleted file mode 100644 index ce4c86110..000000000 --- a/neutron/plugins/brocade/nos/nosdriver.py +++ /dev/null @@ -1,233 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2013 Brocade Communications System, Inc. -# All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# Authors: -# Varma Bhupatiraju (vbhupati@#brocade.com) -# Shiv Haris (sharis@brocade.com) - - -"""Brocade NOS Driver implements NETCONF over SSHv2 for -Neutron network life-cycle management. -""" - -from ncclient import manager - -from neutron.openstack.common import excutils -from neutron.openstack.common import log as logging -from neutron.plugins.brocade.nos import nctemplates as template - - -LOG = logging.getLogger(__name__) -SSH_PORT = 22 - - -def nos_unknown_host_cb(host, fingerprint): - """An unknown host callback. - - Returns `True` if it finds the key acceptable, - and `False` if not. This default callback for NOS always returns 'True' - (i.e. trusts all hosts for now). - """ - return True - - -class NOSdriver(): - """NOS NETCONF interface driver for Neutron network. 
- - Handles life-cycle management of Neutron network (leverages AMPP on NOS) - """ - - def __init__(self): - self.mgr = None - - def connect(self, host, username, password): - """Connect via SSH and initialize the NETCONF session.""" - - # Use the persisted NETCONF connection - if self.mgr and self.mgr.connected: - return self.mgr - - # Open new NETCONF connection - try: - self.mgr = manager.connect(host=host, port=SSH_PORT, - username=username, password=password, - unknown_host_cb=nos_unknown_host_cb) - except Exception as e: - with excutils.save_and_reraise_exception(): - LOG.error(_("Connect failed to switch: %s"), e) - - LOG.debug(_("Connect success to host %(host)s:%(ssh_port)d"), - dict(host=host, ssh_port=SSH_PORT)) - return self.mgr - - def close_session(self): - """Close NETCONF session.""" - if self.mgr: - self.mgr.close_session() - self.mgr = None - - def create_network(self, host, username, password, net_id): - """Creates a new virtual network.""" - - name = template.OS_PORT_PROFILE_NAME.format(id=net_id) - try: - mgr = self.connect(host, username, password) - self.create_vlan_interface(mgr, net_id) - self.create_port_profile(mgr, name) - self.create_vlan_profile_for_port_profile(mgr, name) - self.configure_l2_mode_for_vlan_profile(mgr, name) - self.configure_trunk_mode_for_vlan_profile(mgr, name) - self.configure_allowed_vlans_for_vlan_profile(mgr, name, net_id) - self.activate_port_profile(mgr, name) - except Exception as ex: - with excutils.save_and_reraise_exception(): - LOG.exception(_("NETCONF error: %s"), ex) - self.close_session() - - def delete_network(self, host, username, password, net_id): - """Deletes a virtual network.""" - - name = template.OS_PORT_PROFILE_NAME.format(id=net_id) - try: - mgr = self.connect(host, username, password) - self.deactivate_port_profile(mgr, name) - self.delete_port_profile(mgr, name) - self.delete_vlan_interface(mgr, net_id) - except Exception as ex: - with excutils.save_and_reraise_exception(): - 
LOG.exception(_("NETCONF error: %s"), ex) - self.close_session() - - def associate_mac_to_network(self, host, username, password, - net_id, mac): - """Associates a MAC address to virtual network.""" - - name = template.OS_PORT_PROFILE_NAME.format(id=net_id) - try: - mgr = self.connect(host, username, password) - self.associate_mac_to_port_profile(mgr, name, mac) - except Exception as ex: - with excutils.save_and_reraise_exception(): - LOG.exception(_("NETCONF error: %s"), ex) - self.close_session() - - def dissociate_mac_from_network(self, host, username, password, - net_id, mac): - """Dissociates a MAC address from virtual network.""" - - name = template.OS_PORT_PROFILE_NAME.format(id=net_id) - try: - mgr = self.connect(host, username, password) - self.dissociate_mac_from_port_profile(mgr, name, mac) - except Exception as ex: - with excutils.save_and_reraise_exception(): - LOG.exception(_("NETCONF error: %s"), ex) - self.close_session() - - def create_vlan_interface(self, mgr, vlan_id): - """Configures a VLAN interface.""" - - confstr = template.CREATE_VLAN_INTERFACE.format(vlan_id=vlan_id) - mgr.edit_config(target='running', config=confstr) - - def delete_vlan_interface(self, mgr, vlan_id): - """Deletes a VLAN interface.""" - - confstr = template.DELETE_VLAN_INTERFACE.format(vlan_id=vlan_id) - mgr.edit_config(target='running', config=confstr) - - def get_port_profiles(self, mgr): - """Retrieves all port profiles.""" - - filterstr = template.PORT_PROFILE_XPATH_FILTER - response = mgr.get_config(source='running', - filter=('xpath', filterstr)).data_xml - return response - - def get_port_profile(self, mgr, name): - """Retrieves a port profile.""" - - filterstr = template.PORT_PROFILE_NAME_XPATH_FILTER.format(name=name) - response = mgr.get_config(source='running', - filter=('xpath', filterstr)).data_xml - return response - - def create_port_profile(self, mgr, name): - """Creates a port profile.""" - - confstr = template.CREATE_PORT_PROFILE.format(name=name) - 
mgr.edit_config(target='running', config=confstr) - - def delete_port_profile(self, mgr, name): - """Deletes a port profile.""" - - confstr = template.DELETE_PORT_PROFILE.format(name=name) - mgr.edit_config(target='running', config=confstr) - - def activate_port_profile(self, mgr, name): - """Activates a port profile.""" - - confstr = template.ACTIVATE_PORT_PROFILE.format(name=name) - mgr.edit_config(target='running', config=confstr) - - def deactivate_port_profile(self, mgr, name): - """Deactivates a port profile.""" - - confstr = template.DEACTIVATE_PORT_PROFILE.format(name=name) - mgr.edit_config(target='running', config=confstr) - - def associate_mac_to_port_profile(self, mgr, name, mac_address): - """Associates a MAC address to a port profile.""" - - confstr = template.ASSOCIATE_MAC_TO_PORT_PROFILE.format( - name=name, mac_address=mac_address) - mgr.edit_config(target='running', config=confstr) - - def dissociate_mac_from_port_profile(self, mgr, name, mac_address): - """Dissociates a MAC address from a port profile.""" - - confstr = template.DISSOCIATE_MAC_FROM_PORT_PROFILE.format( - name=name, mac_address=mac_address) - mgr.edit_config(target='running', config=confstr) - - def create_vlan_profile_for_port_profile(self, mgr, name): - """Creates VLAN sub-profile for port profile.""" - - confstr = template.CREATE_VLAN_PROFILE_FOR_PORT_PROFILE.format( - name=name) - mgr.edit_config(target='running', config=confstr) - - def configure_l2_mode_for_vlan_profile(self, mgr, name): - """Configures L2 mode for VLAN sub-profile.""" - - confstr = template.CONFIGURE_L2_MODE_FOR_VLAN_PROFILE.format( - name=name) - mgr.edit_config(target='running', config=confstr) - - def configure_trunk_mode_for_vlan_profile(self, mgr, name): - """Configures trunk mode for VLAN sub-profile.""" - - confstr = template.CONFIGURE_TRUNK_MODE_FOR_VLAN_PROFILE.format( - name=name) - mgr.edit_config(target='running', config=confstr) - - def configure_allowed_vlans_for_vlan_profile(self, mgr, name, 
vlan_id): - """Configures allowed VLANs for VLAN sub-profile.""" - - confstr = template.CONFIGURE_ALLOWED_VLANS_FOR_VLAN_PROFILE.format( - name=name, vlan_id=vlan_id) - mgr.edit_config(target='running', config=confstr) diff --git a/neutron/plugins/brocade/tests/README b/neutron/plugins/brocade/tests/README deleted file mode 100644 index 476ca0535..000000000 --- a/neutron/plugins/brocade/tests/README +++ /dev/null @@ -1,24 +0,0 @@ -Start the neutron-server with IP address of switch configured in brocade.ini: -(for configuration instruction please see README.md in the above directory) - -nostest.py: -This tests two things: - 1. Creates port-profile on the physical switch when a neutron 'network' is created - 2. Associates the MAC address with the created port-profile - -noscli.py: - CLI interface to create/delete/associate MAC/dissociate MAC - Commands: - % noscli.py create - (after running check that PP is created on the switch) - - % noscli.py delete - (after running check that PP is deleted from the switch) - - % noscli.py associate - (after running check that MAC is associated with PP) - - % noscli.py dissociate - (after running check that MAC is dissociated from the PP) - - diff --git a/neutron/plugins/brocade/tests/noscli.py b/neutron/plugins/brocade/tests/noscli.py deleted file mode 100644 index 81e988e3c..000000000 --- a/neutron/plugins/brocade/tests/noscli.py +++ /dev/null @@ -1,93 +0,0 @@ -#!/usr/bin/env python -# -# Copyright (c) 2013 Brocade Communications Systems, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -# -# Authors: -# Varma Bhupatiraju (vbhupati@#brocade.com) -# Shiv Haris (sharis@brocade.com) - - -"""Brocade NOS Driver CLI.""" -from __future__ import print_function - -import argparse - -from neutron.openstack.common import log as logging -from neutron.plugins.brocade.nos import nosdriver as nos - -LOG = logging.getLogger(__name__) - - -class NOSCli(object): - - def __init__(self, host, username, password): - self.host = host - self.username = username - self.password = password - self.driver = nos.NOSdriver() - - def execute(self, cmd): - numargs = len(args.otherargs) - - if args.cmd == 'create' and numargs == 1: - self._create(args.otherargs[0]) - elif args.cmd == 'delete' and numargs == 1: - self._delete(args.otherargs[0]) - elif args.cmd == 'associate' and numargs == 2: - self._associate(args.otherargs[0], args.otherargs[1]) - elif args.cmd == 'dissociate' and numargs == 2: - self._dissociate(args.otherargs[0], args.otherargs[1]) - else: - print(usage_desc) - exit(0) - - def _create(self, net_id): - self.driver.create_network(self.host, self.username, self.password, - net_id) - - def _delete(self, net_id): - self.driver.delete_network(self.host, self.username, self.password, - net_id) - - def _associate(self, net_id, mac): - self.driver.associate_mac_to_network( - self.host, self.username, self.password, net_id, mac) - - def _dissociate(self, net_id, mac): - self.driver.dissociate_mac_from_network( - self.host, self.username, self.password, net_id, mac) - - -usage_desc = """ -Command descriptions: - - create - delete - associate - dissociate -""" - -parser = argparse.ArgumentParser(description='process args', - usage=usage_desc, epilog='foo bar help') -parser.add_argument('--ip', default='localhost') -parser.add_argument('--username', default='admin') -parser.add_argument('--password', default='password') -parser.add_argument('cmd') 
-parser.add_argument('otherargs', nargs='*') -args = parser.parse_args() - -noscli = NOSCli(args.ip, args.username, args.password) -noscli.execute(args.cmd) diff --git a/neutron/plugins/brocade/tests/nostest.py b/neutron/plugins/brocade/tests/nostest.py deleted file mode 100644 index 72a21ae8b..000000000 --- a/neutron/plugins/brocade/tests/nostest.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright (c) 2013 Brocade Communications Systems, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# -# Authors: -# Varma Bhupatiraju (vbhupati@#brocade.com) -# Shiv Haris (sharis@brocade.com) - - -"""Brocade NOS Driver Test.""" -from __future__ import print_function - -import sys - -from neutron.plugins.brocade.nos import nosdriver as nos - - -def nostest(host, username, password): - # Driver - driver = nos.NOSdriver() - - # Neutron operations - vlan = 1001 - mac = '0050.56bf.0001' - driver.create_network(host, username, password, vlan) - driver.associate_mac_to_network(host, username, password, vlan, mac) - driver.dissociate_mac_from_network(host, username, password, vlan, mac) - driver.delete_network(host, username, password, vlan) - - # AMPP enumeration - with driver.connect(host, username, password) as mgr: - print(driver.get_port_profiles(mgr)) - print(driver.get_port_profile(mgr, 'default')) - - -if __name__ == '__main__': - nostest(sys.argv[1], sys.argv[2], sys.argv[3]) diff --git a/neutron/plugins/brocade/vlanbm.py b/neutron/plugins/brocade/vlanbm.py deleted file mode 100644 index 3c4b3ccb6..000000000 --- a/neutron/plugins/brocade/vlanbm.py +++ /dev/null @@ -1,60 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2013 Brocade Communications System, Inc. -# All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# -# Authors: -# Shiv Haris (sharis@brocade.com) -# Varma Bhupatiraju (vbhupati@#brocade.com) - - -"""A Vlan Bitmap class to handle allocation/de-allocation of vlan ids.""" -from six import moves - -from neutron.common import constants -from neutron.plugins.brocade.db import models as brocade_db - - -MIN_VLAN = constants.MIN_VLAN_TAG + 1 -MAX_VLAN = constants.MAX_VLAN_TAG - - -class VlanBitmap(object): - """Setup a vlan bitmap for allocation/de-allocation.""" - - # Keep track of the vlans that have been allocated/de-allocated - # uses a bitmap to do this - - def __init__(self, ctxt): - """Initialize the vlan as a set.""" - self.vlans = set(int(net['vlan']) - for net in brocade_db.get_networks(ctxt) - if net['vlan'] - ) - - def get_next_vlan(self, vlan_id=None): - """Try to get a specific vlan if requested or get the next vlan.""" - min_vlan_search = vlan_id or MIN_VLAN - max_vlan_search = (vlan_id and vlan_id + 1) or MAX_VLAN - - for vlan in moves.xrange(min_vlan_search, max_vlan_search): - if vlan not in self.vlans: - self.vlans.add(vlan) - return vlan - - def release_vlan(self, vlan_id): - """Return the vlan to the pool.""" - if vlan_id in self.vlans: - self.vlans.remove(vlan_id) diff --git a/neutron/plugins/cisco/README b/neutron/plugins/cisco/README deleted file mode 100644 index 2bedb75b1..000000000 --- a/neutron/plugins/cisco/README +++ /dev/null @@ -1,7 +0,0 @@ -Cisco Neutron Virtual Network Plugin - -This plugin implements Neutron v2 APIs and helps configure -topologies consisting of virtual and physical switches. - -For more details on use please refer to: -http://wiki.openstack.org/cisco-neutron diff --git a/neutron/plugins/cisco/__init__.py b/neutron/plugins/cisco/__init__.py deleted file mode 100644 index db695fb0a..000000000 --- a/neutron/plugins/cisco/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2011 Cisco Systems, Inc. All rights reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Sumit Naiksatam, Cisco Systems, Inc. -# diff --git a/neutron/plugins/cisco/common/__init__.py b/neutron/plugins/cisco/common/__init__.py deleted file mode 100644 index 833357b73..000000000 --- a/neutron/plugins/cisco/common/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2011 Cisco Systems, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Sumit Naiksatam, Cisco Systems, Inc. diff --git a/neutron/plugins/cisco/common/cisco_constants.py b/neutron/plugins/cisco/common/cisco_constants.py deleted file mode 100644 index 2f1992108..000000000 --- a/neutron/plugins/cisco/common/cisco_constants.py +++ /dev/null @@ -1,111 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2011 Cisco Systems, Inc. All rights reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Sumit Naiksatam, Cisco Systems, Inc. - - -# Attachment attributes -INSTANCE_ID = 'instance_id' -TENANT_ID = 'tenant_id' -TENANT_NAME = 'tenant_name' -HOST_NAME = 'host_name' - -# Network attributes -NET_ID = 'id' -NET_NAME = 'name' -NET_VLAN_ID = 'vlan_id' -NET_VLAN_NAME = 'vlan_name' -NET_PORTS = 'ports' - -CREDENTIAL_ID = 'credential_id' -CREDENTIAL_NAME = 'credential_name' -CREDENTIAL_USERNAME = 'user_name' -CREDENTIAL_PASSWORD = 'password' -CREDENTIAL_TYPE = 'type' -MASKED_PASSWORD = '********' - -USERNAME = 'username' -PASSWORD = 'password' - -LOGGER_COMPONENT_NAME = "cisco_plugin" - -NEXUS_PLUGIN = 'nexus_plugin' -VSWITCH_PLUGIN = 'vswitch_plugin' - -DEVICE_IP = 'device_ip' - -NETWORK_ADMIN = 'network_admin' - -NETWORK = 'network' -PORT = 'port' -BASE_PLUGIN_REF = 'base_plugin_ref' -CONTEXT = 'context' -SUBNET = 'subnet' - -#### N1Kv CONSTANTS -# Special vlan_id value in n1kv_vlan_allocations table indicating flat network -FLAT_VLAN_ID = -1 - -# Topic for tunnel notifications between the plugin and agent -TUNNEL = 'tunnel' - -# Maximum VXLAN range configurable for one network profile. 
-MAX_VXLAN_RANGE = 1000000 - -# Values for network_type -NETWORK_TYPE_FLAT = 'flat' -NETWORK_TYPE_VLAN = 'vlan' -NETWORK_TYPE_VXLAN = 'vxlan' -NETWORK_TYPE_LOCAL = 'local' -NETWORK_TYPE_NONE = 'none' -NETWORK_TYPE_TRUNK = 'trunk' -NETWORK_TYPE_MULTI_SEGMENT = 'multi-segment' - -# Values for network sub_type -NETWORK_TYPE_OVERLAY = 'overlay' -NETWORK_SUBTYPE_NATIVE_VXLAN = 'native_vxlan' -NETWORK_SUBTYPE_TRUNK_VLAN = NETWORK_TYPE_VLAN -NETWORK_SUBTYPE_TRUNK_VXLAN = NETWORK_TYPE_OVERLAY - -# Prefix for VM Network name -VM_NETWORK_NAME_PREFIX = 'vmn_' - -DEFAULT_HTTP_TIMEOUT = 15 -SET = 'set' -INSTANCE = 'instance' -PROPERTIES = 'properties' -NAME = 'name' -ID = 'id' -POLICY = 'policy' -TENANT_ID_NOT_SET = 'TENANT_ID_NOT_SET' -ENCAPSULATIONS = 'encapsulations' -STATE = 'state' -ONLINE = 'online' -MAPPINGS = 'mappings' -MAPPING = 'mapping' -SEGMENTS = 'segments' -SEGMENT = 'segment' -BRIDGE_DOMAIN_SUFFIX = '_bd' -LOGICAL_NETWORK_SUFFIX = '_log_net' -ENCAPSULATION_PROFILE_SUFFIX = '_profile' - -UUID_LENGTH = 36 - -# Nexus vlan and vxlan segment range -NEXUS_VLAN_RESERVED_MIN = 3968 -NEXUS_VLAN_RESERVED_MAX = 4047 -NEXUS_VXLAN_MIN = 4096 -NEXUS_VXLAN_MAX = 16000000 diff --git a/neutron/plugins/cisco/common/cisco_credentials_v2.py b/neutron/plugins/cisco/common/cisco_credentials_v2.py deleted file mode 100644 index 5d8fc8ff5..000000000 --- a/neutron/plugins/cisco/common/cisco_credentials_v2.py +++ /dev/null @@ -1,61 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2012 Cisco Systems, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Sumit Naiksatam, Cisco Systems, Inc. - -import logging as LOG - -from neutron.plugins.cisco.common import cisco_constants as const -from neutron.plugins.cisco.common import cisco_exceptions as cexc -from neutron.plugins.cisco.common import config -from neutron.plugins.cisco.db import network_db_v2 as cdb - -LOG.basicConfig(level=LOG.WARN) -LOG.getLogger(const.LOGGER_COMPONENT_NAME) - - -class Store(object): - """Credential Store.""" - - @staticmethod - def initialize(): - dev_dict = config.get_device_dictionary() - for key in dev_dict: - dev_id, dev_ip, dev_key = key - if dev_key == const.USERNAME: - try: - cdb.add_credential( - dev_ip, - dev_dict[dev_id, dev_ip, const.USERNAME], - dev_dict[dev_id, dev_ip, const.PASSWORD], - dev_id) - except cexc.CredentialAlreadyExists: - # We are quietly ignoring this, since it only happens - # if this class module is loaded more than once, in - # which case, the credentials are already populated - pass - - @staticmethod - def get_username(cred_name): - """Get the username.""" - credential = cdb.get_credential_name(cred_name) - return credential[const.CREDENTIAL_USERNAME] - - @staticmethod - def get_password(cred_name): - """Get the password.""" - credential = cdb.get_credential_name(cred_name) - return credential[const.CREDENTIAL_PASSWORD] diff --git a/neutron/plugins/cisco/common/cisco_exceptions.py b/neutron/plugins/cisco/common/cisco_exceptions.py deleted file mode 100644 index be50e7665..000000000 --- a/neutron/plugins/cisco/common/cisco_exceptions.py +++ /dev/null @@ -1,236 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2011 Cisco Systems, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Sumit Naiksatam, Cisco Systems, Inc. -# @author: Rohit Agarwalla, Cisco Systems, Inc. - -"""Exceptions used by the Cisco plugin.""" - -from neutron.common import exceptions - - -class NetworkSegmentIDNotFound(exceptions.NeutronException): - """Segmentation ID for network is not found.""" - message = _("Segmentation ID for network %(net_id)s is not found.") - - -class NoMoreNics(exceptions.NeutronException): - """No more dynamic NICs are available in the system.""" - message = _("Unable to complete operation. No more dynamic NICs are " - "available in the system.") - - -class NetworkVlanBindingAlreadyExists(exceptions.NeutronException): - """Binding cannot be created, since it already exists.""" - message = _("NetworkVlanBinding for %(vlan_id)s and network " - "%(network_id)s already exists.") - - -class VlanIDNotFound(exceptions.NeutronException): - """VLAN ID cannot be found.""" - message = _("Vlan ID %(vlan_id)s not found.") - - -class VlanIDOutsidePool(exceptions.NeutronException): - """VLAN ID cannot be allocated, since it is outside the configured pool.""" - message = _("Unable to complete operation. 
VLAN ID exists outside of the " - "configured network segment range.") - - -class VlanIDNotAvailable(exceptions.NeutronException): - """No VLAN ID available.""" - message = _("No Vlan ID available.") - - -class QosNotFound(exceptions.NeutronException): - """QoS level with this ID cannot be found.""" - message = _("QoS level %(qos_id)s could not be found " - "for tenant %(tenant_id)s.") - - -class QosNameAlreadyExists(exceptions.NeutronException): - """QoS Name already exists.""" - message = _("QoS level with name %(qos_name)s already exists " - "for tenant %(tenant_id)s.") - - -class CredentialNotFound(exceptions.NeutronException): - """Credential with this ID cannot be found.""" - message = _("Credential %(credential_id)s could not be found.") - - -class CredentialNameNotFound(exceptions.NeutronException): - """Credential Name could not be found.""" - message = _("Credential %(credential_name)s could not be found.") - - -class CredentialAlreadyExists(exceptions.NeutronException): - """Credential already exists.""" - message = _("Credential %(credential_name)s already exists.") - - -class ProviderNetworkExists(exceptions.NeutronException): - """Provider network already exists.""" - message = _("Provider network %s already exists") - - -class NexusComputeHostNotConfigured(exceptions.NeutronException): - """Connection to compute host is not configured.""" - message = _("Connection to %(host)s is not configured.") - - -class NexusConnectFailed(exceptions.NeutronException): - """Failed to connect to Nexus switch.""" - message = _("Unable to connect to Nexus %(nexus_host)s. Reason: %(exc)s.") - - -class NexusConfigFailed(exceptions.NeutronException): - """Failed to configure Nexus switch.""" - message = _("Failed to configure Nexus: %(config)s. 
Reason: %(exc)s.") - - -class NexusPortBindingNotFound(exceptions.NeutronException): - """NexusPort Binding is not present.""" - message = _("Nexus Port Binding (%(filters)s) is not present.") - - def __init__(self, **kwargs): - filters = ','.join('%s=%s' % i for i in kwargs.items()) - super(NexusPortBindingNotFound, self).__init__(filters=filters) - - -class NoNexusSviSwitch(exceptions.NeutronException): - """No usable nexus switch found.""" - message = _("No usable Nexus switch found to create SVI interface.") - - -class PortVnicBindingAlreadyExists(exceptions.NeutronException): - """PortVnic Binding already exists.""" - message = _("PortVnic Binding %(port_id)s already exists.") - - -class PortVnicNotFound(exceptions.NeutronException): - """PortVnic Binding is not present.""" - message = _("PortVnic Binding %(port_id)s is not present.") - - -class SubnetNotSpecified(exceptions.NeutronException): - """Subnet id not specified.""" - message = _("No subnet_id specified for router gateway.") - - -class SubnetInterfacePresent(exceptions.NeutronException): - """Subnet SVI interface already exists.""" - message = _("Subnet %(subnet_id)s has an interface on %(router_id)s.") - - -class PortIdForNexusSvi(exceptions.NeutronException): - """Port Id specified for Nexus SVI.""" - message = _('Nexus hardware router gateway only uses Subnet Ids.') - - -class InvalidDetach(exceptions.NeutronException): - message = _("Unable to unplug the attachment %(att_id)s from port " - "%(port_id)s for network %(net_id)s. 
The attachment " - "%(att_id)s does not exist.") - - -class PolicyProfileAlreadyExists(exceptions.NeutronException): - """Policy Profile cannot be created since it already exists.""" - message = _("Policy Profile %(profile_id)s " - "already exists.") - - -class PolicyProfileIdNotFound(exceptions.NotFound): - """Policy Profile with the given UUID cannot be found.""" - message = _("Policy Profile %(profile_id)s could not be found.") - - -class NetworkProfileAlreadyExists(exceptions.NeutronException): - """Network Profile cannot be created since it already exists.""" - message = _("Network Profile %(profile_id)s " - "already exists.") - - -class NetworkProfileNotFound(exceptions.NotFound): - """Network Profile with the given UUID/name cannot be found.""" - message = _("Network Profile %(profile)s could not be found.") - - -class NetworkProfileInUse(exceptions.InUse): - """Network Profile with the given UUID is in use.""" - message = _("One or more network segments belonging to network " - "profile %(profile)s is in use.") - - -class NoMoreNetworkSegments(exceptions.NoNetworkAvailable): - """Network segments exhausted for the given network profile.""" - message = _("No more segments available in network segment pool " - "%(network_profile_name)s.") - - -class VMNetworkNotFound(exceptions.NotFound): - """VM Network with the given name cannot be found.""" - message = _("VM Network %(name)s could not be found.") - - -class VxlanIDInUse(exceptions.InUse): - """VXLAN ID is in use.""" - message = _("Unable to create the network. " - "The VXLAN ID %(vxlan_id)s is in use.") - - -class VxlanIDNotFound(exceptions.NotFound): - """VXLAN ID cannot be found.""" - message = _("Vxlan ID %(vxlan_id)s not found.") - - -class VxlanIDOutsidePool(exceptions.NeutronException): - """VXLAN ID cannot be allocated, as it is outside the configured pool.""" - message = _("Unable to complete operation. 
VXLAN ID exists outside of the " - "configured network segment range.") - - -class VSMConnectionFailed(exceptions.ServiceUnavailable): - """Connection to VSM failed.""" - message = _("Connection to VSM failed: %(reason)s.") - - -class VSMError(exceptions.NeutronException): - """Error has occurred on the VSM.""" - message = _("Internal VSM Error: %(reason)s.") - - -class NetworkBindingNotFound(exceptions.NotFound): - """Network Binding for network cannot be found.""" - message = _("Network Binding for network %(network_id)s could " - "not be found.") - - -class PortBindingNotFound(exceptions.NotFound): - """Port Binding for port cannot be found.""" - message = _("Port Binding for port %(port_id)s could " - "not be found.") - - -class ProfileTenantBindingNotFound(exceptions.NotFound): - """Profile to Tenant binding for given profile ID cannot be found.""" - message = _("Profile-Tenant binding for profile %(profile_id)s could " - "not be found.") - - -class NoClusterFound(exceptions.NotFound): - """No service cluster found to perform multi-segment bridging.""" - message = _("No service cluster found to perform multi-segment bridging.") diff --git a/neutron/plugins/cisco/common/cisco_faults.py b/neutron/plugins/cisco/common/cisco_faults.py deleted file mode 100644 index 80e787e41..000000000 --- a/neutron/plugins/cisco/common/cisco_faults.py +++ /dev/null @@ -1,138 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2011 Cisco Systems, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Ying Liu, Cisco Systems, Inc. - -import webob.dec - -from neutron import wsgi - - -class Fault(webob.exc.HTTPException): - """Error codes for API faults.""" - - _fault_names = { - 400: "malformedRequest", - 401: "unauthorized", - 451: "CredentialNotFound", - 452: "QoSNotFound", - 453: "NovatenantNotFound", - 454: "MultiportNotFound", - 470: "serviceUnavailable", - 471: "pluginFault" - } - - def __init__(self, exception): - """Create a Fault for the given webob.exc.exception.""" - self.wrapped_exc = exception - - @webob.dec.wsgify(RequestClass=wsgi.Request) - def __call__(self, req): - """Generate a WSGI response. - - Response is generated based on the exception passed to constructor. - """ - # Replace the body with fault details. - code = self.wrapped_exc.status_int - fault_name = self._fault_names.get(code, "neutronServiceFault") - fault_data = { - fault_name: { - 'code': code, - 'message': self.wrapped_exc.explanation}} - # 'code' is an attribute on the fault tag itself - content_type = req.best_match_content_type() - self.wrapped_exc.body = wsgi.Serializer().serialize( - fault_data, content_type) - self.wrapped_exc.content_type = content_type - return self.wrapped_exc - - -class PortNotFound(webob.exc.HTTPClientError): - """PortNotFound exception. - - subclass of :class:`~HTTPClientError` - - This indicates that the server did not find the port specified - in the HTTP request for a given network - - code: 430, title: Port not Found - """ - code = 430 - title = _('Port not Found') - explanation = _('Unable to find a port with the specified identifier.') - - -class CredentialNotFound(webob.exc.HTTPClientError): - """CredentialNotFound exception. 
- - subclass of :class:`~HTTPClientError` - - This indicates that the server did not find the Credential specified - in the HTTP request - - code: 451, title: Credential not Found - """ - code = 451 - title = _('Credential Not Found') - explanation = _('Unable to find a Credential with' - ' the specified identifier.') - - -class QosNotFound(webob.exc.HTTPClientError): - """QosNotFound exception. - - subclass of :class:`~HTTPClientError` - - This indicates that the server did not find the QoS specified - in the HTTP request - - code: 452, title: QoS not Found - """ - code = 452 - title = _('QoS Not Found') - explanation = _('Unable to find a QoS with' - ' the specified identifier.') - - -class NovatenantNotFound(webob.exc.HTTPClientError): - """NovatenantNotFound exception. - - subclass of :class:`~HTTPClientError` - - This indicates that the server did not find the Novatenant specified - in the HTTP request - - code: 453, title: Nova tenant not Found - """ - code = 453 - title = _('Nova tenant Not Found') - explanation = _('Unable to find a Novatenant with' - ' the specified identifier.') - - -class RequestedStateInvalid(webob.exc.HTTPClientError): - """RequestedStateInvalid exception. - - subclass of :class:`~HTTPClientError` - - This indicates that the server could not update the port state to - to the request value - - code: 431, title: Requested State Invalid - """ - code = 431 - title = _('Requested State Invalid') - explanation = _('Unable to update port state with specified value.') diff --git a/neutron/plugins/cisco/common/config.py b/neutron/plugins/cisco/common/config.py deleted file mode 100644 index f13569cea..000000000 --- a/neutron/plugins/cisco/common/config.py +++ /dev/null @@ -1,151 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2013 Cisco Systems, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo.config import cfg - -from neutron.agent.common import config - - -cisco_plugins_opts = [ - cfg.StrOpt('vswitch_plugin', - default='neutron.plugins.openvswitch.ovs_neutron_plugin.' - 'OVSNeutronPluginV2', - help=_("Virtual Switch to use")), - cfg.StrOpt('nexus_plugin', - default='neutron.plugins.cisco.nexus.cisco_nexus_plugin_v2.' - 'NexusPlugin', - help=_("Nexus Switch to use")), -] - -cisco_opts = [ - cfg.StrOpt('vlan_name_prefix', default='q-', - help=_("VLAN Name prefix")), - cfg.StrOpt('provider_vlan_name_prefix', default='p-', - help=_("VLAN Name prefix for provider vlans")), - cfg.BoolOpt('provider_vlan_auto_create', default=True, - help=_('Provider VLANs are automatically created as needed ' - 'on the Nexus switch')), - cfg.BoolOpt('provider_vlan_auto_trunk', default=True, - help=_('Provider VLANs are automatically trunked as needed ' - 'on the ports of the Nexus switch')), - cfg.BoolOpt('nexus_l3_enable', default=False, - help=_("Enable L3 support on the Nexus switches")), - cfg.BoolOpt('svi_round_robin', default=False, - help=_("Distribute SVI interfaces over all switches")), - cfg.StrOpt('model_class', - default='neutron.plugins.cisco.models.virt_phy_sw_v2.' - 'VirtualPhysicalSwitchModelV2', - help=_("Model Class")), - cfg.StrOpt('nexus_driver', - default='neutron.plugins.cisco.test.nexus.' 
- 'fake_nexus_driver.CiscoNEXUSFakeDriver', - help=_("Nexus Driver Name")), -] - -cisco_n1k_opts = [ - cfg.StrOpt('integration_bridge', default='br-int', - help=_("N1K Integration Bridge")), - cfg.BoolOpt('enable_tunneling', default=True, - help=_("N1K Enable Tunneling")), - cfg.StrOpt('tunnel_bridge', default='br-tun', - help=_("N1K Tunnel Bridge")), - cfg.StrOpt('local_ip', default='10.0.0.3', - help=_("N1K Local IP")), - cfg.StrOpt('tenant_network_type', default='local', - help=_("N1K Tenant Network Type")), - cfg.StrOpt('bridge_mappings', default='', - help=_("N1K Bridge Mappings")), - cfg.StrOpt('vxlan_id_ranges', default='5000:10000', - help=_("N1K VXLAN ID Ranges")), - cfg.StrOpt('network_vlan_ranges', default='vlan:1:4095', - help=_("N1K Network VLAN Ranges")), - cfg.StrOpt('default_network_profile', default='default_network_profile', - help=_("N1K default network profile")), - cfg.StrOpt('default_policy_profile', default='service_profile', - help=_("N1K default policy profile")), - cfg.StrOpt('network_node_policy_profile', default='dhcp_pp', - help=_("N1K policy profile for network node")), - cfg.IntOpt('poll_duration', default=10, - help=_("N1K Policy profile polling duration in seconds")), - cfg.IntOpt('http_pool_size', default=4, - help=_("Number of threads to use to make HTTP requests")), -] - -cfg.CONF.register_opts(cisco_opts, "CISCO") -cfg.CONF.register_opts(cisco_n1k_opts, "CISCO_N1K") -cfg.CONF.register_opts(cisco_plugins_opts, "CISCO_PLUGINS") -config.register_root_helper(cfg.CONF) - -# shortcuts -CONF = cfg.CONF -CISCO = cfg.CONF.CISCO -CISCO_N1K = cfg.CONF.CISCO_N1K -CISCO_PLUGINS = cfg.CONF.CISCO_PLUGINS - -# -# device_dictionary - Contains all external device configuration. 
-# -# When populated the device dictionary format is: -# {('', '', ''): '', ...} -# -# Example: -# {('NEXUS_SWITCH', '1.1.1.1', 'username'): 'admin', -# ('NEXUS_SWITCH', '1.1.1.1', 'password'): 'mySecretPassword', -# ('NEXUS_SWITCH', '1.1.1.1', 'compute1'): '1/1', ...} -# -device_dictionary = {} - -# -# first_device_ip - IP address of first switch discovered in config -# -# Used for SVI placement when round-robin placement is disabled -# -first_device_ip = None - - -class CiscoConfigOptions(): - """Cisco Configuration Options Class.""" - - def __init__(self): - self._create_device_dictionary() - - def _create_device_dictionary(self): - """ - Create the device dictionary from the cisco_plugins.ini - device supported sections. Ex. NEXUS_SWITCH, N1KV. - """ - - global first_device_ip - - multi_parser = cfg.MultiConfigParser() - read_ok = multi_parser.read(CONF.config_file) - - if len(read_ok) != len(CONF.config_file): - raise cfg.Error(_("Some config files were not parsed properly")) - - first_device_ip = None - for parsed_file in multi_parser.parsed: - for parsed_item in parsed_file.keys(): - dev_id, sep, dev_ip = parsed_item.partition(':') - if dev_id.lower() in ['nexus_switch', 'n1kv']: - for dev_key, value in parsed_file[parsed_item].items(): - if dev_ip and not first_device_ip: - first_device_ip = dev_ip - device_dictionary[dev_id, dev_ip, dev_key] = value[0] - - -def get_device_dictionary(): - return device_dictionary diff --git a/neutron/plugins/cisco/db/__init__.py b/neutron/plugins/cisco/db/__init__.py deleted file mode 100644 index db695fb0a..000000000 --- a/neutron/plugins/cisco/db/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2011 Cisco Systems, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Sumit Naiksatam, Cisco Systems, Inc. -# diff --git a/neutron/plugins/cisco/db/n1kv_db_v2.py b/neutron/plugins/cisco/db/n1kv_db_v2.py deleted file mode 100644 index d924af9b3..000000000 --- a/neutron/plugins/cisco/db/n1kv_db_v2.py +++ /dev/null @@ -1,1621 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2013 Cisco Systems, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Aruna Kushwaha, Cisco Systems Inc. -# @author: Abhishek Raut, Cisco Systems Inc. -# @author: Rudrajit Tapadar, Cisco Systems Inc. -# @author: Sergey Sudakovich, Cisco Systems Inc. 
- -import netaddr -import re -from sqlalchemy.orm import exc -from sqlalchemy import sql - -from neutron.api.v2 import attributes -from neutron.common import constants -from neutron.common import exceptions as n_exc -import neutron.db.api as db -from neutron.db import models_v2 -from neutron.openstack.common import log as logging -from neutron.plugins.cisco.common import cisco_constants as c_const -from neutron.plugins.cisco.common import cisco_exceptions as c_exc -from neutron.plugins.cisco.db import n1kv_models_v2 - -LOG = logging.getLogger(__name__) - - -def del_trunk_segment_binding(db_session, trunk_segment_id, segment_pairs): - """ - Delete a trunk network binding. - - :param db_session: database session - :param trunk_segment_id: UUID representing the trunk network - :param segment_pairs: List of segment UUIDs in pair - representing the segments that are trunked - """ - with db_session.begin(subtransactions=True): - for (segment_id, dot1qtag) in segment_pairs: - (db_session.query(n1kv_models_v2.N1kvTrunkSegmentBinding). - filter_by(trunk_segment_id=trunk_segment_id, - segment_id=segment_id, - dot1qtag=dot1qtag).delete()) - alloc = (db_session.query(n1kv_models_v2. - N1kvTrunkSegmentBinding). - filter_by(trunk_segment_id=trunk_segment_id).first()) - if not alloc: - binding = get_network_binding(db_session, trunk_segment_id) - binding.physical_network = None - - -def del_multi_segment_binding(db_session, multi_segment_id, segment_pairs): - """ - Delete a multi-segment network binding. - - :param db_session: database session - :param multi_segment_id: UUID representing the multi-segment network - :param segment_pairs: List of segment UUIDs in pair - representing the segments that are bridged - """ - with db_session.begin(subtransactions=True): - for (segment1_id, segment2_id) in segment_pairs: - (db_session.query(n1kv_models_v2. 
- N1kvMultiSegmentNetworkBinding).filter_by( - multi_segment_id=multi_segment_id, - segment1_id=segment1_id, - segment2_id=segment2_id).delete()) - - -def add_trunk_segment_binding(db_session, trunk_segment_id, segment_pairs): - """ - Create a trunk network binding. - - :param db_session: database session - :param trunk_segment_id: UUID representing the multi-segment network - :param segment_pairs: List of segment UUIDs in pair - representing the segments to be trunked - """ - with db_session.begin(subtransactions=True): - binding = get_network_binding(db_session, trunk_segment_id) - for (segment_id, tag) in segment_pairs: - if not binding.physical_network: - member_seg_binding = get_network_binding(db_session, - segment_id) - binding.physical_network = member_seg_binding.physical_network - trunk_segment_binding = ( - n1kv_models_v2.N1kvTrunkSegmentBinding( - trunk_segment_id=trunk_segment_id, - segment_id=segment_id, dot1qtag=tag)) - db_session.add(trunk_segment_binding) - - -def add_multi_segment_binding(db_session, multi_segment_id, segment_pairs): - """ - Create a multi-segment network binding. - - :param db_session: database session - :param multi_segment_id: UUID representing the multi-segment network - :param segment_pairs: List of segment UUIDs in pair - representing the segments to be bridged - """ - with db_session.begin(subtransactions=True): - for (segment1_id, segment2_id) in segment_pairs: - multi_segment_binding = ( - n1kv_models_v2.N1kvMultiSegmentNetworkBinding( - multi_segment_id=multi_segment_id, - segment1_id=segment1_id, - segment2_id=segment2_id)) - db_session.add(multi_segment_binding) - - -def add_multi_segment_encap_profile_name(db_session, multi_segment_id, - segment_pair, profile_name): - """ - Add the encapsulation profile name to the multi-segment network binding. 
- - :param db_session: database session - :param multi_segment_id: UUID representing the multi-segment network - :param segment_pair: set containing the segment UUIDs that are bridged - """ - with db_session.begin(subtransactions=True): - binding = get_multi_segment_network_binding(db_session, - multi_segment_id, - segment_pair) - binding.encap_profile_name = profile_name - - -def get_multi_segment_network_binding(db_session, - multi_segment_id, segment_pair): - """ - Retrieve multi-segment network binding. - - :param db_session: database session - :param multi_segment_id: UUID representing the trunk network whose binding - is to fetch - :param segment_pair: set containing the segment UUIDs that are bridged - :returns: binding object - """ - try: - (segment1_id, segment2_id) = segment_pair - return (db_session.query( - n1kv_models_v2.N1kvMultiSegmentNetworkBinding). - filter_by(multi_segment_id=multi_segment_id, - segment1_id=segment1_id, - segment2_id=segment2_id)).one() - except exc.NoResultFound: - raise c_exc.NetworkBindingNotFound(network_id=multi_segment_id) - - -def get_multi_segment_members(db_session, multi_segment_id): - """ - Retrieve all the member segments of a multi-segment network. - - :param db_session: database session - :param multi_segment_id: UUID representing the multi-segment network - :returns: a list of tuples representing the mapped segments - """ - with db_session.begin(subtransactions=True): - allocs = (db_session.query( - n1kv_models_v2.N1kvMultiSegmentNetworkBinding). - filter_by(multi_segment_id=multi_segment_id)) - return [(a.segment1_id, a.segment2_id) for a in allocs] - - -def get_multi_segment_encap_dict(db_session, multi_segment_id): - """ - Retrieve the encapsulation profiles for every segment pairs bridged. 
- - :param db_session: database session - :param multi_segment_id: UUID representing the multi-segment network - :returns: a dictionary of lists containing the segment pairs in sets - """ - with db_session.begin(subtransactions=True): - encap_dict = {} - allocs = (db_session.query( - n1kv_models_v2.N1kvMultiSegmentNetworkBinding). - filter_by(multi_segment_id=multi_segment_id)) - for alloc in allocs: - if alloc.encap_profile_name not in encap_dict: - encap_dict[alloc.encap_profile_name] = [] - seg_pair = (alloc.segment1_id, alloc.segment2_id) - encap_dict[alloc.encap_profile_name].append(seg_pair) - return encap_dict - - -def get_trunk_network_binding(db_session, trunk_segment_id, segment_pair): - """ - Retrieve trunk network binding. - - :param db_session: database session - :param trunk_segment_id: UUID representing the trunk network whose binding - is to fetch - :param segment_pair: set containing the segment_id and dot1qtag - :returns: binding object - """ - try: - (segment_id, dot1qtag) = segment_pair - return (db_session.query(n1kv_models_v2.N1kvTrunkSegmentBinding). - filter_by(trunk_segment_id=trunk_segment_id, - segment_id=segment_id, - dot1qtag=dot1qtag)).one() - except exc.NoResultFound: - raise c_exc.NetworkBindingNotFound(network_id=trunk_segment_id) - - -def get_trunk_members(db_session, trunk_segment_id): - """ - Retrieve all the member segments of a trunk network. - - :param db_session: database session - :param trunk_segment_id: UUID representing the trunk network - :returns: a list of tuples representing the segment and their - corresponding dot1qtag - """ - with db_session.begin(subtransactions=True): - allocs = (db_session.query(n1kv_models_v2.N1kvTrunkSegmentBinding). - filter_by(trunk_segment_id=trunk_segment_id)) - return [(a.segment_id, a.dot1qtag) for a in allocs] - - -def is_trunk_member(db_session, segment_id): - """ - Checks if a segment is a member of a trunk segment. 
- - :param db_session: database session - :param segment_id: UUID of the segment to be checked - :returns: boolean - """ - with db_session.begin(subtransactions=True): - ret = (db_session.query(n1kv_models_v2.N1kvTrunkSegmentBinding). - filter_by(segment_id=segment_id).first()) - return bool(ret) - - -def is_multi_segment_member(db_session, segment_id): - """ - Checks if a segment is a member of a multi-segment network. - - :param db_session: database session - :param segment_id: UUID of the segment to be checked - :returns: boolean - """ - with db_session.begin(subtransactions=True): - ret1 = (db_session.query( - n1kv_models_v2.N1kvMultiSegmentNetworkBinding). - filter_by(segment1_id=segment_id).first()) - ret2 = (db_session.query( - n1kv_models_v2.N1kvMultiSegmentNetworkBinding). - filter_by(segment2_id=segment_id).first()) - return bool(ret1 or ret2) - - -def get_network_binding(db_session, network_id): - """ - Retrieve network binding. - - :param db_session: database session - :param network_id: UUID representing the network whose binding is - to fetch - :returns: binding object - """ - try: - return (db_session.query(n1kv_models_v2.N1kvNetworkBinding). - filter_by(network_id=network_id). - one()) - except exc.NoResultFound: - raise c_exc.NetworkBindingNotFound(network_id=network_id) - - -def add_network_binding(db_session, network_id, network_type, - physical_network, segmentation_id, - multicast_ip, network_profile_id, add_segments): - """ - Create network binding. - - :param db_session: database session - :param network_id: UUID representing the network - :param network_type: string representing type of network (VLAN, OVERLAY, - MULTI_SEGMENT or TRUNK) - :param physical_network: Only applicable for VLAN networks. It - represents a L2 Domain - :param segmentation_id: integer representing VLAN or VXLAN ID - :param multicast_ip: Native VXLAN technology needs a multicast IP to be - associated with every VXLAN ID to deal with broadcast - packets. 
A single multicast IP can be shared by - multiple VXLAN IDs. - :param network_profile_id: network profile ID based on which this network - is created - :param add_segments: List of segment UUIDs in pairs to be added to either a - multi-segment or trunk network - """ - with db_session.begin(subtransactions=True): - binding = n1kv_models_v2.N1kvNetworkBinding( - network_id=network_id, - network_type=network_type, - physical_network=physical_network, - segmentation_id=segmentation_id, - multicast_ip=multicast_ip, - profile_id=network_profile_id) - db_session.add(binding) - if add_segments is None: - pass - elif network_type == c_const.NETWORK_TYPE_MULTI_SEGMENT: - add_multi_segment_binding(db_session, network_id, add_segments) - elif network_type == c_const.NETWORK_TYPE_TRUNK: - add_trunk_segment_binding(db_session, network_id, add_segments) - - -def get_segment_range(network_profile): - """ - Get the segment range min and max for a network profile. - - :params network_profile: object of type network profile - :returns: integer values representing minimum and maximum segment - range value - """ - # Sort the range to ensure min, max is in order - seg_min, seg_max = sorted( - int(i) for i in network_profile.segment_range.split('-')) - LOG.debug(_("seg_min %(seg_min)s, seg_max %(seg_max)s"), - {'seg_min': seg_min, 'seg_max': seg_max}) - return seg_min, seg_max - - -def get_multicast_ip(network_profile): - """ - Retrieve a multicast ip from the defined pool. 
- - :params network_profile: object of type network profile - :returns: string representing multicast IP - """ - # Round robin multicast ip allocation - min_ip, max_ip = _get_multicast_ip_range(network_profile) - addr_list = list((netaddr.iter_iprange(min_ip, max_ip))) - mul_ip_str = str(addr_list[network_profile.multicast_ip_index]) - - network_profile.multicast_ip_index += 1 - if network_profile.multicast_ip_index == len(addr_list): - network_profile.multicast_ip_index = 0 - return mul_ip_str - - -def _get_multicast_ip_range(network_profile): - """ - Helper method to retrieve minimum and maximum multicast ip. - - :params network_profile: object of type network profile - :returns: two strings representing minimum multicast ip and - maximum multicast ip - """ - # Assumption: ip range belongs to the same subnet - # Assumption: ip range is already sorted - return network_profile.multicast_ip_range.split('-') - - -def get_port_binding(db_session, port_id): - """ - Retrieve port binding. - - :param db_session: database session - :param port_id: UUID representing the port whose binding is to fetch - :returns: port binding object - """ - try: - return (db_session.query(n1kv_models_v2.N1kvPortBinding). - filter_by(port_id=port_id). - one()) - except exc.NoResultFound: - raise c_exc.PortBindingNotFound(port_id=port_id) - - -def add_port_binding(db_session, port_id, policy_profile_id): - """ - Create port binding. - - Bind the port with policy profile. - :param db_session: database session - :param port_id: UUID of the port - :param policy_profile_id: UUID of the policy profile - """ - with db_session.begin(subtransactions=True): - binding = n1kv_models_v2.N1kvPortBinding(port_id=port_id, - profile_id=policy_profile_id) - db_session.add(binding) - - -def delete_segment_allocations(db_session, net_p): - """ - Delete the segment allocation entry from the table. 
- - :params db_session: database session - :params net_p: network profile object - """ - with db_session.begin(subtransactions=True): - seg_min, seg_max = get_segment_range(net_p) - if net_p['segment_type'] == c_const.NETWORK_TYPE_VLAN: - db_session.query(n1kv_models_v2.N1kvVlanAllocation).filter( - (n1kv_models_v2.N1kvVlanAllocation.physical_network == - net_p['physical_network']), - (n1kv_models_v2.N1kvVlanAllocation.vlan_id >= seg_min), - (n1kv_models_v2.N1kvVlanAllocation.vlan_id <= - seg_max)).delete() - elif net_p['segment_type'] == c_const.NETWORK_TYPE_OVERLAY: - db_session.query(n1kv_models_v2.N1kvVxlanAllocation).filter( - (n1kv_models_v2.N1kvVxlanAllocation.vxlan_id >= seg_min), - (n1kv_models_v2.N1kvVxlanAllocation.vxlan_id <= - seg_max)).delete() - - -def sync_vlan_allocations(db_session, net_p): - """ - Synchronize vlan_allocations table with configured VLAN ranges. - - Sync the network profile range with the vlan_allocations table for each - physical network. - :param db_session: database session - :param net_p: network profile dictionary - """ - with db_session.begin(subtransactions=True): - seg_min, seg_max = get_segment_range(net_p) - for vlan_id in range(seg_min, seg_max + 1): - try: - get_vlan_allocation(db_session, - net_p['physical_network'], - vlan_id) - except c_exc.VlanIDNotFound: - alloc = n1kv_models_v2.N1kvVlanAllocation( - physical_network=net_p['physical_network'], - vlan_id=vlan_id, - network_profile_id=net_p['id']) - db_session.add(alloc) - - -def get_vlan_allocation(db_session, physical_network, vlan_id): - """ - Retrieve vlan allocation. - - :param db_session: database session - :param physical network: string name for the physical network - :param vlan_id: integer representing the VLAN ID. - :returns: allocation object for given physical network and VLAN ID - """ - try: - return (db_session.query(n1kv_models_v2.N1kvVlanAllocation). 
- filter_by(physical_network=physical_network, - vlan_id=vlan_id).one()) - except exc.NoResultFound: - raise c_exc.VlanIDNotFound(vlan_id=vlan_id) - - -def reserve_vlan(db_session, network_profile): - """ - Reserve a VLAN ID within the range of the network profile. - - :param db_session: database session - :param network_profile: network profile object - """ - seg_min, seg_max = get_segment_range(network_profile) - segment_type = c_const.NETWORK_TYPE_VLAN - - with db_session.begin(subtransactions=True): - alloc = (db_session.query(n1kv_models_v2.N1kvVlanAllocation). - filter(sql.and_( - n1kv_models_v2.N1kvVlanAllocation.vlan_id >= seg_min, - n1kv_models_v2.N1kvVlanAllocation.vlan_id <= seg_max, - n1kv_models_v2.N1kvVlanAllocation.physical_network == - network_profile['physical_network'], - n1kv_models_v2.N1kvVlanAllocation.allocated == - sql.false()) - )).first() - if alloc: - segment_id = alloc.vlan_id - physical_network = alloc.physical_network - alloc.allocated = True - return (physical_network, segment_type, segment_id, "0.0.0.0") - raise c_exc.NoMoreNetworkSegments( - network_profile_name=network_profile.name) - - -def reserve_vxlan(db_session, network_profile): - """ - Reserve a VXLAN ID within the range of the network profile. - - :param db_session: database session - :param network_profile: network profile object - """ - seg_min, seg_max = get_segment_range(network_profile) - segment_type = c_const.NETWORK_TYPE_OVERLAY - physical_network = "" - - with db_session.begin(subtransactions=True): - alloc = (db_session.query(n1kv_models_v2.N1kvVxlanAllocation). - filter(sql.and_( - n1kv_models_v2.N1kvVxlanAllocation.vxlan_id >= - seg_min, - n1kv_models_v2.N1kvVxlanAllocation.vxlan_id <= - seg_max, - n1kv_models_v2.N1kvVxlanAllocation.allocated == - sql.false()) - ).first()) - if alloc: - segment_id = alloc.vxlan_id - alloc.allocated = True - if network_profile.sub_type == (c_const. 
- NETWORK_SUBTYPE_NATIVE_VXLAN): - return (physical_network, segment_type, - segment_id, get_multicast_ip(network_profile)) - else: - return (physical_network, segment_type, segment_id, "0.0.0.0") - raise n_exc.NoNetworkAvailable() - - -def alloc_network(db_session, network_profile_id): - """ - Allocate network using first available free segment ID in segment range. - - :param db_session: database session - :param network_profile_id: UUID representing the network profile - """ - with db_session.begin(subtransactions=True): - network_profile = get_network_profile(db_session, - network_profile_id) - if network_profile.segment_type == c_const.NETWORK_TYPE_VLAN: - return reserve_vlan(db_session, network_profile) - if network_profile.segment_type == c_const.NETWORK_TYPE_OVERLAY: - return reserve_vxlan(db_session, network_profile) - return (None, network_profile.segment_type, 0, "0.0.0.0") - - -def reserve_specific_vlan(db_session, physical_network, vlan_id): - """ - Reserve a specific VLAN ID for the network. - - :param db_session: database session - :param physical_network: string representing the name of physical network - :param vlan_id: integer value of the segmentation ID to be reserved - """ - with db_session.begin(subtransactions=True): - try: - alloc = (db_session.query(n1kv_models_v2.N1kvVlanAllocation). - filter_by(physical_network=physical_network, - vlan_id=vlan_id). - one()) - if alloc.allocated: - if vlan_id == c_const.FLAT_VLAN_ID: - raise n_exc.FlatNetworkInUse( - physical_network=physical_network) - else: - raise n_exc.VlanIdInUse(vlan_id=vlan_id, - physical_network=physical_network) - LOG.debug(_("Reserving specific vlan %(vlan)s on physical " - "network %(network)s from pool"), - {"vlan": vlan_id, "network": physical_network}) - alloc.allocated = True - db_session.add(alloc) - except exc.NoResultFound: - raise c_exc.VlanIDOutsidePool - - -def release_vlan(db_session, physical_network, vlan_id): - """ - Release a given VLAN ID. 
- - :param db_session: database session - :param physical_network: string representing the name of physical network - :param vlan_id: integer value of the segmentation ID to be released - """ - with db_session.begin(subtransactions=True): - try: - alloc = (db_session.query(n1kv_models_v2.N1kvVlanAllocation). - filter_by(physical_network=physical_network, - vlan_id=vlan_id). - one()) - alloc.allocated = False - except exc.NoResultFound: - LOG.warning(_("vlan_id %(vlan)s on physical network %(network)s " - "not found"), - {"vlan": vlan_id, "network": physical_network}) - - -def sync_vxlan_allocations(db_session, net_p): - """ - Synchronize vxlan_allocations table with configured vxlan ranges. - - :param db_session: database session - :param net_p: network profile dictionary - """ - seg_min, seg_max = get_segment_range(net_p) - if seg_max + 1 - seg_min > c_const.MAX_VXLAN_RANGE: - msg = (_("Unreasonable vxlan ID range %(vxlan_min)s - %(vxlan_max)s"), - {"vxlan_min": seg_min, "vxlan_max": seg_max}) - raise n_exc.InvalidInput(error_message=msg) - with db_session.begin(subtransactions=True): - for vxlan_id in range(seg_min, seg_max + 1): - try: - get_vxlan_allocation(db_session, vxlan_id) - except c_exc.VxlanIDNotFound: - alloc = n1kv_models_v2.N1kvVxlanAllocation( - network_profile_id=net_p['id'], vxlan_id=vxlan_id) - db_session.add(alloc) - - -def get_vxlan_allocation(db_session, vxlan_id): - """ - Retrieve VXLAN allocation for the given VXLAN ID. - - :param db_session: database session - :param vxlan_id: integer value representing the segmentation ID - :returns: allocation object - """ - try: - return (db_session.query(n1kv_models_v2.N1kvVxlanAllocation). - filter_by(vxlan_id=vxlan_id).one()) - except exc.NoResultFound: - raise c_exc.VxlanIDNotFound(vxlan_id=vxlan_id) - - -def reserve_specific_vxlan(db_session, vxlan_id): - """ - Reserve a specific VXLAN ID. 
- - :param db_session: database session - :param vxlan_id: integer value representing the segmentation ID - """ - with db_session.begin(subtransactions=True): - try: - alloc = (db_session.query(n1kv_models_v2.N1kvVxlanAllocation). - filter_by(vxlan_id=vxlan_id). - one()) - if alloc.allocated: - raise c_exc.VxlanIDInUse(vxlan_id=vxlan_id) - LOG.debug(_("Reserving specific vxlan %s from pool"), vxlan_id) - alloc.allocated = True - db_session.add(alloc) - except exc.NoResultFound: - raise c_exc.VxlanIDOutsidePool - - -def release_vxlan(db_session, vxlan_id): - """ - Release a given VXLAN ID. - - :param db_session: database session - :param vxlan_id: integer value representing the segmentation ID - """ - with db_session.begin(subtransactions=True): - try: - alloc = (db_session.query(n1kv_models_v2.N1kvVxlanAllocation). - filter_by(vxlan_id=vxlan_id). - one()) - alloc.allocated = False - except exc.NoResultFound: - LOG.warning(_("vxlan_id %s not found"), vxlan_id) - - -def set_port_status(port_id, status): - """ - Set the status of the port. - - :param port_id: UUID representing the port - :param status: string representing the new status - """ - db_session = db.get_session() - try: - port = db_session.query(models_v2.Port).filter_by(id=port_id).one() - port.status = status - except exc.NoResultFound: - raise n_exc.PortNotFound(port_id=port_id) - - -def get_vm_network(db_session, policy_profile_id, network_id): - """ - Retrieve a vm_network based on policy profile and network id. - - :param db_session: database session - :param policy_profile_id: UUID representing policy profile - :param network_id: UUID representing network - :returns: VM network object - """ - try: - return (db_session.query(n1kv_models_v2.N1kVmNetwork). 
- filter_by(profile_id=policy_profile_id, - network_id=network_id).one()) - except exc.NoResultFound: - name = (c_const.VM_NETWORK_NAME_PREFIX + policy_profile_id - + "_" + network_id) - raise c_exc.VMNetworkNotFound(name=name) - - -def add_vm_network(db_session, - name, - policy_profile_id, - network_id, - port_count): - """ - Create a VM network. - - Add a VM network for a unique combination of network and - policy profile. All ports having the same policy profile - on one network will be associated with one VM network. - :param db_session: database session - :param name: string representing the name of the VM network - :param policy_profile_id: UUID representing policy profile - :param network_id: UUID representing a network - :param port_count: integer representing the number of ports on vm network - """ - with db_session.begin(subtransactions=True): - vm_network = n1kv_models_v2.N1kVmNetwork( - name=name, - profile_id=policy_profile_id, - network_id=network_id, - port_count=port_count) - db_session.add(vm_network) - - -def update_vm_network_port_count(db_session, name, port_count): - """ - Update a VM network with new port count. - - :param db_session: database session - :param name: string representing the name of the VM network - :param port_count: integer representing the number of ports on VM network - """ - try: - with db_session.begin(subtransactions=True): - vm_network = (db_session.query(n1kv_models_v2.N1kVmNetwork). - filter_by(name=name).one()) - if port_count is not None: - vm_network.port_count = port_count - return vm_network - except exc.NoResultFound: - raise c_exc.VMNetworkNotFound(name=name) - - -def delete_vm_network(db_session, policy_profile_id, network_id): - """ - Delete a VM network. 
- - :param db_session: database session - :param policy_profile_id: UUID representing a policy profile - :param network_id: UUID representing a network - :returns: deleted VM network object - """ - with db_session.begin(subtransactions=True): - try: - vm_network = get_vm_network(db_session, - policy_profile_id, - network_id) - db_session.delete(vm_network) - db_session.query(n1kv_models_v2.N1kVmNetwork).filter_by( - name=vm_network["name"]).delete() - return vm_network - except exc.NoResultFound: - name = (c_const.VM_NETWORK_NAME_PREFIX + policy_profile_id + - "_" + network_id) - raise c_exc.VMNetworkNotFound(name=name) - - -def create_network_profile(db_session, network_profile): - """Create a network profile.""" - LOG.debug(_("create_network_profile()")) - with db_session.begin(subtransactions=True): - kwargs = {"name": network_profile["name"], - "segment_type": network_profile["segment_type"]} - if network_profile["segment_type"] == c_const.NETWORK_TYPE_VLAN: - kwargs["physical_network"] = network_profile["physical_network"] - kwargs["segment_range"] = network_profile["segment_range"] - elif network_profile["segment_type"] == c_const.NETWORK_TYPE_OVERLAY: - kwargs["multicast_ip_index"] = 0 - kwargs["multicast_ip_range"] = network_profile[ - "multicast_ip_range"] - kwargs["segment_range"] = network_profile["segment_range"] - kwargs["sub_type"] = network_profile["sub_type"] - elif network_profile["segment_type"] == c_const.NETWORK_TYPE_TRUNK: - kwargs["sub_type"] = network_profile["sub_type"] - net_profile = n1kv_models_v2.NetworkProfile(**kwargs) - db_session.add(net_profile) - return net_profile - - -def delete_network_profile(db_session, id): - """Delete Network Profile.""" - LOG.debug(_("delete_network_profile()")) - with db_session.begin(subtransactions=True): - try: - network_profile = get_network_profile(db_session, id) - db_session.delete(network_profile) - (db_session.query(n1kv_models_v2.ProfileBinding). 
- filter_by(profile_id=id).delete()) - return network_profile - except exc.NoResultFound: - raise c_exc.ProfileTenantBindingNotFound(profile_id=id) - - -def update_network_profile(db_session, id, network_profile): - """Update Network Profile.""" - LOG.debug(_("update_network_profile()")) - with db_session.begin(subtransactions=True): - profile = get_network_profile(db_session, id) - profile.update(network_profile) - return profile - - -def get_network_profile(db_session, id): - """Get Network Profile.""" - LOG.debug(_("get_network_profile()")) - try: - return db_session.query( - n1kv_models_v2.NetworkProfile).filter_by(id=id).one() - except exc.NoResultFound: - raise c_exc.NetworkProfileNotFound(profile=id) - - -def _get_network_profiles(db_session=None, physical_network=None): - """ - Retrieve all network profiles. - - Get Network Profiles on a particular physical network, if physical - network is specified. If no physical network is specified, return - all network profiles. - """ - db_session = db_session or db.get_session() - if physical_network: - return (db_session.query(n1kv_models_v2.NetworkProfile). 
- filter_by(physical_network=physical_network)) - return db_session.query(n1kv_models_v2.NetworkProfile) - - -def create_policy_profile(policy_profile): - """Create Policy Profile.""" - LOG.debug(_("create_policy_profile()")) - db_session = db.get_session() - with db_session.begin(subtransactions=True): - p_profile = n1kv_models_v2.PolicyProfile(id=policy_profile["id"], - name=policy_profile["name"]) - db_session.add(p_profile) - return p_profile - - -def delete_policy_profile(id): - """Delete Policy Profile.""" - LOG.debug(_("delete_policy_profile()")) - db_session = db.get_session() - with db_session.begin(subtransactions=True): - policy_profile = get_policy_profile(db_session, id) - db_session.delete(policy_profile) - - -def update_policy_profile(db_session, id, policy_profile): - """Update a policy profile.""" - LOG.debug(_("update_policy_profile()")) - with db_session.begin(subtransactions=True): - _profile = get_policy_profile(db_session, id) - _profile.update(policy_profile) - return _profile - - -def get_policy_profile(db_session, id): - """Get Policy Profile.""" - LOG.debug(_("get_policy_profile()")) - try: - return db_session.query( - n1kv_models_v2.PolicyProfile).filter_by(id=id).one() - except exc.NoResultFound: - raise c_exc.PolicyProfileIdNotFound(profile_id=id) - - -def get_policy_profiles(): - """Retrieve all policy profiles.""" - db_session = db.get_session() - with db_session.begin(subtransactions=True): - return db_session.query(n1kv_models_v2.PolicyProfile) - - -def create_profile_binding(db_session, tenant_id, profile_id, profile_type): - """Create Network/Policy Profile association with a tenant.""" - db_session = db_session or db.get_session() - if profile_type not in ["network", "policy"]: - raise n_exc.NeutronException(_("Invalid profile type")) - - if _profile_binding_exists(db_session, - tenant_id, - profile_id, - profile_type): - return get_profile_binding(db_session, tenant_id, profile_id) - - with 
db_session.begin(subtransactions=True): - binding = n1kv_models_v2.ProfileBinding(profile_type=profile_type, - profile_id=profile_id, - tenant_id=tenant_id) - db_session.add(binding) - return binding - - -def _profile_binding_exists(db_session, tenant_id, profile_id, profile_type): - LOG.debug(_("_profile_binding_exists()")) - return (db_session.query(n1kv_models_v2.ProfileBinding). - filter_by(tenant_id=tenant_id, profile_id=profile_id, - profile_type=profile_type).first()) - - -def get_profile_binding(db_session, tenant_id, profile_id): - """Get Network/Policy Profile - Tenant binding.""" - LOG.debug(_("get_profile_binding()")) - try: - return (db_session.query(n1kv_models_v2.ProfileBinding).filter_by( - tenant_id=tenant_id, profile_id=profile_id).one()) - except exc.NoResultFound: - raise c_exc.ProfileTenantBindingNotFound(profile_id=profile_id) - - -def delete_profile_binding(db_session, tenant_id, profile_id): - """Delete Policy Binding.""" - LOG.debug(_("delete_profile_binding()")) - db_session = db_session or db.get_session() - try: - binding = get_profile_binding(db_session, tenant_id, profile_id) - with db_session.begin(subtransactions=True): - db_session.delete(binding) - except c_exc.ProfileTenantBindingNotFound: - LOG.debug(_("Profile-Tenant binding missing for profile ID " - "%(profile_id)s and tenant ID %(tenant_id)s"), - {"profile_id": profile_id, "tenant_id": tenant_id}) - return - - -def _get_profile_bindings(db_session, profile_type=None): - """ - Retrieve a list of profile bindings. - - Get all profile-tenant bindings based on profile type. - If profile type is None, return profile-tenant binding for all - profile types. - """ - LOG.debug(_("_get_profile_bindings()")) - if profile_type: - profile_bindings = (db_session.query(n1kv_models_v2.ProfileBinding). 
- filter_by(profile_type=profile_type)) - return profile_bindings - return db_session.query(n1kv_models_v2.ProfileBinding) - - -class NetworkProfile_db_mixin(object): - - """Network Profile Mixin.""" - - def _replace_fake_tenant_id_with_real(self, context): - """ - Replace default tenant-id with admin tenant-ids. - - Default tenant-ids are populated in profile bindings when plugin is - initialized. Replace these tenant-ids with admin's tenant-id. - :param context: neutron api request context - """ - if context.is_admin and context.tenant_id: - tenant_id = context.tenant_id - db_session = context.session - with db_session.begin(subtransactions=True): - (db_session.query(n1kv_models_v2.ProfileBinding). - filter_by(tenant_id=c_const.TENANT_ID_NOT_SET). - update({'tenant_id': tenant_id})) - - def _get_network_collection_for_tenant(self, db_session, model, tenant_id): - net_profile_ids = (db_session.query(n1kv_models_v2.ProfileBinding. - profile_id). - filter_by(tenant_id=tenant_id). - filter_by(profile_type=c_const.NETWORK)) - network_profiles = (db_session.query(model).filter(model.id.in_( - pid[0] for pid in net_profile_ids))) - return [self._make_network_profile_dict(p) for p in network_profiles] - - def _make_profile_bindings_dict(self, profile_binding, fields=None): - res = {"profile_id": profile_binding["profile_id"], - "tenant_id": profile_binding["tenant_id"]} - return self._fields(res, fields) - - def _make_network_profile_dict(self, network_profile, fields=None): - res = {"id": network_profile["id"], - "name": network_profile["name"], - "segment_type": network_profile["segment_type"], - "sub_type": network_profile["sub_type"], - "segment_range": network_profile["segment_range"], - "multicast_ip_index": network_profile["multicast_ip_index"], - "multicast_ip_range": network_profile["multicast_ip_range"], - "physical_network": network_profile["physical_network"]} - return self._fields(res, fields) - - def _segment_in_use(self, db_session, network_profile): - 
"""Verify whether a segment is allocated for given network profile.""" - with db_session.begin(subtransactions=True): - return (db_session.query(n1kv_models_v2.N1kvNetworkBinding). - filter_by(profile_id=network_profile['id'])).first() - - def get_network_profile_bindings(self, context, filters=None, fields=None): - """ - Retrieve a list of profile bindings for network profiles. - - :param context: neutron api request context - :param filters: a dictionary with keys that are valid keys for a - profile bindings object. Values in this dictiontary are - an iterable containing values that will be used for an - exact match comparison for that value. Each result - returned by this function will have matched one of the - values for each key in filters - :params fields: a list of strings that are valid keys in a profile - bindings dictionary. Only these fields will be returned - :returns: list of profile bindings - """ - if context.is_admin: - profile_bindings = _get_profile_bindings( - context.session, - profile_type=c_const.NETWORK) - return [self._make_profile_bindings_dict(pb) - for pb in profile_bindings] - - def create_network_profile(self, context, network_profile): - """ - Create a network profile. 
- - :param context: neutron api request context - :param network_profile: network profile dictionary - :returns: network profile dictionary - """ - self._replace_fake_tenant_id_with_real(context) - p = network_profile["network_profile"] - self._validate_network_profile_args(context, p) - with context.session.begin(subtransactions=True): - net_profile = create_network_profile(context.session, p) - if net_profile.segment_type == c_const.NETWORK_TYPE_VLAN: - sync_vlan_allocations(context.session, net_profile) - elif net_profile.segment_type == c_const.NETWORK_TYPE_OVERLAY: - sync_vxlan_allocations(context.session, net_profile) - create_profile_binding(context.session, - context.tenant_id, - net_profile.id, - c_const.NETWORK) - if p.get("add_tenant"): - self.add_network_profile_tenant(context.session, - net_profile.id, - p["add_tenant"]) - return self._make_network_profile_dict(net_profile) - - def delete_network_profile(self, context, id): - """ - Delete a network profile. - - :param context: neutron api request context - :param id: UUID representing network profile to delete - :returns: deleted network profile dictionary - """ - # Check whether the network profile is in use. - if self._segment_in_use(context.session, - get_network_profile(context.session, id)): - raise c_exc.NetworkProfileInUse(profile=id) - # Delete and return the network profile if it is not in use. - _profile = delete_network_profile(context.session, id) - return self._make_network_profile_dict(_profile) - - def update_network_profile(self, context, id, network_profile): - """ - Update a network profile. - - Add/remove network profile to tenant-id binding for the corresponding - options and if user is admin. - :param context: neutron api request context - :param id: UUID representing network profile to update - :param network_profile: network profile dictionary - :returns: updated network profile dictionary - """ - # Flag to check whether network profile is updated or not. 
- is_updated = False - p = network_profile["network_profile"] - original_net_p = get_network_profile(context.session, id) - # Update network profile to tenant id binding. - if context.is_admin and "add_tenant" in p: - self.add_network_profile_tenant(context.session, id, - p["add_tenant"]) - is_updated = True - if context.is_admin and "remove_tenant" in p: - delete_profile_binding(context.session, p["remove_tenant"], id) - is_updated = True - if original_net_p.segment_type == c_const.NETWORK_TYPE_TRUNK: - #TODO(abhraut): Remove check when Trunk supports segment range. - if p.get('segment_range'): - msg = _("segment_range not required for TRUNK") - LOG.error(msg) - raise n_exc.InvalidInput(error_message=msg) - if original_net_p.segment_type in [c_const.NETWORK_TYPE_VLAN, - c_const.NETWORK_TYPE_TRUNK]: - if p.get("multicast_ip_range"): - msg = _("multicast_ip_range not required") - LOG.error(msg) - raise n_exc.InvalidInput(error_message=msg) - # Update segment range if network profile is not in use. 
- if (p.get("segment_range") and - p.get("segment_range") != original_net_p.segment_range): - if not self._segment_in_use(context.session, original_net_p): - delete_segment_allocations(context.session, original_net_p) - updated_net_p = update_network_profile(context.session, id, p) - self._validate_segment_range_uniqueness(context, - updated_net_p, id) - if original_net_p.segment_type == c_const.NETWORK_TYPE_VLAN: - sync_vlan_allocations(context.session, updated_net_p) - if original_net_p.segment_type == c_const.NETWORK_TYPE_OVERLAY: - sync_vxlan_allocations(context.session, updated_net_p) - is_updated = True - else: - raise c_exc.NetworkProfileInUse(profile=id) - if (p.get('multicast_ip_range') and - (p.get("multicast_ip_range") != - original_net_p.get("multicast_ip_range"))): - self._validate_multicast_ip_range(p) - if not self._segment_in_use(context.session, original_net_p): - is_updated = True - else: - raise c_exc.NetworkProfileInUse(profile=id) - # Update network profile if name is updated and the network profile - # is not yet updated. - if "name" in p and not is_updated: - is_updated = True - # Return network profile if it is successfully updated. - if is_updated: - return self._make_network_profile_dict( - update_network_profile(context.session, id, p)) - - def get_network_profile(self, context, id, fields=None): - """ - Retrieve a network profile. - - :param context: neutron api request context - :param id: UUID representing the network profile to retrieve - :params fields: a list of strings that are valid keys in a network - profile dictionary. Only these fields will be returned - :returns: network profile dictionary - """ - profile = get_network_profile(context.session, id) - return self._make_network_profile_dict(profile, fields) - - def get_network_profiles(self, context, filters=None, fields=None): - """ - Retrieve a list of all network profiles. - - Retrieve all network profiles if tenant is admin. 
For a non-admin - tenant, retrieve all network profiles belonging to this tenant only. - :param context: neutron api request context - :param filters: a dictionary with keys that are valid keys for a - network profile object. Values in this dictiontary are - an iterable containing values that will be used for an - exact match comparison for that value. Each result - returned by this function will have matched one of the - values for each key in filters - :params fields: a list of strings that are valid keys in a network - profile dictionary. Only these fields will be returned - :returns: list of all network profiles - """ - if context.is_admin: - return self._get_collection(context, n1kv_models_v2.NetworkProfile, - self._make_network_profile_dict, - filters=filters, fields=fields) - return self._get_network_collection_for_tenant(context.session, - n1kv_models_v2. - NetworkProfile, - context.tenant_id) - - def add_network_profile_tenant(self, - db_session, - network_profile_id, - tenant_id): - """ - Add a tenant to a network profile. - - :param db_session: database session - :param network_profile_id: UUID representing network profile - :param tenant_id: UUID representing the tenant - :returns: profile binding object - """ - return create_profile_binding(db_session, - tenant_id, - network_profile_id, - c_const.NETWORK) - - def network_profile_exists(self, context, id): - """ - Verify whether a network profile for given id exists. - - :param context: neutron api request context - :param id: UUID representing network profile - :returns: true if network profile exist else False - """ - try: - get_network_profile(context.session, id) - return True - except c_exc.NetworkProfileNotFound(profile=id): - return False - - def _get_segment_range(self, data): - return (int(seg) for seg in data.split("-")[:2]) - - def _validate_network_profile_args(self, context, p): - """ - Validate completeness of Nexus1000V network profile arguments. 
- - :param context: neutron api request context - :param p: network profile object - """ - self._validate_network_profile(p) - segment_type = p['segment_type'].lower() - if segment_type != c_const.NETWORK_TYPE_TRUNK: - self._validate_segment_range_uniqueness(context, p) - - def _validate_segment_range(self, network_profile): - """ - Validate segment range values. - - :param network_profile: network profile object - """ - if not re.match(r"(\d+)\-(\d+)", network_profile["segment_range"]): - msg = _("Invalid segment range. example range: 500-550") - raise n_exc.InvalidInput(error_message=msg) - - def _validate_multicast_ip_range(self, network_profile): - """ - Validate multicast ip range values. - - :param network_profile: network profile object - """ - try: - min_ip, max_ip = (network_profile - ['multicast_ip_range'].split('-', 1)) - except ValueError: - msg = _("Invalid multicast ip address range. " - "example range: 224.1.1.1-224.1.1.10") - LOG.error(msg) - raise n_exc.InvalidInput(error_message=msg) - for ip in [min_ip, max_ip]: - try: - if not netaddr.IPAddress(ip).is_multicast(): - msg = _("%s is not a valid multicast ip address") % ip - LOG.error(msg) - raise n_exc.InvalidInput(error_message=msg) - if netaddr.IPAddress(ip) <= netaddr.IPAddress('224.0.0.255'): - msg = _("%s is reserved multicast ip address") % ip - LOG.error(msg) - raise n_exc.InvalidInput(error_message=msg) - except netaddr.AddrFormatError: - msg = _("%s is not a valid ip address") % ip - LOG.error(msg) - raise n_exc.InvalidInput(error_message=msg) - if netaddr.IPAddress(min_ip) > netaddr.IPAddress(max_ip): - msg = (_("Invalid multicast IP range '%(min_ip)s-%(max_ip)s':" - " Range should be from low address to high address") % - {'min_ip': min_ip, 'max_ip': max_ip}) - LOG.error(msg) - raise n_exc.InvalidInput(error_message=msg) - - def _validate_network_profile(self, net_p): - """ - Validate completeness of a network profile arguments. 
- - :param net_p: network profile object - """ - if any(net_p[arg] == "" for arg in ["segment_type"]): - msg = _("Arguments segment_type missing" - " for network profile") - LOG.error(msg) - raise n_exc.InvalidInput(error_message=msg) - segment_type = net_p["segment_type"].lower() - if segment_type not in [c_const.NETWORK_TYPE_VLAN, - c_const.NETWORK_TYPE_OVERLAY, - c_const.NETWORK_TYPE_TRUNK, - c_const.NETWORK_TYPE_MULTI_SEGMENT]: - msg = _("segment_type should either be vlan, overlay, " - "multi-segment or trunk") - LOG.error(msg) - raise n_exc.InvalidInput(error_message=msg) - if segment_type == c_const.NETWORK_TYPE_VLAN: - if "physical_network" not in net_p: - msg = _("Argument physical_network missing " - "for network profile") - LOG.error(msg) - raise n_exc.InvalidInput(error_message=msg) - if segment_type == c_const.NETWORK_TYPE_TRUNK: - if net_p["segment_range"]: - msg = _("segment_range not required for trunk") - LOG.error(msg) - raise n_exc.InvalidInput(error_message=msg) - if segment_type in [c_const.NETWORK_TYPE_TRUNK, - c_const.NETWORK_TYPE_OVERLAY]: - if not attributes.is_attr_set(net_p.get("sub_type")): - msg = _("Argument sub_type missing " - "for network profile") - LOG.error(msg) - raise n_exc.InvalidInput(error_message=msg) - if segment_type in [c_const.NETWORK_TYPE_VLAN, - c_const.NETWORK_TYPE_OVERLAY]: - if "segment_range" not in net_p: - msg = _("Argument segment_range missing " - "for network profile") - LOG.error(msg) - raise n_exc.InvalidInput(error_message=msg) - self._validate_segment_range(net_p) - if segment_type == c_const.NETWORK_TYPE_OVERLAY: - if net_p['sub_type'] != c_const.NETWORK_SUBTYPE_NATIVE_VXLAN: - net_p['multicast_ip_range'] = '0.0.0.0' - else: - multicast_ip_range = net_p.get("multicast_ip_range") - if not attributes.is_attr_set(multicast_ip_range): - msg = _("Argument multicast_ip_range missing" - " for VXLAN multicast network profile") - LOG.error(msg) - raise n_exc.InvalidInput(error_message=msg) - 
self._validate_multicast_ip_range(net_p) - else: - net_p['multicast_ip_range'] = '0.0.0.0' - - def _validate_segment_range_uniqueness(self, context, net_p, id=None): - """ - Validate that segment range doesn't overlap. - - :param context: neutron api request context - :param net_p: network profile dictionary - :param id: UUID representing the network profile being updated - """ - segment_type = net_p["segment_type"].lower() - seg_min, seg_max = self._get_segment_range(net_p['segment_range']) - if segment_type == c_const.NETWORK_TYPE_VLAN: - if not ((seg_min <= seg_max) and - ((seg_min in range(constants.MIN_VLAN_TAG, - c_const.NEXUS_VLAN_RESERVED_MIN) and - seg_max in range(constants.MIN_VLAN_TAG, - c_const.NEXUS_VLAN_RESERVED_MIN)) or - (seg_min in range(c_const.NEXUS_VLAN_RESERVED_MAX + 1, - constants.MAX_VLAN_TAG) and - seg_max in range(c_const.NEXUS_VLAN_RESERVED_MAX + 1, - constants.MAX_VLAN_TAG)))): - msg = (_("Segment range is invalid, select from " - "%(min)s-%(nmin)s, %(nmax)s-%(max)s") % - {"min": constants.MIN_VLAN_TAG, - "nmin": c_const.NEXUS_VLAN_RESERVED_MIN - 1, - "nmax": c_const.NEXUS_VLAN_RESERVED_MAX + 1, - "max": constants.MAX_VLAN_TAG - 1}) - LOG.error(msg) - raise n_exc.InvalidInput(error_message=msg) - profiles = _get_network_profiles( - db_session=context.session, - physical_network=net_p["physical_network"] - ) - elif segment_type in [c_const.NETWORK_TYPE_OVERLAY, - c_const.NETWORK_TYPE_MULTI_SEGMENT, - c_const.NETWORK_TYPE_TRUNK]: - if (seg_min > seg_max or - seg_min < c_const.NEXUS_VXLAN_MIN or - seg_max > c_const.NEXUS_VXLAN_MAX): - msg = (_("segment range is invalid. 
Valid range is : " - "%(min)s-%(max)s") % - {"min": c_const.NEXUS_VXLAN_MIN, - "max": c_const.NEXUS_VXLAN_MAX}) - LOG.error(msg) - raise n_exc.InvalidInput(error_message=msg) - profiles = _get_network_profiles(db_session=context.session) - if profiles: - for profile in profiles: - if id and profile.id == id: - continue - name = profile.name - segment_range = profile.segment_range - if net_p["name"] == name: - msg = (_("NetworkProfile name %s already exists"), - net_p["name"]) - LOG.error(msg) - raise n_exc.InvalidInput(error_message=msg) - if (c_const.NETWORK_TYPE_MULTI_SEGMENT in - [profile.segment_type, net_p["segment_type"]] or - c_const.NETWORK_TYPE_TRUNK in - [profile.segment_type, net_p["segment_type"]]): - continue - seg_min, seg_max = self._get_segment_range( - net_p["segment_range"]) - profile_seg_min, profile_seg_max = self._get_segment_range( - segment_range) - if ((profile_seg_min <= seg_min <= profile_seg_max) or - (profile_seg_min <= seg_max <= profile_seg_max) or - ((seg_min <= profile_seg_min) and - (seg_max >= profile_seg_max))): - msg = _("Segment range overlaps with another profile") - LOG.error(msg) - raise n_exc.InvalidInput(error_message=msg) - - def _get_network_profile_by_name(self, db_session, name): - """ - Retrieve network profile based on name. - - :param db_session: database session - :param name: string representing the name for the network profile - :returns: network profile object - """ - with db_session.begin(subtransactions=True): - try: - return (db_session.query(n1kv_models_v2.NetworkProfile). - filter_by(name=name).one()) - except exc.NoResultFound: - raise c_exc.NetworkProfileNotFound(profile=name) - - -class PolicyProfile_db_mixin(object): - - """Policy Profile Mixin.""" - - def _get_policy_collection_for_tenant(self, db_session, model, tenant_id): - profile_ids = (db_session.query(n1kv_models_v2. - ProfileBinding.profile_id) - .filter_by(tenant_id=tenant_id). 
- filter_by(profile_type=c_const.POLICY).all()) - profiles = db_session.query(model).filter(model.id.in_( - pid[0] for pid in profile_ids)) - return [self._make_policy_profile_dict(p) for p in profiles] - - def _make_policy_profile_dict(self, policy_profile, fields=None): - res = {"id": policy_profile["id"], "name": policy_profile["name"]} - return self._fields(res, fields) - - def _make_profile_bindings_dict(self, profile_binding, fields=None): - res = {"profile_id": profile_binding["profile_id"], - "tenant_id": profile_binding["tenant_id"]} - return self._fields(res, fields) - - def _policy_profile_exists(self, id): - db_session = db.get_session() - return (db_session.query(n1kv_models_v2.PolicyProfile). - filter_by(id=id).first()) - - def get_policy_profile(self, context, id, fields=None): - """ - Retrieve a policy profile for the given UUID. - - :param context: neutron api request context - :param id: UUID representing policy profile to fetch - :params fields: a list of strings that are valid keys in a policy - profile dictionary. Only these fields will be returned - :returns: policy profile dictionary - """ - profile = get_policy_profile(context.session, id) - return self._make_policy_profile_dict(profile, fields) - - def get_policy_profiles(self, context, filters=None, fields=None): - """ - Retrieve a list of policy profiles. - - Retrieve all policy profiles if tenant is admin. For a non-admin - tenant, retrieve all policy profiles belonging to this tenant only. - :param context: neutron api request context - :param filters: a dictionary with keys that are valid keys for a - policy profile object. Values in this dictiontary are - an iterable containing values that will be used for an - exact match comparison for that value. Each result - returned by this function will have matched one of the - values for each key in filters - :params fields: a list of strings that are valid keys in a policy - profile dictionary. 
Only these fields will be returned - :returns: list of all policy profiles - """ - if context.is_admin: - return self._get_collection(context, n1kv_models_v2.PolicyProfile, - self._make_policy_profile_dict, - filters=filters, fields=fields) - else: - return self._get_policy_collection_for_tenant(context.session, - n1kv_models_v2. - PolicyProfile, - context.tenant_id) - - def get_policy_profile_bindings(self, context, filters=None, fields=None): - """ - Retrieve a list of profile bindings for policy profiles. - - :param context: neutron api request context - :param filters: a dictionary with keys that are valid keys for a - profile bindings object. Values in this dictiontary are - an iterable containing values that will be used for an - exact match comparison for that value. Each result - returned by this function will have matched one of the - values for each key in filters - :params fields: a list of strings that are valid keys in a profile - bindings dictionary. Only these fields will be returned - :returns: list of profile bindings - """ - if context.is_admin: - profile_bindings = _get_profile_bindings( - context.session, - profile_type=c_const.POLICY) - return [self._make_profile_bindings_dict(pb) - for pb in profile_bindings] - - def update_policy_profile(self, context, id, policy_profile): - """ - Update a policy profile. - - Add/remove policy profile to tenant-id binding for the corresponding - option and if user is admin. 
- :param context: neutron api request context - :param id: UUID representing policy profile to update - :param policy_profile: policy profile dictionary - :returns: updated policy profile dictionary - """ - p = policy_profile["policy_profile"] - if context.is_admin and "add_tenant" in p: - self.add_policy_profile_tenant(context.session, - id, - p["add_tenant"]) - return self._make_policy_profile_dict(get_policy_profile( - context.session, id)) - if context.is_admin and "remove_tenant" in p: - delete_profile_binding(context.session, p["remove_tenant"], id) - return self._make_policy_profile_dict(get_policy_profile( - context.session, id)) - return self._make_policy_profile_dict( - update_policy_profile(context.session, id, p)) - - def add_policy_profile_tenant(self, - db_session, - policy_profile_id, - tenant_id): - """ - Add a tenant to a policy profile binding. - - :param db_session: database session - :param policy_profile_id: UUID representing policy profile - :param tenant_id: UUID representing the tenant - :returns: profile binding object - """ - return create_profile_binding(db_session, - tenant_id, - policy_profile_id, - c_const.POLICY) - - def remove_policy_profile_tenant(self, policy_profile_id, tenant_id): - """ - Remove a tenant to a policy profile binding. - - :param policy_profile_id: UUID representing policy profile - :param tenant_id: UUID representing the tenant - """ - delete_profile_binding(None, tenant_id, policy_profile_id) - - def _delete_policy_profile(self, policy_profile_id): - """Delete policy profile and associated binding.""" - db_session = db.get_session() - with db_session.begin(subtransactions=True): - (db_session.query(n1kv_models_v2.PolicyProfile). - filter_by(id=policy_profile_id).delete()) - - def _get_policy_profile_by_name(self, name): - """ - Retrieve policy profile based on name. 
- - :param name: string representing the name for the policy profile - :returns: policy profile object - """ - db_session = db.get_session() - with db_session.begin(subtransactions=True): - return (db_session.query(n1kv_models_v2.PolicyProfile). - filter_by(name=name).one()) - - def _remove_all_fake_policy_profiles(self): - """ - Remove all policy profiles associated with fake tenant id. - - This will find all Profile ID where tenant is not set yet - set A - and profiles where tenant was already set - set B - and remove what is in both and no tenant id set - """ - db_session = db.get_session() - with db_session.begin(subtransactions=True): - a_set_q = (db_session.query(n1kv_models_v2.ProfileBinding). - filter_by(tenant_id=c_const.TENANT_ID_NOT_SET, - profile_type=c_const.POLICY)) - a_set = set(i.profile_id for i in a_set_q) - b_set_q = (db_session.query(n1kv_models_v2.ProfileBinding). - filter(sql.and_(n1kv_models_v2.ProfileBinding. - tenant_id != c_const.TENANT_ID_NOT_SET, - n1kv_models_v2.ProfileBinding. - profile_type == c_const.POLICY))) - b_set = set(i.profile_id for i in b_set_q) - (db_session.query(n1kv_models_v2.ProfileBinding). - filter(sql.and_(n1kv_models_v2.ProfileBinding.profile_id. - in_(a_set & b_set), - n1kv_models_v2.ProfileBinding.tenant_id == - c_const.TENANT_ID_NOT_SET)). - delete(synchronize_session="fetch")) - - def _add_policy_profile(self, - policy_profile_name, - policy_profile_id, - tenant_id=None): - """ - Add Policy profile and tenant binding. 
- - :param policy_profile_name: string representing the name for the - policy profile - :param policy_profile_id: UUID representing the policy profile - :param tenant_id: UUID representing the tenant - """ - policy_profile = {"id": policy_profile_id, "name": policy_profile_name} - tenant_id = tenant_id or c_const.TENANT_ID_NOT_SET - if not self._policy_profile_exists(policy_profile_id): - create_policy_profile(policy_profile) - create_profile_binding(None, - tenant_id, - policy_profile["id"], - c_const.POLICY) diff --git a/neutron/plugins/cisco/db/n1kv_models_v2.py b/neutron/plugins/cisco/db/n1kv_models_v2.py deleted file mode 100644 index 6c81aabba..000000000 --- a/neutron/plugins/cisco/db/n1kv_models_v2.py +++ /dev/null @@ -1,185 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2013 Cisco Systems, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Abhishek Raut, Cisco Systems Inc. -# @author: Rudrajit Tapadar, Cisco Systems Inc. 
- -import sqlalchemy as sa - -from neutron.db import model_base -from neutron.db import models_v2 -from neutron.openstack.common import log as logging -from neutron.plugins.cisco.common import cisco_constants - - -LOG = logging.getLogger(__name__) - - -class N1kvVlanAllocation(model_base.BASEV2): - - """Represents allocation state of vlan_id on physical network.""" - __tablename__ = 'cisco_n1kv_vlan_allocations' - - physical_network = sa.Column(sa.String(64), - nullable=False, - primary_key=True) - vlan_id = sa.Column(sa.Integer, nullable=False, primary_key=True, - autoincrement=False) - allocated = sa.Column(sa.Boolean, nullable=False, default=False) - network_profile_id = sa.Column(sa.String(36), - sa.ForeignKey('cisco_network_profiles.id', - ondelete="CASCADE"), - nullable=False) - - -class N1kvVxlanAllocation(model_base.BASEV2): - - """Represents allocation state of vxlan_id.""" - __tablename__ = 'cisco_n1kv_vxlan_allocations' - - vxlan_id = sa.Column(sa.Integer, nullable=False, primary_key=True, - autoincrement=False) - allocated = sa.Column(sa.Boolean, nullable=False, default=False) - network_profile_id = sa.Column(sa.String(36), - sa.ForeignKey('cisco_network_profiles.id', - ondelete="CASCADE"), - nullable=False) - - -class N1kvPortBinding(model_base.BASEV2): - - """Represents binding of ports to policy profile.""" - __tablename__ = 'cisco_n1kv_port_bindings' - - port_id = sa.Column(sa.String(36), - sa.ForeignKey('ports.id', ondelete="CASCADE"), - primary_key=True) - profile_id = sa.Column(sa.String(36), - sa.ForeignKey('cisco_policy_profiles.id')) - - -class N1kvNetworkBinding(model_base.BASEV2): - - """Represents binding of virtual network to physical realization.""" - __tablename__ = 'cisco_n1kv_network_bindings' - - network_id = sa.Column(sa.String(36), - sa.ForeignKey('networks.id', ondelete="CASCADE"), - primary_key=True) - network_type = sa.Column(sa.String(32), nullable=False) - physical_network = sa.Column(sa.String(64)) - segmentation_id = 
sa.Column(sa.Integer) - multicast_ip = sa.Column(sa.String(32)) - profile_id = sa.Column(sa.String(36), - sa.ForeignKey('cisco_network_profiles.id')) - - -class N1kVmNetwork(model_base.BASEV2): - - """Represents VM Network information.""" - __tablename__ = 'cisco_n1kv_vmnetworks' - - name = sa.Column(sa.String(80), primary_key=True) - profile_id = sa.Column(sa.String(36), - sa.ForeignKey('cisco_policy_profiles.id')) - network_id = sa.Column(sa.String(36)) - port_count = sa.Column(sa.Integer) - - -class NetworkProfile(model_base.BASEV2, models_v2.HasId): - - """ - Nexus1000V Network Profiles - - segment_type - VLAN, OVERLAY, TRUNK, MULTI_SEGMENT - sub_type - TRUNK_VLAN, TRUNK_VXLAN, native_vxlan, enhanced_vxlan - segment_range - '-' - multicast_ip_index - - multicast_ip_range - '-' - physical_network - Name for the physical network - """ - __tablename__ = 'cisco_network_profiles' - - name = sa.Column(sa.String(255)) - segment_type = sa.Column(sa.Enum(cisco_constants.NETWORK_TYPE_VLAN, - cisco_constants.NETWORK_TYPE_OVERLAY, - cisco_constants.NETWORK_TYPE_TRUNK, - cisco_constants. 
- NETWORK_TYPE_MULTI_SEGMENT, - name='segment_type'), - nullable=False) - sub_type = sa.Column(sa.String(255)) - segment_range = sa.Column(sa.String(255)) - multicast_ip_index = sa.Column(sa.Integer, default=0) - multicast_ip_range = sa.Column(sa.String(255)) - physical_network = sa.Column(sa.String(255)) - - -class PolicyProfile(model_base.BASEV2): - - """ - Nexus1000V Network Profiles - - Both 'id' and 'name' are coming from Nexus1000V switch - """ - __tablename__ = 'cisco_policy_profiles' - - id = sa.Column(sa.String(36), primary_key=True) - name = sa.Column(sa.String(255)) - - -class ProfileBinding(model_base.BASEV2): - - """ - Represents a binding of Network Profile - or Policy Profile to tenant_id - """ - __tablename__ = 'cisco_n1kv_profile_bindings' - - profile_type = sa.Column(sa.Enum(cisco_constants.NETWORK, - cisco_constants.POLICY, - name='profile_type')) - tenant_id = sa.Column(sa.String(36), - primary_key=True, - default=cisco_constants.TENANT_ID_NOT_SET) - profile_id = sa.Column(sa.String(36), primary_key=True) - - -class N1kvTrunkSegmentBinding(model_base.BASEV2): - - """Represents binding of segments in trunk networks.""" - __tablename__ = 'cisco_n1kv_trunk_segments' - - trunk_segment_id = sa.Column(sa.String(36), - sa.ForeignKey('networks.id', - ondelete="CASCADE"), - primary_key=True) - segment_id = sa.Column(sa.String(36), nullable=False, primary_key=True) - dot1qtag = sa.Column(sa.String(36), nullable=False, primary_key=True) - - -class N1kvMultiSegmentNetworkBinding(model_base.BASEV2): - - """Represents binding of segments in multi-segment networks.""" - __tablename__ = 'cisco_n1kv_multi_segments' - - multi_segment_id = sa.Column(sa.String(36), - sa.ForeignKey('networks.id', - ondelete="CASCADE"), - primary_key=True) - segment1_id = sa.Column(sa.String(36), nullable=False, primary_key=True) - segment2_id = sa.Column(sa.String(36), nullable=False, primary_key=True) - encap_profile_name = sa.Column(sa.String(36)) diff --git 
a/neutron/plugins/cisco/db/network_db_v2.py b/neutron/plugins/cisco/db/network_db_v2.py deleted file mode 100644 index 94c5a37de..000000000 --- a/neutron/plugins/cisco/db/network_db_v2.py +++ /dev/null @@ -1,290 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2012, Cisco Systems, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Rohit Agarwalla, Cisco Systems, Inc. - -from sqlalchemy.orm import exc - -from neutron.db import api as db -from neutron.openstack.common import log as logging -from neutron.openstack.common import uuidutils -from neutron.plugins.cisco.common import cisco_constants as const -from neutron.plugins.cisco.common import cisco_exceptions as c_exc -from neutron.plugins.cisco.db import network_models_v2 -# Do NOT remove this import. It is required for all the models to be seen -# by db.initialize() when called from VirtualPhysicalSwitchModelV2.__init__. -from neutron.plugins.cisco.db import nexus_models_v2 # noqa -from neutron.plugins.openvswitch import ovs_models_v2 - - -LOG = logging.getLogger(__name__) - - -def get_all_qoss(tenant_id): - """Lists all the qos to tenant associations.""" - LOG.debug(_("get_all_qoss() called")) - session = db.get_session() - return (session.query(network_models_v2.QoS). 
- filter_by(tenant_id=tenant_id).all()) - - -def get_qos(tenant_id, qos_id): - """Lists the qos given a tenant_id and qos_id.""" - LOG.debug(_("get_qos() called")) - session = db.get_session() - try: - return (session.query(network_models_v2.QoS). - filter_by(tenant_id=tenant_id). - filter_by(qos_id=qos_id).one()) - except exc.NoResultFound: - raise c_exc.QosNotFound(qos_id=qos_id, - tenant_id=tenant_id) - - -def add_qos(tenant_id, qos_name, qos_desc): - """Adds a qos to tenant association.""" - LOG.debug(_("add_qos() called")) - session = db.get_session() - try: - qos = (session.query(network_models_v2.QoS). - filter_by(tenant_id=tenant_id). - filter_by(qos_name=qos_name).one()) - raise c_exc.QosNameAlreadyExists(qos_name=qos_name, - tenant_id=tenant_id) - except exc.NoResultFound: - qos = network_models_v2.QoS(qos_id=uuidutils.generate_uuid(), - tenant_id=tenant_id, - qos_name=qos_name, - qos_desc=qos_desc) - session.add(qos) - session.flush() - return qos - - -def remove_qos(tenant_id, qos_id): - """Removes a qos to tenant association.""" - session = db.get_session() - try: - qos = (session.query(network_models_v2.QoS). - filter_by(tenant_id=tenant_id). - filter_by(qos_id=qos_id).one()) - session.delete(qos) - session.flush() - return qos - except exc.NoResultFound: - pass - - -def update_qos(tenant_id, qos_id, new_qos_name=None): - """Updates a qos to tenant association.""" - session = db.get_session() - try: - qos = (session.query(network_models_v2.QoS). - filter_by(tenant_id=tenant_id). 
- filter_by(qos_id=qos_id).one()) - if new_qos_name: - qos["qos_name"] = new_qos_name - session.merge(qos) - session.flush() - return qos - except exc.NoResultFound: - raise c_exc.QosNotFound(qos_id=qos_id, - tenant_id=tenant_id) - - -def get_all_credentials(): - """Lists all the creds for a tenant.""" - session = db.get_session() - return (session.query(network_models_v2.Credential).all()) - - -def get_credential(credential_id): - """Lists the creds for given a cred_id.""" - session = db.get_session() - try: - return (session.query(network_models_v2.Credential). - filter_by(credential_id=credential_id).one()) - except exc.NoResultFound: - raise c_exc.CredentialNotFound(credential_id=credential_id) - - -def get_credential_name(credential_name): - """Lists the creds for given a cred_name.""" - session = db.get_session() - try: - return (session.query(network_models_v2.Credential). - filter_by(credential_name=credential_name).one()) - except exc.NoResultFound: - raise c_exc.CredentialNameNotFound(credential_name=credential_name) - - -def add_credential(credential_name, user_name, password, type): - """Create a credential.""" - session = db.get_session() - try: - cred = (session.query(network_models_v2.Credential). - filter_by(credential_name=credential_name).one()) - raise c_exc.CredentialAlreadyExists(credential_name=credential_name) - except exc.NoResultFound: - cred = network_models_v2.Credential( - credential_id=uuidutils.generate_uuid(), - credential_name=credential_name, - user_name=user_name, - password=password, - type=type) - session.add(cred) - session.flush() - return cred - - -def remove_credential(credential_id): - """Removes a credential.""" - session = db.get_session() - try: - cred = (session.query(network_models_v2.Credential). 
- filter_by(credential_id=credential_id).one()) - session.delete(cred) - session.flush() - return cred - except exc.NoResultFound: - pass - - -def update_credential(credential_id, - new_user_name=None, new_password=None): - """Updates a credential for a tenant.""" - session = db.get_session() - try: - cred = (session.query(network_models_v2.Credential). - filter_by(credential_id=credential_id).one()) - if new_user_name: - cred["user_name"] = new_user_name - if new_password: - cred["password"] = new_password - session.merge(cred) - session.flush() - return cred - except exc.NoResultFound: - raise c_exc.CredentialNotFound(credential_id=credential_id) - - -def get_all_n1kv_credentials(): - session = db.get_session() - return (session.query(network_models_v2.Credential). - filter_by(type='n1kv')) - - -def add_provider_network(network_id, network_type, segmentation_id): - """Add a network to the provider network table.""" - session = db.get_session() - if session.query(network_models_v2.ProviderNetwork).filter_by( - network_id=network_id).first(): - raise c_exc.ProviderNetworkExists(network_id) - pnet = network_models_v2.ProviderNetwork(network_id=network_id, - network_type=network_type, - segmentation_id=segmentation_id) - session.add(pnet) - session.flush() - - -def remove_provider_network(network_id): - """Remove network_id from the provider network table. - - :param network_id: Any network id. If it is not in the table, do nothing. - :return: network_id if it was in the table and successfully removed. - """ - session = db.get_session() - pnet = (session.query(network_models_v2.ProviderNetwork). 
- filter_by(network_id=network_id).first()) - if pnet: - session.delete(pnet) - session.flush() - return network_id - - -def is_provider_network(network_id): - """Return True if network_id is in the provider network table.""" - session = db.get_session() - if session.query(network_models_v2.ProviderNetwork).filter_by( - network_id=network_id).first(): - return True - - -def is_provider_vlan(vlan_id): - """Check for a for a vlan provider network with the specified vland_id. - - Returns True if the provider network table contains a vlan network - with the specified vlan_id. - """ - session = db.get_session() - if (session.query(network_models_v2.ProviderNetwork). - filter_by(network_type=const.NETWORK_TYPE_VLAN, - segmentation_id=vlan_id).first()): - return True - - -def get_ovs_vlans(): - session = db.get_session() - bindings = (session.query(ovs_models_v2.VlanAllocation.vlan_id). - filter_by(allocated=True)) - return [binding.vlan_id for binding in bindings] - - -class Credential_db_mixin(object): - - """Mixin class for Cisco Credentials as a resource.""" - - def _make_credential_dict(self, credential, fields=None): - res = {'credential_id': credential['credential_id'], - 'credential_name': credential['credential_name'], - 'user_name': credential['user_name'], - 'password': credential['password'], - 'type': credential['type']} - return self._fields(res, fields) - - def create_credential(self, context, credential): - """Create a credential.""" - c = credential['credential'] - cred = add_credential(c['credential_name'], - c['user_name'], - c['password'], - c['type']) - return self._make_credential_dict(cred) - - def get_credentials(self, context, filters=None, fields=None): - """Retrieve a list of credentials.""" - return self._get_collection(context, - network_models_v2.Credential, - self._make_credential_dict, - filters=filters, - fields=fields) - - def get_credential(self, context, id, fields=None): - """Retireve the requested credential based on its id.""" - 
credential = get_credential(id) - return self._make_credential_dict(credential, fields) - - def update_credential(self, context, id, credential): - """Update a credential based on its id.""" - c = credential['credential'] - cred = update_credential(id, - c['user_name'], - c['password']) - return self._make_credential_dict(cred) - - def delete_credential(self, context, id): - """Delete a credential based on its id.""" - return remove_credential(id) diff --git a/neutron/plugins/cisco/db/network_models_v2.py b/neutron/plugins/cisco/db/network_models_v2.py deleted file mode 100644 index 49768371d..000000000 --- a/neutron/plugins/cisco/db/network_models_v2.py +++ /dev/null @@ -1,56 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2012, Cisco Systems, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Rohit Agarwalla, Cisco Systems, Inc. 
- -import sqlalchemy as sa - -from neutron.db import model_base - - -class QoS(model_base.BASEV2): - """Represents QoS policies for a tenant.""" - - __tablename__ = 'cisco_qos_policies' - - qos_id = sa.Column(sa.String(255)) - tenant_id = sa.Column(sa.String(255), primary_key=True) - qos_name = sa.Column(sa.String(255), primary_key=True) - qos_desc = sa.Column(sa.String(255)) - - -class Credential(model_base.BASEV2): - """Represents credentials for a tenant to control Cisco switches.""" - - __tablename__ = 'cisco_credentials' - - credential_id = sa.Column(sa.String(255)) - credential_name = sa.Column(sa.String(255), primary_key=True) - user_name = sa.Column(sa.String(255)) - password = sa.Column(sa.String(255)) - type = sa.Column(sa.String(255)) - - -class ProviderNetwork(model_base.BASEV2): - """Represents networks that were created as provider networks.""" - - __tablename__ = 'cisco_provider_networks' - - network_id = sa.Column(sa.String(36), - sa.ForeignKey('networks.id', ondelete="CASCADE"), - primary_key=True) - network_type = sa.Column(sa.String(255), nullable=False) - segmentation_id = sa.Column(sa.Integer, nullable=False) diff --git a/neutron/plugins/cisco/db/nexus_db_v2.py b/neutron/plugins/cisco/db/nexus_db_v2.py deleted file mode 100644 index a11a8a041..000000000 --- a/neutron/plugins/cisco/db/nexus_db_v2.py +++ /dev/null @@ -1,154 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2012, Cisco Systems, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Rohit Agarwalla, Cisco Systems, Inc. -# @author: Arvind Somya, Cisco Systems, Inc. (asomya@cisco.com) -# - -import sqlalchemy.orm.exc as sa_exc - -import neutron.db.api as db -from neutron.openstack.common import log as logging -from neutron.plugins.cisco.common import cisco_exceptions as c_exc -from neutron.plugins.cisco.db import nexus_models_v2 - - -LOG = logging.getLogger(__name__) - - -def get_nexusport_binding(port_id, vlan_id, switch_ip, instance_id): - """Lists a nexusport binding.""" - LOG.debug(_("get_nexusport_binding() called")) - return _lookup_all_nexus_bindings(port_id=port_id, - vlan_id=vlan_id, - switch_ip=switch_ip, - instance_id=instance_id) - - -def get_nexusvlan_binding(vlan_id, switch_ip): - """Lists a vlan and switch binding.""" - LOG.debug(_("get_nexusvlan_binding() called")) - return _lookup_all_nexus_bindings(vlan_id=vlan_id, switch_ip=switch_ip) - - -def add_nexusport_binding(port_id, vlan_id, switch_ip, instance_id): - """Adds a nexusport binding.""" - LOG.debug(_("add_nexusport_binding() called")) - session = db.get_session() - binding = nexus_models_v2.NexusPortBinding(port_id=port_id, - vlan_id=vlan_id, - switch_ip=switch_ip, - instance_id=instance_id) - session.add(binding) - session.flush() - return binding - - -def remove_nexusport_binding(port_id, vlan_id, switch_ip, instance_id): - """Removes a nexusport binding.""" - LOG.debug(_("remove_nexusport_binding() called")) - session = db.get_session() - binding = _lookup_all_nexus_bindings(session=session, - vlan_id=vlan_id, - switch_ip=switch_ip, - port_id=port_id, - instance_id=instance_id) - for bind in binding: - session.delete(bind) - session.flush() - return binding - - -def update_nexusport_binding(port_id, new_vlan_id): - """Updates nexusport binding.""" - if not new_vlan_id: - LOG.warning(_("update_nexusport_binding called with no vlan")) - return - 
LOG.debug(_("update_nexusport_binding called")) - session = db.get_session() - binding = _lookup_one_nexus_binding(session=session, port_id=port_id) - binding.vlan_id = new_vlan_id - session.merge(binding) - session.flush() - return binding - - -def get_nexusvm_bindings(vlan_id, instance_id): - """Lists nexusvm bindings.""" - LOG.debug(_("get_nexusvm_binding() called")) - - return _lookup_all_nexus_bindings(vlan_id=vlan_id, - instance_id=instance_id) - - -def get_port_vlan_switch_binding(port_id, vlan_id, switch_ip): - """Lists nexusvm bindings.""" - LOG.debug(_("get_port_vlan_switch_binding() called")) - return _lookup_all_nexus_bindings(port_id=port_id, - switch_ip=switch_ip, - vlan_id=vlan_id) - - -def get_port_switch_bindings(port_id, switch_ip): - """List all vm/vlan bindings on a Nexus switch port.""" - LOG.debug(_("get_port_switch_bindings() called, " - "port:'%(port_id)s', switch:'%(switch_ip)s'"), - {'port_id': port_id, 'switch_ip': switch_ip}) - try: - return _lookup_all_nexus_bindings(port_id=port_id, - switch_ip=switch_ip) - except c_exc.NexusPortBindingNotFound: - pass - - -def get_nexussvi_bindings(): - """Lists nexus svi bindings.""" - LOG.debug(_("get_nexussvi_bindings() called")) - return _lookup_all_nexus_bindings(port_id='router') - - -def _lookup_nexus_bindings(query_type, session=None, **bfilter): - """Look up 'query_type' Nexus bindings matching the filter. - - :param query_type: 'all', 'one' or 'first' - :param session: db session - :param bfilter: filter for bindings query - :return: bindings if query gave a result, else - raise NexusPortBindingNotFound. 
- """ - if session is None: - session = db.get_session() - query_method = getattr(session.query( - nexus_models_v2.NexusPortBinding).filter_by(**bfilter), query_type) - try: - bindings = query_method() - if bindings: - return bindings - except sa_exc.NoResultFound: - pass - raise c_exc.NexusPortBindingNotFound(**bfilter) - - -def _lookup_all_nexus_bindings(session=None, **bfilter): - return _lookup_nexus_bindings('all', session, **bfilter) - - -def _lookup_one_nexus_binding(session=None, **bfilter): - return _lookup_nexus_bindings('one', session, **bfilter) - - -def _lookup_first_nexus_binding(session=None, **bfilter): - return _lookup_nexus_bindings('first', session, **bfilter) diff --git a/neutron/plugins/cisco/db/nexus_models_v2.py b/neutron/plugins/cisco/db/nexus_models_v2.py deleted file mode 100644 index e639e47c6..000000000 --- a/neutron/plugins/cisco/db/nexus_models_v2.py +++ /dev/null @@ -1,46 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2012, Cisco Systems, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# @author: Rohit Agarwalla, Cisco Systems, Inc. 
- -import sqlalchemy as sa - -from neutron.db import model_base - - -class NexusPortBinding(model_base.BASEV2): - """Represents a binding of VM's to nexus ports.""" - - __tablename__ = "cisco_nexusport_bindings" - - id = sa.Column(sa.Integer, primary_key=True, autoincrement=True) - port_id = sa.Column(sa.String(255)) - vlan_id = sa.Column(sa.Integer, nullable=False) - switch_ip = sa.Column(sa.String(255), nullable=False) - instance_id = sa.Column(sa.String(255), nullable=False) - - def __repr__(self): - """Just the binding, without the id key.""" - return ("" % - (self.port_id, self.vlan_id, self.switch_ip, self.instance_id)) - - def __eq__(self, other): - """Compare only the binding, without the id key.""" - return ( - self.port_id == other.port_id and - self.vlan_id == other.vlan_id and - self.switch_ip == other.switch_ip and - self.instance_id == other.instance_id - ) diff --git a/neutron/plugins/cisco/extensions/__init__.py b/neutron/plugins/cisco/extensions/__init__.py deleted file mode 100644 index 63082aba2..000000000 --- a/neutron/plugins/cisco/extensions/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2013 OpenStack Foundation. -# All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
diff --git a/neutron/plugins/cisco/extensions/_credential_view.py b/neutron/plugins/cisco/extensions/_credential_view.py deleted file mode 100644 index 9dcbbc81e..000000000 --- a/neutron/plugins/cisco/extensions/_credential_view.py +++ /dev/null @@ -1,52 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2011 Cisco Systems, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Ying Liu, Cisco Systems, Inc. -# - - -def get_view_builder(req): - base_url = req.application_url - return ViewBuilder(base_url) - - -class ViewBuilder(object): - """ViewBuilder for Credential, derived from neutron.views.networks.""" - - def __init__(self, base_url): - """Initialize builder. 
- - :param base_url: url of the root wsgi application - """ - self.base_url = base_url - - def build(self, credential_data, is_detail=False): - """Generic method used to generate a credential entity.""" - if is_detail: - credential = self._build_detail(credential_data) - else: - credential = self._build_simple(credential_data) - return credential - - def _build_simple(self, credential_data): - """Return a simple description of credential.""" - return dict(credential=dict(id=credential_data['credential_id'])) - - def _build_detail(self, credential_data): - """Return a detailed description of credential.""" - return dict(credential=dict(id=credential_data['credential_id'], - name=credential_data['user_name'], - password=credential_data['password'])) diff --git a/neutron/plugins/cisco/extensions/_qos_view.py b/neutron/plugins/cisco/extensions/_qos_view.py deleted file mode 100644 index 81ef5fef6..000000000 --- a/neutron/plugins/cisco/extensions/_qos_view.py +++ /dev/null @@ -1,52 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2011 Cisco Systems, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Ying Liu, Cisco Systems, Inc. -# - - -def get_view_builder(req): - base_url = req.application_url - return ViewBuilder(base_url) - - -class ViewBuilder(object): - """ViewBuilder for QoS, derived from neutron.views.networks.""" - - def __init__(self, base_url): - """Initialize builder. 
- - :param base_url: url of the root wsgi application - """ - self.base_url = base_url - - def build(self, qos_data, is_detail=False): - """Generic method used to generate a QoS entity.""" - if is_detail: - qos = self._build_detail(qos_data) - else: - qos = self._build_simple(qos_data) - return qos - - def _build_simple(self, qos_data): - """Return a simple description of qos.""" - return dict(qos=dict(id=qos_data['qos_id'])) - - def _build_detail(self, qos_data): - """Return a detailed description of qos.""" - return dict(qos=dict(id=qos_data['qos_id'], - name=qos_data['qos_name'], - description=qos_data['qos_desc'])) diff --git a/neutron/plugins/cisco/extensions/credential.py b/neutron/plugins/cisco/extensions/credential.py deleted file mode 100644 index 8838136c1..000000000 --- a/neutron/plugins/cisco/extensions/credential.py +++ /dev/null @@ -1,84 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2013 Cisco Systems, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Ying Liu, Cisco Systems, Inc. 
-# @author: Abhishek Raut, Cisco Systems, Inc - -from neutron.api import extensions -from neutron.api.v2 import attributes -from neutron.api.v2 import base -from neutron import manager - - -# Attribute Map -RESOURCE_ATTRIBUTE_MAP = { - 'credentials': { - 'credential_id': {'allow_post': False, 'allow_put': False, - 'validate': {'type:regex': attributes.UUID_PATTERN}, - 'is_visible': True}, - 'credential_name': {'allow_post': True, 'allow_put': True, - 'is_visible': True, 'default': ''}, - 'tenant_id': {'allow_post': True, 'allow_put': False, - 'is_visible': False, 'default': ''}, - 'type': {'allow_post': True, 'allow_put': True, - 'is_visible': True, 'default': ''}, - 'user_name': {'allow_post': True, 'allow_put': True, - 'is_visible': True, 'default': ''}, - 'password': {'allow_post': True, 'allow_put': True, - 'is_visible': True, 'default': ''}, - }, -} - - -class Credential(extensions.ExtensionDescriptor): - - @classmethod - def get_name(cls): - """Returns Extended Resource Name.""" - return "Cisco Credential" - - @classmethod - def get_alias(cls): - """Returns Extended Resource Alias.""" - return "credential" - - @classmethod - def get_description(cls): - """Returns Extended Resource Description.""" - return "Credential include username and password" - - @classmethod - def get_namespace(cls): - """Returns Extended Resource Namespace.""" - return "http://docs.ciscocloud.com/api/ext/credential/v2.0" - - @classmethod - def get_updated(cls): - """Returns Extended Resource Update Time.""" - return "2011-07-25T13:25:27-06:00" - - @classmethod - def get_resources(cls): - """Returns Extended Resources.""" - resource_name = "credential" - collection_name = resource_name + "s" - plugin = manager.NeutronManager.get_plugin() - params = RESOURCE_ATTRIBUTE_MAP.get(collection_name, dict()) - controller = base.create_resource(collection_name, - resource_name, - plugin, params) - return [extensions.ResourceExtension(collection_name, - controller)] diff --git 
a/neutron/plugins/cisco/extensions/n1kv.py b/neutron/plugins/cisco/extensions/n1kv.py deleted file mode 100644 index 352ad816a..000000000 --- a/neutron/plugins/cisco/extensions/n1kv.py +++ /dev/null @@ -1,106 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2013 Cisco Systems, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Abhishek Raut, Cisco Systems, Inc. -# @author: Rudrajit Tapadar, Cisco Systems, Inc. -# @author: Aruna Kushwaha, Cisco Systems, Inc. -# @author: Sergey Sudakovich, Cisco Systems, Inc. 
- -from neutron.api import extensions -from neutron.api.v2 import attributes - - -PROFILE_ID = 'n1kv:profile_id' -MULTICAST_IP = 'n1kv:multicast_ip' -SEGMENT_ADD = 'n1kv:segment_add' -SEGMENT_DEL = 'n1kv:segment_del' -MEMBER_SEGMENTS = 'n1kv:member_segments' - -EXTENDED_ATTRIBUTES_2_0 = { - 'networks': { - PROFILE_ID: {'allow_post': True, 'allow_put': False, - 'validate': {'type:regex': attributes.UUID_PATTERN}, - 'default': attributes.ATTR_NOT_SPECIFIED, - 'is_visible': True}, - MULTICAST_IP: {'allow_post': True, 'allow_put': True, - 'default': attributes.ATTR_NOT_SPECIFIED, - 'is_visible': True}, - SEGMENT_ADD: {'allow_post': True, 'allow_put': True, - 'default': attributes.ATTR_NOT_SPECIFIED, - 'is_visible': True}, - SEGMENT_DEL: {'allow_post': True, 'allow_put': True, - 'default': attributes.ATTR_NOT_SPECIFIED, - 'is_visible': True}, - MEMBER_SEGMENTS: {'allow_post': True, 'allow_put': True, - 'default': attributes.ATTR_NOT_SPECIFIED, - 'is_visible': True}, - }, - 'ports': { - PROFILE_ID: {'allow_post': True, 'allow_put': False, - 'validate': {'type:regex': attributes.UUID_PATTERN}, - 'default': attributes.ATTR_NOT_SPECIFIED, - 'is_visible': True} - } -} - - -class N1kv(extensions.ExtensionDescriptor): - - """Extension class supporting N1kv profiles. - - This class is used by neutron's extension framework to make - metadata about the n1kv profile extension available to - clients. No new resources are defined by this extension. Instead, - the existing network resource's request and response messages are - extended with attributes in the n1kv profile namespace. - - To create a network based on n1kv profile using the CLI with admin rights: - - (shell) net-create --tenant_id \ - --n1kv:profile_id - (shell) port-create --tenant_id \ - --n1kv:profile_id - - - With admin rights, network dictionaries returned from CLI commands - will also include n1kv profile attributes. 
- """ - - @classmethod - def get_name(cls): - return "n1kv" - - @classmethod - def get_alias(cls): - return "n1kv" - - @classmethod - def get_description(cls): - return "Expose network profile" - - @classmethod - def get_namespace(cls): - return "http://docs.openstack.org/ext/n1kv/api/v2.0" - - @classmethod - def get_updated(cls): - return "2012-11-15T10:00:00-00:00" - - def get_extended_resources(self, version): - if version == "2.0": - return EXTENDED_ATTRIBUTES_2_0 - else: - return {} diff --git a/neutron/plugins/cisco/extensions/network_profile.py b/neutron/plugins/cisco/extensions/network_profile.py deleted file mode 100644 index bb05bd944..000000000 --- a/neutron/plugins/cisco/extensions/network_profile.py +++ /dev/null @@ -1,103 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2013 Cisco Systems, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Abhishek Raut, Cisco Systems, Inc. -# @author: Sergey Sudakovich, Cisco Systems, Inc. -# @author: Rudrajit Tapadar, Cisco Systems, Inc. 
- -from neutron.api import extensions -from neutron.api.v2 import attributes -from neutron.api.v2 import base -from neutron import manager - - -# Attribute Map -RESOURCE_ATTRIBUTE_MAP = { - 'network_profiles': { - 'id': {'allow_post': False, 'allow_put': False, - 'validate': {'type:regex': attributes.UUID_PATTERN}, - 'is_visible': True}, - 'name': {'allow_post': True, 'allow_put': True, - 'is_visible': True, 'default': ''}, - 'segment_type': {'allow_post': True, 'allow_put': False, - 'is_visible': True, 'default': ''}, - 'sub_type': {'allow_post': True, 'allow_put': False, - 'is_visible': True, - 'default': attributes.ATTR_NOT_SPECIFIED}, - 'segment_range': {'allow_post': True, 'allow_put': True, - 'is_visible': True, 'default': ''}, - 'multicast_ip_range': {'allow_post': True, 'allow_put': True, - 'is_visible': True, - 'default': attributes.ATTR_NOT_SPECIFIED}, - 'multicast_ip_index': {'allow_post': False, 'allow_put': False, - 'is_visible': False, 'default': '0'}, - 'physical_network': {'allow_post': True, 'allow_put': False, - 'is_visible': True, 'default': ''}, - 'tenant_id': {'allow_post': True, 'allow_put': False, - 'is_visible': False, 'default': ''}, - 'add_tenant': {'allow_post': True, 'allow_put': True, - 'is_visible': True, 'default': None}, - 'remove_tenant': {'allow_post': True, 'allow_put': True, - 'is_visible': True, 'default': None}, - }, - 'network_profile_bindings': { - 'profile_id': {'allow_post': False, 'allow_put': False, - 'validate': {'type:regex': attributes.UUID_PATTERN}, - 'is_visible': True}, - 'tenant_id': {'allow_post': True, 'allow_put': False, - 'is_visible': True}, - }, -} - - -class Network_profile(extensions.ExtensionDescriptor): - - @classmethod - def get_name(cls): - return "Cisco N1kv Network Profiles" - - @classmethod - def get_alias(cls): - return 'network_profile' - - @classmethod - def get_description(cls): - return ("Profile includes the type of profile for N1kv") - - @classmethod - def get_namespace(cls): - return 
"http://docs.openstack.org/ext/n1kv/network-profile/api/v2.0" - - @classmethod - def get_updated(cls): - return "2012-07-20T10:00:00-00:00" - - @classmethod - def get_resources(cls): - """Returns Extended Resources.""" - exts = [] - plugin = manager.NeutronManager.get_plugin() - for resource_name in ['network_profile', 'network_profile_binding']: - collection_name = resource_name + "s" - controller = base.create_resource( - collection_name, - resource_name, - plugin, - RESOURCE_ATTRIBUTE_MAP.get(collection_name)) - ex = extensions.ResourceExtension(collection_name, - controller) - exts.append(ex) - return exts diff --git a/neutron/plugins/cisco/extensions/policy_profile.py b/neutron/plugins/cisco/extensions/policy_profile.py deleted file mode 100644 index af3c25083..000000000 --- a/neutron/plugins/cisco/extensions/policy_profile.py +++ /dev/null @@ -1,85 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2013 Cisco Systems, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Abhishek Raut, Cisco Systems, Inc. -# @author: Sergey Sudakovich, Cisco Systems, Inc. 
- -from neutron.api import extensions -from neutron.api.v2 import attributes -from neutron.api.v2 import base -from neutron import manager - -# Attribute Map -RESOURCE_ATTRIBUTE_MAP = { - 'policy_profiles': { - 'id': {'allow_post': False, 'allow_put': False, - 'validate': {'type:regex': attributes.UUID_PATTERN}, - 'is_visible': True}, - 'name': {'allow_post': False, 'allow_put': False, - 'is_visible': True, 'default': ''}, - 'add_tenant': {'allow_post': True, 'allow_put': True, - 'is_visible': True, 'default': None}, - 'remove_tenant': {'allow_post': True, 'allow_put': True, - 'is_visible': True, 'default': None}, - }, - 'policy_profile_bindings': { - 'profile_id': {'allow_post': False, 'allow_put': False, - 'validate': {'type:regex': attributes.UUID_PATTERN}, - 'is_visible': True}, - 'tenant_id': {'allow_post': True, 'allow_put': False, - 'is_visible': True}, - }, -} - - -class Policy_profile(extensions.ExtensionDescriptor): - - @classmethod - def get_name(cls): - return "Cisco Nexus1000V Policy Profiles" - - @classmethod - def get_alias(cls): - return 'policy_profile' - - @classmethod - def get_description(cls): - return "Profile includes the type of profile for N1kv" - - @classmethod - def get_namespace(cls): - return "http://docs.openstack.org/ext/n1kv/policy-profile/api/v2.0" - - @classmethod - def get_updated(cls): - return "2012-07-20T10:00:00-00:00" - - @classmethod - def get_resources(cls): - """Returns Extended Resources.""" - exts = [] - plugin = manager.NeutronManager.get_plugin() - for resource_name in ['policy_profile', 'policy_profile_binding']: - collection_name = resource_name + "s" - controller = base.create_resource( - collection_name, - resource_name, - plugin, - RESOURCE_ATTRIBUTE_MAP.get(collection_name)) - ex = extensions.ResourceExtension(collection_name, - controller) - exts.append(ex) - return exts diff --git a/neutron/plugins/cisco/extensions/qos.py b/neutron/plugins/cisco/extensions/qos.py deleted file mode 100644 index 
255601b5b..000000000 --- a/neutron/plugins/cisco/extensions/qos.py +++ /dev/null @@ -1,156 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2011 Cisco Systems, Inc. -# All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Ying Liu, Cisco Systems, Inc. -# - -from webob import exc - -from neutron.api import api_common as common -from neutron.api import extensions -from neutron import manager -from neutron.plugins.cisco.common import cisco_exceptions as exception -from neutron.plugins.cisco.common import cisco_faults as faults -from neutron.plugins.cisco.extensions import _qos_view as qos_view -from neutron import wsgi - - -class Qos(extensions.ExtensionDescriptor): - """Qos extension file.""" - - @classmethod - def get_name(cls): - """Returns Ext Resource Name.""" - return "Cisco qos" - - @classmethod - def get_alias(cls): - """Returns Ext Resource Alias.""" - return "Cisco qos" - - @classmethod - def get_description(cls): - """Returns Ext Resource Description.""" - return "qos includes qos_name and qos_desc" - - @classmethod - def get_namespace(cls): - """Returns Ext Resource Namespace.""" - return "http://docs.ciscocloud.com/api/ext/qos/v1.0" - - @classmethod - def get_updated(cls): - """Returns Ext Resource update.""" - return "2011-07-25T13:25:27-06:00" - - @classmethod - def get_resources(cls): - """Returns Ext Resources.""" - parent_resource = dict(member_name="tenant", - collection_name="extensions/csco/tenants") - 
- controller = QosController(manager.NeutronManager.get_plugin()) - return [extensions.ResourceExtension('qoss', controller, - parent=parent_resource)] - - -class QosController(common.NeutronController, wsgi.Controller): - """qos API controller based on NeutronController.""" - - _qos_ops_param_list = [ - {'param-name': 'qos_name', 'required': True}, - {'param-name': 'qos_desc', 'required': True}, - ] - - _serialization_metadata = { - "application/xml": { - "attributes": { - "qos": ["id", "name"], - }, - }, - } - - def __init__(self, plugin): - self._resource_name = 'qos' - self._plugin = plugin - - def index(self, request, tenant_id): - """Returns a list of qos ids.""" - return self._items(request, tenant_id, is_detail=False) - - def _items(self, request, tenant_id, is_detail): - """Returns a list of qoss.""" - qoss = self._plugin.get_all_qoss(tenant_id) - builder = qos_view.get_view_builder(request) - result = [builder.build(qos, is_detail)['qos'] for qos in qoss] - return dict(qoss=result) - - # pylint: disable-msg=E1101 - def show(self, request, tenant_id, id): - """Returns qos details for the given qos id.""" - try: - qos = self._plugin.get_qos_details(tenant_id, id) - builder = qos_view.get_view_builder(request) - #build response with details - result = builder.build(qos, True) - return dict(qoss=result) - except exception.QosNotFound as exp: - return faults.Fault(faults.QosNotFound(exp)) - - def create(self, request, tenant_id): - """Creates a new qos for a given tenant.""" - #look for qos name in request - try: - body = self._deserialize(request.body, request.get_content_type()) - req_body = self._prepare_request_body(body, - self._qos_ops_param_list) - req_params = req_body[self._resource_name] - except exc.HTTPError as exp: - return faults.Fault(exp) - qos = self._plugin.create_qos(tenant_id, - req_params['qos_name'], - req_params['qos_desc']) - builder = qos_view.get_view_builder(request) - result = builder.build(qos) - return dict(qoss=result) - - def 
update(self, request, tenant_id, id): - """Updates the name for the qos with the given id.""" - try: - body = self._deserialize(request.body, request.get_content_type()) - req_body = self._prepare_request_body(body, - self._qos_ops_param_list) - req_params = req_body[self._resource_name] - except exc.HTTPError as exp: - return faults.Fault(exp) - try: - qos = self._plugin.rename_qos(tenant_id, id, - req_params['qos_name']) - - builder = qos_view.get_view_builder(request) - result = builder.build(qos, True) - return dict(qoss=result) - except exception.QosNotFound as exp: - return faults.Fault(faults.QosNotFound(exp)) - - def delete(self, request, tenant_id, id): - """Destroys the qos with the given id.""" - try: - self._plugin.delete_qos(tenant_id, id) - return exc.HTTPOk() - except exception.QosNotFound as exp: - return faults.Fault(faults.QosNotFound(exp)) diff --git a/neutron/plugins/cisco/l2device_plugin_base.py b/neutron/plugins/cisco/l2device_plugin_base.py deleted file mode 100644 index ef75e1188..000000000 --- a/neutron/plugins/cisco/l2device_plugin_base.py +++ /dev/null @@ -1,175 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2012 Cisco Systems, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Sumit Naiksatam, Cisco Systems, Inc. - -import abc -import inspect -import six - - -@six.add_metaclass(abc.ABCMeta) -class L2DevicePluginBase(object): - """Base class for a device-specific plugin. 
- - An example of a device-specific plugin is a Nexus switch plugin. - The network model relies on device-category-specific plugins to perform - the configuration on each device. - """ - - @abc.abstractmethod - def create_network(self, tenant_id, net_name, net_id, vlan_name, vlan_id, - **kwargs): - """Create network. - - :returns: - :raises: - """ - pass - - @abc.abstractmethod - def delete_network(self, tenant_id, net_id, **kwargs): - """Delete network. - - :returns: - :raises: - """ - pass - - @abc.abstractmethod - def update_network(self, tenant_id, net_id, name, **kwargs): - """Update network. - - :returns: - :raises: - """ - pass - - @abc.abstractmethod - def create_port(self, tenant_id, net_id, port_state, port_id, **kwargs): - """Create port. - - :returns: - :raises: - """ - pass - - @abc.abstractmethod - def delete_port(self, tenant_id, net_id, port_id, **kwargs): - """Delete port. - - :returns: - :raises: - """ - pass - - @abc.abstractmethod - def update_port(self, tenant_id, net_id, port_id, **kwargs): - """Update port. - - :returns: - :raises: - """ - pass - - @abc.abstractmethod - def plug_interface(self, tenant_id, net_id, port_id, remote_interface_id, - **kwargs): - """Plug interface. - - :returns: - :raises: - """ - pass - - @abc.abstractmethod - def unplug_interface(self, tenant_id, net_id, port_id, **kwargs): - """Unplug interface. - - :returns: - :raises: - """ - pass - - def create_subnet(self, tenant_id, net_id, ip_version, - subnet_cidr, **kwargs): - """Create subnet. - - :returns: - :raises: - """ - pass - - def get_subnets(self, tenant_id, net_id, **kwargs): - """Get subnets. - - :returns: - :raises: - """ - pass - - def get_subnet(self, tenant_id, net_id, subnet_id, **kwargs): - """Get subnet. - - :returns: - :raises: - """ - pass - - def update_subnet(self, tenant_id, net_id, subnet_id, **kwargs): - """Update subnet. 
- - :returns: - :raises: - """ - pass - - def delete_subnet(self, tenant_id, net_id, subnet_id, **kwargs): - """Delete subnet. - - :returns: - :raises: - """ - pass - - @classmethod - def __subclasshook__(cls, klass): - """Check plugin class. - - The __subclasshook__ method is a class method - that will be called every time a class is tested - using issubclass(klass, Plugin). - In that case, it will check that every method - marked with the abstractmethod decorator is - provided by the plugin class. - """ - if cls is L2DevicePluginBase: - for method in cls.__abstractmethods__: - method_ok = False - for base in klass.__mro__: - if method in base.__dict__: - fn_obj = base.__dict__[method] - if inspect.isfunction(fn_obj): - abstract_fn_obj = cls.__dict__[method] - arg_count = fn_obj.func_code.co_argcount - expected_arg_count = \ - abstract_fn_obj.func_code.co_argcount - method_ok = arg_count == expected_arg_count - if method_ok: - continue - return NotImplemented - return True - return NotImplemented diff --git a/neutron/plugins/cisco/models/__init__.py b/neutron/plugins/cisco/models/__init__.py deleted file mode 100644 index 833357b73..000000000 --- a/neutron/plugins/cisco/models/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2011 Cisco Systems, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Sumit Naiksatam, Cisco Systems, Inc. 
diff --git a/neutron/plugins/cisco/models/virt_phy_sw_v2.py b/neutron/plugins/cisco/models/virt_phy_sw_v2.py deleted file mode 100644 index b7452f363..000000000 --- a/neutron/plugins/cisco/models/virt_phy_sw_v2.py +++ /dev/null @@ -1,553 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2012 Cisco Systems, Inc. -# All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Sumit Naiksatam, Cisco Systems, Inc. -# @author: Rohit Agarwalla, Cisco Systems, Inc. -# - -import inspect -import logging -import sys - -from neutron.api.v2 import attributes -from neutron.db import api as db_api -from neutron.extensions import portbindings -from neutron.extensions import providernet as provider -from neutron import neutron_plugin_base_v2 -from neutron.openstack.common import importutils -from neutron.plugins.cisco.common import cisco_constants as const -from neutron.plugins.cisco.common import cisco_credentials_v2 as cred -from neutron.plugins.cisco.common import cisco_exceptions as cexc -from neutron.plugins.cisco.common import config as conf -from neutron.plugins.cisco.db import network_db_v2 as cdb -from neutron.plugins.openvswitch import ovs_db_v2 as odb - - -LOG = logging.getLogger(__name__) - - -class VirtualPhysicalSwitchModelV2(neutron_plugin_base_v2.NeutronPluginBaseV2): - """Virtual Physical Switch Model. - - This implementation works with OVS and Nexus plugin for the - following topology: - One or more servers to a nexus switch. 
- """ - __native_bulk_support = True - supported_extension_aliases = ["provider", "binding"] - _methods_to_delegate = ['create_network_bulk', - 'get_network', 'get_networks', - 'create_port_bulk', - 'get_port', 'get_ports', - 'create_subnet', 'create_subnet_bulk', - 'delete_subnet', 'update_subnet', - 'get_subnet', 'get_subnets', - 'create_or_update_agent', 'report_state'] - - def __init__(self): - """Initialize the segmentation manager. - - Checks which device plugins are configured, and load the inventories - those device plugins for which the inventory is configured. - """ - conf.CiscoConfigOptions() - - self._plugins = {} - for key in conf.CISCO_PLUGINS.keys(): - plugin_obj = conf.CISCO_PLUGINS[key] - if plugin_obj is not None: - self._plugins[key] = importutils.import_object(plugin_obj) - LOG.debug(_("Loaded device plugin %s"), - conf.CISCO_PLUGINS[key]) - - if ((const.VSWITCH_PLUGIN in self._plugins) and - hasattr(self._plugins[const.VSWITCH_PLUGIN], - "supported_extension_aliases")): - self.supported_extension_aliases.extend( - self._plugins[const.VSWITCH_PLUGIN]. - supported_extension_aliases) - # At this point, all the database models should have been loaded. It's - # possible that configure_db() may have been called by one of the - # plugins loaded in above. Otherwise, this call is to make sure that - # the database is initialized - db_api.configure_db() - - # Initialize credential store after database initialization - cred.Store.initialize() - LOG.debug(_("%(module)s.%(name)s init done"), - {'module': __name__, - 'name': self.__class__.__name__}) - - # Check whether we have a valid Nexus driver loaded - self.is_nexus_plugin = False - nexus_driver = conf.CISCO.nexus_driver - if nexus_driver.endswith('CiscoNEXUSDriver'): - self.is_nexus_plugin = True - - def __getattribute__(self, name): - """Delegate calls to OVS sub-plugin. - - This delegates the calls to the methods implemented only by the OVS - sub-plugin. 
Note: Currently, bulking is handled by the caller - (PluginV2), and this model class expects to receive only non-bulking - calls. If, however, a bulking call is made, this will method will - delegate the call to the OVS plugin. - """ - super_getattribute = super(VirtualPhysicalSwitchModelV2, - self).__getattribute__ - methods = super_getattribute('_methods_to_delegate') - - if name in methods: - plugin = super_getattribute('_plugins')[const.VSWITCH_PLUGIN] - return getattr(plugin, name) - - try: - return super_getattribute(name) - except AttributeError: - plugin = super_getattribute('_plugins')[const.VSWITCH_PLUGIN] - return getattr(plugin, name) - - def _func_name(self, offset=0): - """Get the name of the calling function.""" - frame_record = inspect.stack()[1 + offset] - func_name = frame_record[3] - return func_name - - def _invoke_plugin_per_device(self, plugin_key, function_name, - args, **kwargs): - """Invoke plugin per device. - - Invokes a device plugin's relevant functions (based on the - plugin implementation) for completing this operation. 
- """ - if plugin_key not in self._plugins: - LOG.info(_("No %s Plugin loaded"), plugin_key) - LOG.info(_("%(plugin_key)s: %(function_name)s with args %(args)s " - "ignored"), - {'plugin_key': plugin_key, - 'function_name': function_name, - 'args': args}) - else: - func = getattr(self._plugins[plugin_key], function_name) - return func(*args, **kwargs) - - def _get_segmentation_id(self, network_id): - binding_seg_id = odb.get_network_binding(None, network_id) - if not binding_seg_id: - raise cexc.NetworkSegmentIDNotFound(net_id=network_id) - return binding_seg_id.segmentation_id - - def _get_provider_vlan_id(self, network): - if (all(attributes.is_attr_set(network.get(attr)) - for attr in (provider.NETWORK_TYPE, - provider.PHYSICAL_NETWORK, - provider.SEGMENTATION_ID)) - and - network[provider.NETWORK_TYPE] == const.NETWORK_TYPE_VLAN): - return network[provider.SEGMENTATION_ID] - - def create_network(self, context, network): - """Create network. - - Perform this operation in the context of the configured device - plugins. - """ - LOG.debug(_("create_network() called")) - provider_vlan_id = self._get_provider_vlan_id(network[const.NETWORK]) - args = [context, network] - ovs_output = self._invoke_plugin_per_device(const.VSWITCH_PLUGIN, - self._func_name(), - args) - # The vswitch plugin did all the verification. If it's a provider - # vlan network, save it for the nexus plugin to use later. - if provider_vlan_id: - network_id = ovs_output[const.NET_ID] - cdb.add_provider_network(network_id, - const.NETWORK_TYPE_VLAN, - provider_vlan_id) - LOG.debug(_("Provider network added to DB: %(network_id)s, " - "%(vlan_id)s"), - {'network_id': network_id, 'vlan_id': provider_vlan_id}) - return ovs_output - - def update_network(self, context, id, network): - """Update network. - - Perform this operation in the context of the configured device - plugins. 
- - Note that the Nexus sub-plugin does not need to be notified - (and the Nexus switch does not need to be [re]configured) - for an update network operation because the Nexus sub-plugin - is agnostic of all network-level attributes except the - segmentation ID. Furthermore, updating of the segmentation ID - is not supported by the OVS plugin since it is considered a - provider attribute, so it is not supported by this method. - """ - LOG.debug(_("update_network() called")) - - # We can only support updating of provider attributes if all the - # configured sub-plugins support it. Currently we have no method - # in place for checking whether a sub-plugin supports it, - # so assume not. - provider._raise_if_updates_provider_attributes(network['network']) - - args = [context, id, network] - return self._invoke_plugin_per_device(const.VSWITCH_PLUGIN, - self._func_name(), - args) - - def delete_network(self, context, id): - """Delete network. - - Perform this operation in the context of the configured device - plugins. - """ - args = [context, id] - ovs_output = self._invoke_plugin_per_device(const.VSWITCH_PLUGIN, - self._func_name(), - args) - if cdb.remove_provider_network(id): - LOG.debug(_("Provider network removed from DB: %s"), id) - return ovs_output - - def get_network(self, context, id, fields=None): - """Get network. This method is delegated to the vswitch plugin. - - This method is included here to satisfy abstract method requirements. - """ - pass # pragma no cover - - def get_networks(self, context, filters=None, fields=None, - sorts=None, limit=None, marker=None, page_reverse=False): - """Get networks. This method is delegated to the vswitch plugin. - - This method is included here to satisfy abstract method requirements. 
- """ - pass # pragma no cover - - def _invoke_nexus_for_net_create(self, context, tenant_id, net_id, - instance_id, host_id): - if not self.is_nexus_plugin: - return False - - network = self.get_network(context, net_id) - vlan_id = self._get_segmentation_id(net_id) - vlan_name = conf.CISCO.vlan_name_prefix + str(vlan_id) - network[const.NET_VLAN_ID] = vlan_id - network[const.NET_VLAN_NAME] = vlan_name - attachment = { - const.TENANT_ID: tenant_id, - const.INSTANCE_ID: instance_id, - const.HOST_NAME: host_id, - } - self._invoke_plugin_per_device( - const.NEXUS_PLUGIN, - 'create_network', - [network, attachment]) - - def _check_valid_port_device_owner(self, port): - """Check the port for valid device_owner. - - Don't call the nexus plugin for router and dhcp - port owners. - """ - return port['device_owner'].startswith('compute') - - def _get_port_host_id_from_bindings(self, port): - """Get host_id from portbindings.""" - host_id = None - - if (portbindings.HOST_ID in port and - attributes.is_attr_set(port[portbindings.HOST_ID])): - host_id = port[portbindings.HOST_ID] - - return host_id - - def create_port(self, context, port): - """Create port. - - Perform this operation in the context of the configured device - plugins. 
- """ - LOG.debug(_("create_port() called")) - args = [context, port] - ovs_output = self._invoke_plugin_per_device(const.VSWITCH_PLUGIN, - self._func_name(), - args) - instance_id = port['port']['device_id'] - - # Only call nexus plugin if there's a valid instance_id, host_id - # and device_owner - try: - host_id = self._get_port_host_id_from_bindings(port['port']) - if (instance_id and host_id and - self._check_valid_port_device_owner(port['port'])): - net_id = port['port']['network_id'] - tenant_id = port['port']['tenant_id'] - self._invoke_nexus_for_net_create( - context, tenant_id, net_id, instance_id, host_id) - except Exception: - # Create network on the Nexus plugin has failed, so we need - # to rollback the port creation on the VSwitch plugin. - exc_info = sys.exc_info() - try: - id = ovs_output['id'] - args = [context, id] - ovs_output = self._invoke_plugin_per_device( - const.VSWITCH_PLUGIN, - 'delete_port', - args) - finally: - # Re-raise the original exception - raise exc_info[0], exc_info[1], exc_info[2] - return ovs_output - - def get_port(self, context, id, fields=None): - """Get port. This method is delegated to the vswitch plugin. - - This method is included here to satisfy abstract method requirements. - """ - pass # pragma no cover - - def get_ports(self, context, filters=None, fields=None): - """Get ports. This method is delegated to the vswitch plugin. - - This method is included here to satisfy abstract method requirements. - """ - pass # pragma no cover - - def _check_nexus_net_create_needed(self, new_port, old_port): - """Check if nexus plugin should be invoked for net_create. - - In the following cases, the plugin should be invoked: - -- a port is attached to a VM instance. The old host id is None - -- VM migration. The old host id has a valid value - - When the plugin needs to be invoked, return the old_host_id, - and a list of calling arguments. 
- Otherwise, return '' for old host id and an empty list - """ - old_device_id = old_port['device_id'] - new_device_id = new_port.get('device_id') - new_host_id = self._get_port_host_id_from_bindings(new_port) - tenant_id = old_port['tenant_id'] - net_id = old_port['network_id'] - old_host_id = self._get_port_host_id_from_bindings(old_port) - - LOG.debug(_("tenant_id: %(tid)s, net_id: %(nid)s, " - "old_device_id: %(odi)s, new_device_id: %(ndi)s, " - "old_host_id: %(ohi)s, new_host_id: %(nhi)s, " - "old_device_owner: %(odo)s, new_device_owner: %(ndo)s"), - {'tid': tenant_id, 'nid': net_id, - 'odi': old_device_id, 'ndi': new_device_id, - 'ohi': old_host_id, 'nhi': new_host_id, - 'odo': old_port.get('device_owner'), - 'ndo': new_port.get('device_owner')}) - - # A port is attached to an instance - if (new_device_id and not old_device_id and new_host_id and - self._check_valid_port_device_owner(new_port)): - return '', [tenant_id, net_id, new_device_id, new_host_id] - - # An instance is being migrated - if (old_device_id and old_host_id and new_host_id != old_host_id and - self._check_valid_port_device_owner(old_port)): - return old_host_id, [tenant_id, net_id, old_device_id, new_host_id] - - # no need to invoke the plugin - return '', [] - - def update_port(self, context, id, port): - """Update port. - - Perform this operation in the context of the configured device - plugins. 
- """ - LOG.debug(_("update_port() called")) - old_port = self.get_port(context, id) - args = [context, id, port] - ovs_output = self._invoke_plugin_per_device(const.VSWITCH_PLUGIN, - self._func_name(), - args) - try: - # Check if the nexus plugin needs to be invoked - old_host_id, create_args = self._check_nexus_net_create_needed( - port['port'], old_port) - - # In the case of migration, invoke it to remove - # the previous port binding - if old_host_id: - vlan_id = self._get_segmentation_id(old_port['network_id']) - delete_args = [old_port['device_id'], vlan_id] - self._invoke_plugin_per_device(const.NEXUS_PLUGIN, - "delete_port", - delete_args) - - # Invoke the Nexus plugin to create a net and/or new port binding - if create_args: - self._invoke_nexus_for_net_create(context, *create_args) - - return ovs_output - except Exception: - exc_info = sys.exc_info() - LOG.error(_("Unable to update port '%s' on Nexus switch"), - old_port['name'], exc_info=exc_info) - try: - # Roll back vSwitch plugin to original port attributes. - args = [context, id, {'port': old_port}] - self._invoke_plugin_per_device( - const.VSWITCH_PLUGIN, - self._func_name(), - args) - finally: - # Re-raise the original exception - raise exc_info[0], exc_info[1], exc_info[2] - - def delete_port(self, context, id, l3_port_check=True): - """Delete port. - - Perform this operation in the context of the configured device - plugins. 
- """ - LOG.debug(_("delete_port() called")) - port = self.get_port(context, id) - - host_id = self._get_port_host_id_from_bindings(port) - - if (self.is_nexus_plugin and host_id and - self._check_valid_port_device_owner(port)): - vlan_id = self._get_segmentation_id(port['network_id']) - n_args = [port['device_id'], vlan_id] - self._invoke_plugin_per_device(const.NEXUS_PLUGIN, - self._func_name(), - n_args) - try: - args = [context, id] - ovs_output = self._invoke_plugin_per_device( - const.VSWITCH_PLUGIN, self._func_name(), - args, l3_port_check=l3_port_check) - except Exception: - exc_info = sys.exc_info() - # Roll back the delete port on the Nexus plugin - try: - tenant_id = port['tenant_id'] - net_id = port['network_id'] - instance_id = port['device_id'] - host_id = port[portbindings.HOST_ID] - self._invoke_nexus_for_net_create(context, tenant_id, net_id, - instance_id, host_id) - finally: - # Raise the original exception. - raise exc_info[0], exc_info[1], exc_info[2] - - return ovs_output - - def add_router_interface(self, context, router_id, interface_info): - """Add a router interface on a subnet. 
- - Only invoke the Nexus plugin to create SVI if L3 support on - the Nexus switches is enabled and a Nexus plugin is loaded, - otherwise send it to the vswitch plugin - """ - if (conf.CISCO.nexus_l3_enable and self.is_nexus_plugin): - LOG.debug(_("L3 enabled on Nexus plugin, create SVI on switch")) - if 'subnet_id' not in interface_info: - raise cexc.SubnetNotSpecified() - if 'port_id' in interface_info: - raise cexc.PortIdForNexusSvi() - subnet = self.get_subnet(context, interface_info['subnet_id']) - gateway_ip = subnet['gateway_ip'] - # Get gateway IP address and netmask - cidr = subnet['cidr'] - netmask = cidr.split('/', 1)[1] - gateway_ip = gateway_ip + '/' + netmask - network_id = subnet['network_id'] - vlan_id = self._get_segmentation_id(network_id) - vlan_name = conf.CISCO.vlan_name_prefix + str(vlan_id) - - n_args = [vlan_name, vlan_id, subnet['id'], gateway_ip, router_id] - return self._invoke_plugin_per_device(const.NEXUS_PLUGIN, - self._func_name(), - n_args) - else: - LOG.debug(_("L3 disabled or not Nexus plugin, send to vswitch")) - n_args = [context, router_id, interface_info] - return self._invoke_plugin_per_device(const.VSWITCH_PLUGIN, - self._func_name(), - n_args) - - def remove_router_interface(self, context, router_id, interface_info): - """Remove a router interface. 
- - Only invoke the Nexus plugin to delete SVI if L3 support on - the Nexus switches is enabled and a Nexus plugin is loaded, - otherwise send it to the vswitch plugin - """ - if (conf.CISCO.nexus_l3_enable and self.is_nexus_plugin): - LOG.debug(_("L3 enabled on Nexus plugin, delete SVI from switch")) - - subnet = self.get_subnet(context, interface_info['subnet_id']) - network_id = subnet['network_id'] - vlan_id = self._get_segmentation_id(network_id) - n_args = [vlan_id, router_id] - - return self._invoke_plugin_per_device(const.NEXUS_PLUGIN, - self._func_name(), - n_args) - else: - LOG.debug(_("L3 disabled or not Nexus plugin, send to vswitch")) - n_args = [context, router_id, interface_info] - return self._invoke_plugin_per_device(const.VSWITCH_PLUGIN, - self._func_name(), - n_args) - - def create_subnet(self, context, subnet): - """Create subnet. This method is delegated to the vswitch plugin. - - This method is included here to satisfy abstract method requirements. - """ - pass # pragma no cover - - def update_subnet(self, context, id, subnet): - """Update subnet. This method is delegated to the vswitch plugin. - - This method is included here to satisfy abstract method requirements. - """ - pass # pragma no cover - - def get_subnet(self, context, id, fields=None): - """Get subnet. This method is delegated to the vswitch plugin. - - This method is included here to satisfy abstract method requirements. - """ - pass # pragma no cover - - def delete_subnet(self, context, id, kwargs): - """Delete subnet. This method is delegated to the vswitch plugin. - - This method is included here to satisfy abstract method requirements. - """ - pass # pragma no cover - - def get_subnets(self, context, filters=None, fields=None, - sorts=None, limit=None, marker=None, page_reverse=False): - """Get subnets. This method is delegated to the vswitch plugin. - - This method is included here to satisfy abstract method requirements. 
- """ - pass # pragma no cover diff --git a/neutron/plugins/cisco/n1kv/__init__.py b/neutron/plugins/cisco/n1kv/__init__.py deleted file mode 100644 index 59a411933..000000000 --- a/neutron/plugins/cisco/n1kv/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2013 Cisco Systems, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Abhishek Raut, Cisco Systems, Inc. -# diff --git a/neutron/plugins/cisco/n1kv/n1kv_client.py b/neutron/plugins/cisco/n1kv/n1kv_client.py deleted file mode 100644 index 541750835..000000000 --- a/neutron/plugins/cisco/n1kv/n1kv_client.py +++ /dev/null @@ -1,541 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2013 Cisco Systems, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Abhishek Raut, Cisco Systems, Inc. -# @author: Rudrajit Tapadar, Cisco Systems, Inc. 
- -import base64 -import eventlet -import netaddr -import requests - -from neutron.common import exceptions as n_exc -from neutron.extensions import providernet -from neutron.openstack.common import jsonutils -from neutron.openstack.common import log as logging -from neutron.plugins.cisco.common import cisco_constants as c_const -from neutron.plugins.cisco.common import cisco_credentials_v2 as c_cred -from neutron.plugins.cisco.common import cisco_exceptions as c_exc -from neutron.plugins.cisco.common import config as c_conf -from neutron.plugins.cisco.db import network_db_v2 -from neutron.plugins.cisco.extensions import n1kv - -LOG = logging.getLogger(__name__) - - -class Client(object): - - """ - Client for the Cisco Nexus1000V Neutron Plugin. - - This client implements functions to communicate with - Cisco Nexus1000V VSM. - - For every Neutron objects, Cisco Nexus1000V Neutron Plugin - creates a corresponding object in the controller (Cisco - Nexus1000V VSM). - - CONCEPTS: - - Following are few concepts used in Nexus1000V VSM: - - port-profiles: - Policy profiles correspond to port profiles on Nexus1000V VSM. - Port profiles are the primary mechanism by which network policy is - defined and applied to switch interfaces in a Nexus 1000V system. - - network-segment: - Each network-segment represents a broadcast domain. - - network-segment-pool: - A network-segment-pool contains one or more network-segments. - - logical-network: - A logical-network contains one or more network-segment-pools. - - bridge-domain: - A bridge-domain is created when the network-segment is of type VXLAN. - Each VXLAN <--> VLAN combination can be thought of as a bridge domain. - - ip-pool: - Each ip-pool represents a subnet on the Nexus1000V VSM. - - vm-network: - vm-network refers to a network-segment and policy-profile. - It maintains a list of ports that uses the network-segment and - policy-profile this vm-network refers to. 
- - events: - Events correspond to commands that are logged on Nexus1000V VSM. - Events are used to poll for a certain resource on Nexus1000V VSM. - Event type of port_profile: Return all updates/create/deletes - of port profiles from the VSM. - Event type of port_profile_update: Return only updates regarding - policy-profiles. - Event type of port_profile_delete: Return only deleted policy profiles. - - - WORK FLOW: - - For every network profile a corresponding logical-network and - a network-segment-pool, under this logical-network, will be created. - - For every network created from a given network profile, a - network-segment will be added to the network-segment-pool corresponding - to that network profile. - - A port is created on a network and associated with a policy-profile. - Hence for every unique combination of a network and a policy-profile, a - unique vm-network will be created and a reference to the port will be - added. If the same combination of network and policy-profile is used by - another port, the references to that port will be added to the same - vm-network. - - - """ - - # Define paths for the URI where the client connects for HTTP requests. 
- port_profiles_path = "/virtual-port-profile" - network_segment_path = "/network-segment/%s" - network_segment_pool_path = "/network-segment-pool/%s" - ip_pool_path = "/ip-pool-template/%s" - ports_path = "/kvm/vm-network/%s/ports" - port_path = "/kvm/vm-network/%s/ports/%s" - vm_networks_path = "/kvm/vm-network" - vm_network_path = "/kvm/vm-network/%s" - bridge_domains_path = "/kvm/bridge-domain" - bridge_domain_path = "/kvm/bridge-domain/%s" - logical_network_path = "/logical-network/%s" - events_path = "/kvm/events" - clusters_path = "/cluster" - encap_profiles_path = "/encapsulation-profile" - encap_profile_path = "/encapsulation-profile/%s" - - pool = eventlet.GreenPool(c_conf.CISCO_N1K.http_pool_size) - - def __init__(self, **kwargs): - """Initialize a new client for the plugin.""" - self.format = 'json' - self.hosts = self._get_vsm_hosts() - self.action_prefix = 'http://%s/api/n1k' % self.hosts[0] - self.timeout = c_const.DEFAULT_HTTP_TIMEOUT - - def list_port_profiles(self): - """ - Fetch all policy profiles from the VSM. - - :returns: JSON string - """ - return self._get(self.port_profiles_path) - - def create_bridge_domain(self, network, overlay_subtype): - """ - Create a bridge domain on VSM. - - :param network: network dict - :param overlay_subtype: string representing subtype of overlay network - """ - body = {'name': network['id'] + c_const.BRIDGE_DOMAIN_SUFFIX, - 'segmentId': network[providernet.SEGMENTATION_ID], - 'subType': overlay_subtype, - 'tenantId': network['tenant_id']} - if overlay_subtype == c_const.NETWORK_SUBTYPE_NATIVE_VXLAN: - body['groupIp'] = network[n1kv.MULTICAST_IP] - return self._post(self.bridge_domains_path, - body=body) - - def delete_bridge_domain(self, name): - """ - Delete a bridge domain on VSM. - - :param name: name of the bridge domain to be deleted - """ - return self._delete(self.bridge_domain_path % name) - - def create_network_segment(self, network, network_profile): - """ - Create a network segment on the VSM. 
- - :param network: network dict - :param network_profile: network profile dict - """ - body = {'publishName': network['id'], - 'description': network['name'], - 'id': network['id'], - 'tenantId': network['tenant_id'], - 'networkSegmentPool': network_profile['id'], } - if network[providernet.NETWORK_TYPE] == c_const.NETWORK_TYPE_VLAN: - body['vlan'] = network[providernet.SEGMENTATION_ID] - elif network[providernet.NETWORK_TYPE] == c_const.NETWORK_TYPE_OVERLAY: - body['bridgeDomain'] = (network['id'] + - c_const.BRIDGE_DOMAIN_SUFFIX) - if network_profile['segment_type'] == c_const.NETWORK_TYPE_TRUNK: - body['mode'] = c_const.NETWORK_TYPE_TRUNK - body['segmentType'] = network_profile['sub_type'] - if network_profile['sub_type'] == c_const.NETWORK_TYPE_VLAN: - body['addSegments'] = network['add_segment_list'] - body['delSegments'] = network['del_segment_list'] - else: - body['encapProfile'] = (network['id'] + - c_const.ENCAPSULATION_PROFILE_SUFFIX) - else: - body['mode'] = 'access' - body['segmentType'] = network_profile['segment_type'] - return self._post(self.network_segment_path % network['id'], - body=body) - - def update_network_segment(self, network_segment_id, body): - """ - Update a network segment on the VSM. - - Network segment on VSM can be updated to associate it with an ip-pool - or update its description and segment id. - - :param network_segment_id: UUID representing the network segment - :param body: dict of arguments to be updated - """ - return self._post(self.network_segment_path % network_segment_id, - body=body) - - def delete_network_segment(self, network_segment_id): - """ - Delete a network segment on the VSM. - - :param network_segment_id: UUID representing the network segment - """ - return self._delete(self.network_segment_path % network_segment_id) - - def create_logical_network(self, network_profile, tenant_id): - """ - Create a logical network on the VSM. 
- - :param network_profile: network profile dict - :param tenant_id: UUID representing the tenant - """ - LOG.debug(_("Logical network")) - body = {'description': network_profile['name'], - 'tenantId': tenant_id} - logical_network_name = (network_profile['id'] + - c_const.LOGICAL_NETWORK_SUFFIX) - return self._post(self.logical_network_path % logical_network_name, - body=body) - - def delete_logical_network(self, logical_network_name): - """ - Delete a logical network on VSM. - - :param logical_network_name: string representing name of the logical - network - """ - return self._delete( - self.logical_network_path % logical_network_name) - - def create_network_segment_pool(self, network_profile, tenant_id): - """ - Create a network segment pool on the VSM. - - :param network_profile: network profile dict - :param tenant_id: UUID representing the tenant - """ - LOG.debug(_("network_segment_pool")) - logical_network_name = (network_profile['id'] + - c_const.LOGICAL_NETWORK_SUFFIX) - body = {'name': network_profile['name'], - 'description': network_profile['name'], - 'id': network_profile['id'], - 'logicalNetwork': logical_network_name, - 'tenantId': tenant_id} - return self._post( - self.network_segment_pool_path % network_profile['id'], - body=body) - - def update_network_segment_pool(self, network_profile): - """ - Update a network segment pool on the VSM. - - :param network_profile: network profile dict - """ - body = {'name': network_profile['name'], - 'description': network_profile['name']} - return self._post(self.network_segment_pool_path % - network_profile['id'], body=body) - - def delete_network_segment_pool(self, network_segment_pool_id): - """ - Delete a network segment pool on the VSM. - - :param network_segment_pool_id: UUID representing the network - segment pool - """ - return self._delete(self.network_segment_pool_path % - network_segment_pool_id) - - def create_ip_pool(self, subnet): - """ - Create an ip-pool on the VSM. 
- - :param subnet: subnet dict - """ - if subnet['cidr']: - try: - ip = netaddr.IPNetwork(subnet['cidr']) - netmask = str(ip.netmask) - network_address = str(ip.network) - except (ValueError, netaddr.AddrFormatError): - msg = _("Invalid input for CIDR") - raise n_exc.InvalidInput(error_message=msg) - else: - netmask = network_address = "" - - if subnet['allocation_pools']: - address_range_start = subnet['allocation_pools'][0]['start'] - address_range_end = subnet['allocation_pools'][0]['end'] - else: - address_range_start = None - address_range_end = None - - body = {'addressRangeStart': address_range_start, - 'addressRangeEnd': address_range_end, - 'ipAddressSubnet': netmask, - 'description': subnet['name'], - 'gateway': subnet['gateway_ip'], - 'dhcp': subnet['enable_dhcp'], - 'dnsServersList': subnet['dns_nameservers'], - 'networkAddress': network_address, - 'tenantId': subnet['tenant_id']} - return self._post(self.ip_pool_path % subnet['id'], - body=body) - - def update_ip_pool(self, subnet): - """ - Update an ip-pool on the VSM. - - :param subnet: subnet dictionary - """ - body = {'description': subnet['name'], - 'dhcp': subnet['enable_dhcp'], - 'dnsServersList': subnet['dns_nameservers']} - return self._post(self.ip_pool_path % subnet['id'], - body=body) - - def delete_ip_pool(self, subnet_id): - """ - Delete an ip-pool on the VSM. - - :param subnet_id: UUID representing the subnet - """ - return self._delete(self.ip_pool_path % subnet_id) - - def create_vm_network(self, - port, - vm_network_name, - policy_profile): - """ - Create a VM network on the VSM. 
- - :param port: port dict - :param vm_network_name: name of the VM network - :param policy_profile: policy profile dict - """ - body = {'name': vm_network_name, - 'networkSegmentId': port['network_id'], - 'networkSegment': port['network_id'], - 'portProfile': policy_profile['name'], - 'portProfileId': policy_profile['id'], - 'tenantId': port['tenant_id'], - 'portId': port['id'], - 'macAddress': port['mac_address'], - } - if port.get('fixed_ips'): - body['ipAddress'] = port['fixed_ips'][0]['ip_address'] - body['subnetId'] = port['fixed_ips'][0]['subnet_id'] - return self._post(self.vm_networks_path, - body=body) - - def delete_vm_network(self, vm_network_name): - """ - Delete a VM network on the VSM. - - :param vm_network_name: name of the VM network - """ - return self._delete(self.vm_network_path % vm_network_name) - - def create_n1kv_port(self, port, vm_network_name): - """ - Create a port on the VSM. - - :param port: port dict - :param vm_network_name: name of the VM network which imports this port - """ - body = {'id': port['id'], - 'macAddress': port['mac_address']} - if port.get('fixed_ips'): - body['ipAddress'] = port['fixed_ips'][0]['ip_address'] - body['subnetId'] = port['fixed_ips'][0]['subnet_id'] - return self._post(self.ports_path % vm_network_name, - body=body) - - def update_n1kv_port(self, vm_network_name, port_id, body): - """ - Update a port on the VSM. - - Update the mac address associated with the port - - :param vm_network_name: name of the VM network which imports this port - :param port_id: UUID of the port - :param body: dict of the arguments to be updated - """ - return self._post(self.port_path % (vm_network_name, port_id), - body=body) - - def delete_n1kv_port(self, vm_network_name, port_id): - """ - Delete a port on the VSM. 
- - :param vm_network_name: name of the VM network which imports this port - :param port_id: UUID of the port - """ - return self._delete(self.port_path % (vm_network_name, port_id)) - - def _do_request(self, method, action, body=None, - headers=None): - """ - Perform the HTTP request. - - The response is in either JSON format or plain text. A GET method will - invoke a JSON response while a PUT/POST/DELETE returns message from the - VSM in plain text format. - Exception is raised when VSM replies with an INTERNAL SERVER ERROR HTTP - status code (500) i.e. an error has occurred on the VSM or SERVICE - UNAVAILABLE (503) i.e. VSM is not reachable. - - :param method: type of the HTTP request. POST, GET, PUT or DELETE - :param action: path to which the client makes request - :param body: dict for arguments which are sent as part of the request - :param headers: header for the HTTP request - :returns: JSON or plain text in HTTP response - """ - action = self.action_prefix + action - if not headers and self.hosts: - headers = self._get_auth_header(self.hosts[0]) - headers['Content-Type'] = self._set_content_type('json') - headers['Accept'] = self._set_content_type('json') - if body: - body = jsonutils.dumps(body, indent=2) - LOG.debug(_("req: %s"), body) - try: - resp = self.pool.spawn(requests.request, - method, - url=action, - data=body, - headers=headers, - timeout=self.timeout).wait() - except Exception as e: - raise c_exc.VSMConnectionFailed(reason=e) - LOG.debug(_("status_code %s"), resp.status_code) - if resp.status_code == requests.codes.OK: - if 'application/json' in resp.headers['content-type']: - try: - return resp.json() - except ValueError: - return {} - elif 'text/plain' in resp.headers['content-type']: - LOG.debug(_("VSM: %s"), resp.text) - else: - raise c_exc.VSMError(reason=resp.text) - - def _set_content_type(self, format=None): - """ - Set the mime-type to either 'xml' or 'json'. - - :param format: format to be set. 
- :return: mime-type string - """ - if not format: - format = self.format - return "application/%s" % format - - def _delete(self, action, body=None, headers=None): - return self._do_request("DELETE", action, body=body, - headers=headers) - - def _get(self, action, body=None, headers=None): - return self._do_request("GET", action, body=body, - headers=headers) - - def _post(self, action, body=None, headers=None): - return self._do_request("POST", action, body=body, - headers=headers) - - def _put(self, action, body=None, headers=None): - return self._do_request("PUT", action, body=body, - headers=headers) - - def _get_vsm_hosts(self): - """ - Retrieve a list of VSM ip addresses. - - :return: list of host ip addresses - """ - return [cr[c_const.CREDENTIAL_NAME] for cr in - network_db_v2.get_all_n1kv_credentials()] - - def _get_auth_header(self, host_ip): - """ - Retrieve header with auth info for the VSM. - - :param host_ip: IP address of the VSM - :return: authorization header dict - """ - username = c_cred.Store.get_username(host_ip) - password = c_cred.Store.get_password(host_ip) - auth = base64.encodestring("%s:%s" % (username, password)).rstrip() - header = {"Authorization": "Basic %s" % auth} - return header - - def get_clusters(self): - """Fetches a list of all vxlan gateway clusters.""" - return self._get(self.clusters_path) - - def create_encapsulation_profile(self, encap): - """ - Create an encapsulation profile on VSM. - - :param encap: encapsulation dict - """ - body = {'name': encap['name'], - 'addMappings': encap['add_segment_list'], - 'delMappings': encap['del_segment_list']} - return self._post(self.encap_profiles_path, - body=body) - - def update_encapsulation_profile(self, context, profile_name, body): - """ - Adds a vlan to bridge-domain mapping to an encapsulation profile. 
- - :param profile_name: Name of the encapsulation profile - :param body: mapping dictionary - """ - return self._post(self.encap_profile_path - % profile_name, body=body) - - def delete_encapsulation_profile(self, name): - """ - Delete an encapsulation profile on VSM. - - :param name: name of the encapsulation profile to be deleted - """ - return self._delete(self.encap_profile_path % name) diff --git a/neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py b/neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py deleted file mode 100644 index 6ef51f3d0..000000000 --- a/neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py +++ /dev/null @@ -1,1438 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2013 Cisco Systems, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Aruna Kushwaha, Cisco Systems, Inc. -# @author: Rudrajit Tapadar, Cisco Systems, Inc. -# @author: Abhishek Raut, Cisco Systems, Inc. -# @author: Sergey Sudakovich, Cisco Systems, Inc. 
- -import eventlet - -from oslo.config import cfg as q_conf - -from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api -from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api -from neutron.api.v2 import attributes -from neutron.common import constants -from neutron.common import exceptions as n_exc -from neutron.common import rpc_compat -from neutron.common import topics -from neutron.common import utils -from neutron.db import agents_db -from neutron.db import agentschedulers_db -from neutron.db import db_base_plugin_v2 -from neutron.db import dhcp_rpc_base -from neutron.db import external_net_db -from neutron.db import extraroute_db -from neutron.db import l3_agentschedulers_db -from neutron.db import l3_rpc_base -from neutron.db import portbindings_db -from neutron.extensions import portbindings -from neutron.extensions import providernet -from neutron.openstack.common import importutils -from neutron.openstack.common import log as logging -from neutron.openstack.common import uuidutils as uuidutils -from neutron.plugins.cisco.common import cisco_constants as c_const -from neutron.plugins.cisco.common import cisco_credentials_v2 as c_cred -from neutron.plugins.cisco.common import cisco_exceptions -from neutron.plugins.cisco.common import config as c_conf -from neutron.plugins.cisco.db import n1kv_db_v2 -from neutron.plugins.cisco.db import network_db_v2 -from neutron.plugins.cisco.extensions import n1kv -from neutron.plugins.cisco.n1kv import n1kv_client -from neutron.plugins.common import constants as svc_constants - - -LOG = logging.getLogger(__name__) - - -class N1kvRpcCallbacks(rpc_compat.RpcCallback, - dhcp_rpc_base.DhcpRpcCallbackMixin, - l3_rpc_base.L3RpcCallbackMixin): - - """Class to handle agent RPC calls.""" - - # Set RPC API version to 1.1 by default. 
- RPC_API_VERSION = '1.1' - - -class N1kvNeutronPluginV2(db_base_plugin_v2.NeutronDbPluginV2, - external_net_db.External_net_db_mixin, - extraroute_db.ExtraRoute_db_mixin, - portbindings_db.PortBindingMixin, - n1kv_db_v2.NetworkProfile_db_mixin, - n1kv_db_v2.PolicyProfile_db_mixin, - network_db_v2.Credential_db_mixin, - l3_agentschedulers_db.L3AgentSchedulerDbMixin, - agentschedulers_db.DhcpAgentSchedulerDbMixin): - - """ - Implement the Neutron abstractions using Cisco Nexus1000V. - - Refer README file for the architecture, new features, and - workflow - - """ - - # This attribute specifies whether the plugin supports or not - # bulk operations. - __native_bulk_support = False - supported_extension_aliases = ["provider", "agent", - "n1kv", "network_profile", - "policy_profile", "external-net", "router", - "binding", "credential", - "l3_agent_scheduler", - "dhcp_agent_scheduler"] - - def __init__(self, configfile=None): - """ - Initialize Nexus1000V Neutron plugin. - - 1. Initialize VIF type to OVS - 2. Initialize Nexus1000v and Credential DB - 3. 
Establish communication with Cisco Nexus1000V - """ - super(N1kvNeutronPluginV2, self).__init__() - self.base_binding_dict = { - portbindings.VIF_TYPE: portbindings.VIF_TYPE_OVS, - portbindings.VIF_DETAILS: { - # TODO(rkukura): Replace with new VIF security details - portbindings.CAP_PORT_FILTER: - 'security-group' in self.supported_extension_aliases}} - c_cred.Store.initialize() - self._setup_vsm() - self._setup_rpc() - self.network_scheduler = importutils.import_object( - q_conf.CONF.network_scheduler_driver - ) - self.router_scheduler = importutils.import_object( - q_conf.CONF.router_scheduler_driver - ) - - def _setup_rpc(self): - # RPC support - self.service_topics = {svc_constants.CORE: topics.PLUGIN, - svc_constants.L3_ROUTER_NAT: topics.L3PLUGIN} - self.conn = rpc_compat.create_connection(new=True) - self.endpoints = [N1kvRpcCallbacks(), agents_db.AgentExtRpcCallback()] - for svc_topic in self.service_topics.values(): - self.conn.create_consumer(svc_topic, self.endpoints, fanout=False) - self.dhcp_agent_notifier = dhcp_rpc_agent_api.DhcpAgentNotifyAPI() - self.l3_agent_notifier = l3_rpc_agent_api.L3AgentNotifyAPI() - # Consume from all consumers in threads - self.conn.consume_in_threads() - - def _setup_vsm(self): - """ - Setup Cisco Nexus 1000V related parameters and pull policy profiles. - - Retrieve all the policy profiles from the VSM when the plugin is - is instantiated for the first time and then continue to poll for - policy profile updates. - """ - LOG.debug(_('_setup_vsm')) - self.agent_vsm = True - # Poll VSM for create/delete of policy profile. - eventlet.spawn(self._poll_policy_profiles) - - def _poll_policy_profiles(self): - """Start a green thread to pull policy profiles from VSM.""" - while True: - self._populate_policy_profiles() - eventlet.sleep(c_conf.CISCO_N1K.poll_duration) - - def _populate_policy_profiles(self): - """ - Populate all the policy profiles from VSM. 
- - The tenant id is not available when the policy profiles are polled - from the VSM. Hence we associate the policy profiles with fake - tenant-ids. - """ - LOG.debug(_('_populate_policy_profiles')) - try: - n1kvclient = n1kv_client.Client() - policy_profiles = n1kvclient.list_port_profiles() - vsm_profiles = {} - plugin_profiles_set = set() - # Fetch policy profiles from VSM - for profile_name in policy_profiles: - profile_id = (policy_profiles - [profile_name][c_const.PROPERTIES][c_const.ID]) - vsm_profiles[profile_id] = profile_name - # Fetch policy profiles previously populated - for profile in n1kv_db_v2.get_policy_profiles(): - plugin_profiles_set.add(profile.id) - vsm_profiles_set = set(vsm_profiles) - # Update database if the profile sets differ. - if vsm_profiles_set ^ plugin_profiles_set: - # Add profiles in database if new profiles were created in VSM - for pid in vsm_profiles_set - plugin_profiles_set: - self._add_policy_profile(vsm_profiles[pid], pid) - - # Delete profiles from database if profiles were deleted in VSM - for pid in plugin_profiles_set - vsm_profiles_set: - self._delete_policy_profile(pid) - self._remove_all_fake_policy_profiles() - except (cisco_exceptions.VSMError, - cisco_exceptions.VSMConnectionFailed): - LOG.warning(_('No policy profile populated from VSM')) - - def _extend_network_dict_provider(self, context, network): - """Add extended network parameters.""" - binding = n1kv_db_v2.get_network_binding(context.session, - network['id']) - network[providernet.NETWORK_TYPE] = binding.network_type - if binding.network_type == c_const.NETWORK_TYPE_OVERLAY: - network[providernet.PHYSICAL_NETWORK] = None - network[providernet.SEGMENTATION_ID] = binding.segmentation_id - network[n1kv.MULTICAST_IP] = binding.multicast_ip - elif binding.network_type == c_const.NETWORK_TYPE_VLAN: - network[providernet.PHYSICAL_NETWORK] = binding.physical_network - network[providernet.SEGMENTATION_ID] = binding.segmentation_id - elif binding.network_type == 
c_const.NETWORK_TYPE_TRUNK: - network[providernet.PHYSICAL_NETWORK] = binding.physical_network - network[providernet.SEGMENTATION_ID] = None - network[n1kv.MULTICAST_IP] = None - elif binding.network_type == c_const.NETWORK_TYPE_MULTI_SEGMENT: - network[providernet.PHYSICAL_NETWORK] = None - network[providernet.SEGMENTATION_ID] = None - network[n1kv.MULTICAST_IP] = None - - def _process_provider_create(self, context, attrs): - network_type = attrs.get(providernet.NETWORK_TYPE) - physical_network = attrs.get(providernet.PHYSICAL_NETWORK) - segmentation_id = attrs.get(providernet.SEGMENTATION_ID) - - network_type_set = attributes.is_attr_set(network_type) - physical_network_set = attributes.is_attr_set(physical_network) - segmentation_id_set = attributes.is_attr_set(segmentation_id) - - if not (network_type_set or physical_network_set or - segmentation_id_set): - return (None, None, None) - - if not network_type_set: - msg = _("provider:network_type required") - raise n_exc.InvalidInput(error_message=msg) - elif network_type == c_const.NETWORK_TYPE_VLAN: - if not segmentation_id_set: - msg = _("provider:segmentation_id required") - raise n_exc.InvalidInput(error_message=msg) - if segmentation_id < 1 or segmentation_id > 4094: - msg = _("provider:segmentation_id out of range " - "(1 through 4094)") - raise n_exc.InvalidInput(error_message=msg) - elif network_type == c_const.NETWORK_TYPE_OVERLAY: - if physical_network_set: - msg = _("provider:physical_network specified for Overlay " - "network") - raise n_exc.InvalidInput(error_message=msg) - else: - physical_network = None - if not segmentation_id_set: - msg = _("provider:segmentation_id required") - raise n_exc.InvalidInput(error_message=msg) - if segmentation_id < 5000: - msg = _("provider:segmentation_id out of range " - "(5000+)") - raise n_exc.InvalidInput(error_message=msg) - else: - msg = _("provider:network_type %s not supported"), network_type - raise n_exc.InvalidInput(error_message=msg) - - if network_type 
== c_const.NETWORK_TYPE_VLAN: - if physical_network_set: - network_profiles = n1kv_db_v2.get_network_profiles() - for network_profile in network_profiles: - if physical_network == network_profile[ - 'physical_network']: - break - else: - msg = (_("Unknown provider:physical_network %s"), - physical_network) - raise n_exc.InvalidInput(error_message=msg) - else: - msg = _("provider:physical_network required") - raise n_exc.InvalidInput(error_message=msg) - - return (network_type, physical_network, segmentation_id) - - def _check_provider_update(self, context, attrs): - """Handle Provider network updates.""" - network_type = attrs.get(providernet.NETWORK_TYPE) - physical_network = attrs.get(providernet.PHYSICAL_NETWORK) - segmentation_id = attrs.get(providernet.SEGMENTATION_ID) - - network_type_set = attributes.is_attr_set(network_type) - physical_network_set = attributes.is_attr_set(physical_network) - segmentation_id_set = attributes.is_attr_set(segmentation_id) - - if not (network_type_set or physical_network_set or - segmentation_id_set): - return - - # TBD : Need to handle provider network updates - msg = _("Plugin does not support updating provider attributes") - raise n_exc.InvalidInput(error_message=msg) - - def _get_cluster(self, segment1, segment2, clusters): - """ - Returns a cluster to apply the segment mapping - - :param segment1: UUID of segment to be mapped - :param segment2: UUID of segment to be mapped - :param clusters: List of clusters - """ - for cluster in sorted(clusters, key=lambda k: k['size']): - for mapping in cluster[c_const.MAPPINGS]: - for segment in mapping[c_const.SEGMENTS]: - if segment1 in segment or segment2 in segment: - break - else: - cluster['size'] += 2 - return cluster['encapProfileName'] - break - return - - def _extend_mapping_dict(self, context, mapping_dict, segment): - """ - Extend a mapping dictionary with dot1q tag and bridge-domain name. 
- - :param context: neutron api request context - :param mapping_dict: dictionary to populate values - :param segment: id of the segment being populated - """ - net = self.get_network(context, segment) - if net[providernet.NETWORK_TYPE] == c_const.NETWORK_TYPE_VLAN: - mapping_dict['dot1q'] = str(net[providernet.SEGMENTATION_ID]) - else: - mapping_dict['bridgeDomain'] = (net['name'] + - c_const.BRIDGE_DOMAIN_SUFFIX) - - def _send_add_multi_segment_request(self, context, net_id, segment_pairs): - """ - Send Add multi-segment network request to VSM. - - :param context: neutron api request context - :param net_id: UUID of the multi-segment network - :param segment_pairs: List of segments in UUID pairs - that need to be bridged - """ - - if not segment_pairs: - return - - session = context.session - n1kvclient = n1kv_client.Client() - clusters = n1kvclient.get_clusters() - online_clusters = [] - encap_dict = {} - for cluster in clusters['body'][c_const.SET]: - cluster = cluster[c_const.PROPERTIES] - if cluster[c_const.STATE] == c_const.ONLINE: - cluster['size'] = 0 - for mapping in cluster[c_const.MAPPINGS]: - cluster['size'] += ( - len(mapping[c_const.SEGMENTS])) - online_clusters.append(cluster) - for (segment1, segment2) in segment_pairs: - encap_profile = self._get_cluster(segment1, segment2, - online_clusters) - if encap_profile is not None: - if encap_profile in encap_dict: - profile_dict = encap_dict[encap_profile] - else: - profile_dict = {'name': encap_profile, - 'addMappings': [], - 'delMappings': []} - encap_dict[encap_profile] = profile_dict - mapping_dict = {} - self._extend_mapping_dict(context, - mapping_dict, segment1) - self._extend_mapping_dict(context, - mapping_dict, segment2) - profile_dict['addMappings'].append(mapping_dict) - n1kv_db_v2.add_multi_segment_encap_profile_name(session, - net_id, - (segment1, - segment2), - encap_profile) - else: - raise cisco_exceptions.NoClusterFound - - for profile in encap_dict: - 
n1kvclient.update_encapsulation_profile(context, profile, - encap_dict[profile]) - - def _send_del_multi_segment_request(self, context, net_id, segment_pairs): - """ - Send Delete multi-segment network request to VSM. - - :param context: neutron api request context - :param net_id: UUID of the multi-segment network - :param segment_pairs: List of segments in UUID pairs - whose bridging needs to be removed - """ - if not segment_pairs: - return - session = context.session - encap_dict = {} - n1kvclient = n1kv_client.Client() - for (segment1, segment2) in segment_pairs: - binding = ( - n1kv_db_v2.get_multi_segment_network_binding(session, net_id, - (segment1, - segment2))) - encap_profile = binding['encap_profile_name'] - if encap_profile in encap_dict: - profile_dict = encap_dict[encap_profile] - else: - profile_dict = {'name': encap_profile, - 'addMappings': [], - 'delMappings': []} - encap_dict[encap_profile] = profile_dict - mapping_dict = {} - self._extend_mapping_dict(context, - mapping_dict, segment1) - self._extend_mapping_dict(context, - mapping_dict, segment2) - profile_dict['delMappings'].append(mapping_dict) - - for profile in encap_dict: - n1kvclient.update_encapsulation_profile(context, profile, - encap_dict[profile]) - - def _get_encap_segments(self, context, segment_pairs): - """ - Get the list of segments in encapsulation profile format. - - :param context: neutron api request context - :param segment_pairs: List of segments that need to be bridged - """ - member_list = [] - for pair in segment_pairs: - (segment, dot1qtag) = pair - member_dict = {} - net = self.get_network(context, segment) - member_dict['bridgeDomain'] = (net['name'] + - c_const.BRIDGE_DOMAIN_SUFFIX) - member_dict['dot1q'] = dot1qtag - member_list.append(member_dict) - return member_list - - def _populate_member_segments(self, context, network, segment_pairs, oper): - """ - Populate trunk network dict with member segments. 
- - :param context: neutron api request context - :param network: Dictionary containing the trunk network information - :param segment_pairs: List of segments in UUID pairs - that needs to be trunked - :param oper: Operation to be performed - """ - LOG.debug(_('_populate_member_segments %s'), segment_pairs) - trunk_list = [] - for (segment, dot1qtag) in segment_pairs: - net = self.get_network(context, segment) - member_dict = {'segment': net['name'], - 'dot1qtag': dot1qtag} - trunk_list.append(member_dict) - if oper == n1kv.SEGMENT_ADD: - network['add_segment_list'] = trunk_list - elif oper == n1kv.SEGMENT_DEL: - network['del_segment_list'] = trunk_list - - def _parse_multi_segments(self, context, attrs, param): - """ - Parse the multi-segment network attributes. - - :param context: neutron api request context - :param attrs: Attributes of the network - :param param: Additional parameter indicating an add - or del operation - :returns: List of segment UUIDs in set pairs - """ - pair_list = [] - valid_seg_types = [c_const.NETWORK_TYPE_VLAN, - c_const.NETWORK_TYPE_OVERLAY] - segments = attrs.get(param) - if not attributes.is_attr_set(segments): - return pair_list - for pair in segments.split(','): - segment1, sep, segment2 = pair.partition(':') - if (uuidutils.is_uuid_like(segment1) and - uuidutils.is_uuid_like(segment2)): - binding1 = n1kv_db_v2.get_network_binding(context.session, - segment1) - binding2 = n1kv_db_v2.get_network_binding(context.session, - segment2) - if (binding1.network_type not in valid_seg_types or - binding2.network_type not in valid_seg_types or - binding1.network_type == binding2.network_type): - msg = _("Invalid pairing supplied") - raise n_exc.InvalidInput(error_message=msg) - else: - pair_list.append((segment1, segment2)) - else: - LOG.debug(_('Invalid UUID supplied in %s'), pair) - msg = _("Invalid UUID supplied") - raise n_exc.InvalidInput(error_message=msg) - return pair_list - - def _parse_trunk_segments(self, context, attrs, param, 
physical_network, - sub_type): - """ - Parse the trunk network attributes. - - :param context: neutron api request context - :param attrs: Attributes of the network - :param param: Additional parameter indicating an add - or del operation - :param physical_network: Physical network of the trunk segment - :param sub_type: Sub-type of the trunk segment - :returns: List of segment UUIDs and dot1qtag (for vxlan) in set pairs - """ - pair_list = [] - segments = attrs.get(param) - if not attributes.is_attr_set(segments): - return pair_list - for pair in segments.split(','): - segment, sep, dot1qtag = pair.partition(':') - if sub_type == c_const.NETWORK_TYPE_VLAN: - dot1qtag = '' - if uuidutils.is_uuid_like(segment): - binding = n1kv_db_v2.get_network_binding(context.session, - segment) - if binding.network_type == c_const.NETWORK_TYPE_TRUNK: - msg = _("Cannot add a trunk segment '%s' as a member of " - "another trunk segment") % segment - raise n_exc.InvalidInput(error_message=msg) - elif binding.network_type == c_const.NETWORK_TYPE_VLAN: - if sub_type == c_const.NETWORK_TYPE_OVERLAY: - msg = _("Cannot add vlan segment '%s' as a member of " - "a vxlan trunk segment") % segment - raise n_exc.InvalidInput(error_message=msg) - if not physical_network: - physical_network = binding.physical_network - elif physical_network != binding.physical_network: - msg = _("Network UUID '%s' belongs to a different " - "physical network") % segment - raise n_exc.InvalidInput(error_message=msg) - elif binding.network_type == c_const.NETWORK_TYPE_OVERLAY: - if sub_type == c_const.NETWORK_TYPE_VLAN: - msg = _("Cannot add vxlan segment '%s' as a member of " - "a vlan trunk segment") % segment - raise n_exc.InvalidInput(error_message=msg) - try: - if not utils.is_valid_vlan_tag(int(dot1qtag)): - msg = _("Vlan tag '%s' is out of range") % dot1qtag - raise n_exc.InvalidInput(error_message=msg) - except ValueError: - msg = _("Vlan tag '%s' is not an integer " - "value") % dot1qtag - raise 
n_exc.InvalidInput(error_message=msg) - pair_list.append((segment, dot1qtag)) - else: - LOG.debug(_('%s is not a valid uuid'), segment) - msg = _("'%s' is not a valid UUID") % segment - raise n_exc.InvalidInput(error_message=msg) - return pair_list - - def _extend_network_dict_member_segments(self, context, network): - """Add the extended parameter member segments to the network.""" - members = [] - binding = n1kv_db_v2.get_network_binding(context.session, - network['id']) - if binding.network_type == c_const.NETWORK_TYPE_TRUNK: - members = n1kv_db_v2.get_trunk_members(context.session, - network['id']) - elif binding.network_type == c_const.NETWORK_TYPE_MULTI_SEGMENT: - members = n1kv_db_v2.get_multi_segment_members(context.session, - network['id']) - network[n1kv.MEMBER_SEGMENTS] = members - - def _extend_network_dict_profile(self, context, network): - """Add the extended parameter network profile to the network.""" - binding = n1kv_db_v2.get_network_binding(context.session, - network['id']) - network[n1kv.PROFILE_ID] = binding.profile_id - - def _extend_port_dict_profile(self, context, port): - """Add the extended parameter port profile to the port.""" - binding = n1kv_db_v2.get_port_binding(context.session, - port['id']) - port[n1kv.PROFILE_ID] = binding.profile_id - - def _process_network_profile(self, context, network): - """Validate network profile exists.""" - profile_id = network.get(n1kv.PROFILE_ID) - profile_id_set = attributes.is_attr_set(profile_id) - if not profile_id_set: - profile_name = c_conf.CISCO_N1K.default_network_profile - net_p = self._get_network_profile_by_name(context.session, - profile_name) - profile_id = net_p['id'] - network['n1kv:profile_id'] = profile_id - return profile_id - - def _process_policy_profile(self, context, attrs): - """Validates whether policy profile exists.""" - profile_id = attrs.get(n1kv.PROFILE_ID) - profile_id_set = attributes.is_attr_set(profile_id) - if not profile_id_set: - msg = _("n1kv:profile_id does not 
exist") - raise n_exc.InvalidInput(error_message=msg) - if not self._policy_profile_exists(profile_id): - msg = _("n1kv:profile_id does not exist") - raise n_exc.InvalidInput(error_message=msg) - - return profile_id - - def _send_create_logical_network_request(self, network_profile, tenant_id): - """ - Send create logical network request to VSM. - - :param network_profile: network profile dictionary - :param tenant_id: UUID representing the tenant - """ - LOG.debug(_('_send_create_logical_network')) - n1kvclient = n1kv_client.Client() - n1kvclient.create_logical_network(network_profile, tenant_id) - - def _send_delete_logical_network_request(self, network_profile): - """ - Send delete logical network request to VSM. - - :param network_profile: network profile dictionary - """ - LOG.debug('_send_delete_logical_network') - n1kvclient = n1kv_client.Client() - logical_network_name = (network_profile['id'] + - c_const.LOGICAL_NETWORK_SUFFIX) - n1kvclient.delete_logical_network(logical_network_name) - - def _send_create_network_profile_request(self, context, profile): - """ - Send create network profile request to VSM. - - :param context: neutron api request context - :param profile: network profile dictionary - """ - LOG.debug(_('_send_create_network_profile_request: %s'), profile['id']) - n1kvclient = n1kv_client.Client() - n1kvclient.create_network_segment_pool(profile, context.tenant_id) - - def _send_update_network_profile_request(self, profile): - """ - Send update network profile request to VSM. - - :param profile: network profile dictionary - """ - LOG.debug(_('_send_update_network_profile_request: %s'), profile['id']) - n1kvclient = n1kv_client.Client() - n1kvclient.update_network_segment_pool(profile) - - def _send_delete_network_profile_request(self, profile): - """ - Send delete network profile request to VSM. 
- - :param profile: network profile dictionary - """ - LOG.debug(_('_send_delete_network_profile_request: %s'), - profile['name']) - n1kvclient = n1kv_client.Client() - n1kvclient.delete_network_segment_pool(profile['id']) - - def _send_create_network_request(self, context, network, segment_pairs): - """ - Send create network request to VSM. - - Create a bridge domain for network of type Overlay. - :param context: neutron api request context - :param network: network dictionary - :param segment_pairs: List of segments in UUID pairs - that need to be bridged - """ - LOG.debug(_('_send_create_network_request: %s'), network['id']) - profile = self.get_network_profile(context, - network[n1kv.PROFILE_ID]) - n1kvclient = n1kv_client.Client() - if network[providernet.NETWORK_TYPE] == c_const.NETWORK_TYPE_OVERLAY: - n1kvclient.create_bridge_domain(network, profile['sub_type']) - if network[providernet.NETWORK_TYPE] == c_const.NETWORK_TYPE_TRUNK: - self._populate_member_segments(context, network, segment_pairs, - n1kv.SEGMENT_ADD) - network['del_segment_list'] = [] - if profile['sub_type'] == c_const.NETWORK_TYPE_OVERLAY: - encap_dict = {'name': (network['name'] + - c_const.ENCAPSULATION_PROFILE_SUFFIX), - 'add_segment_list': ( - self._get_encap_segments(context, - segment_pairs)), - 'del_segment_list': []} - n1kvclient.create_encapsulation_profile(encap_dict) - n1kvclient.create_network_segment(network, profile) - - def _send_update_network_request(self, context, network, add_segments, - del_segments): - """ - Send update network request to VSM. 
- - :param context: neutron api request context - :param network: network dictionary - :param add_segments: List of segments bindings - that need to be deleted - :param del_segments: List of segments bindings - that need to be deleted - """ - LOG.debug(_('_send_update_network_request: %s'), network['id']) - db_session = context.session - profile = n1kv_db_v2.get_network_profile( - db_session, network[n1kv.PROFILE_ID]) - n1kvclient = n1kv_client.Client() - body = {'description': network['name'], - 'id': network['id'], - 'networkSegmentPool': profile['id'], - 'vlan': network[providernet.SEGMENTATION_ID], - 'mode': 'access', - 'segmentType': profile['segment_type'], - 'addSegments': [], - 'delSegments': []} - if network[providernet.NETWORK_TYPE] == c_const.NETWORK_TYPE_TRUNK: - self._populate_member_segments(context, network, add_segments, - n1kv.SEGMENT_ADD) - self._populate_member_segments(context, network, del_segments, - n1kv.SEGMENT_DEL) - body['mode'] = c_const.NETWORK_TYPE_TRUNK - body['segmentType'] = profile['sub_type'] - body['addSegments'] = network['add_segment_list'] - body['delSegments'] = network['del_segment_list'] - LOG.debug(_('add_segments=%s'), body['addSegments']) - LOG.debug(_('del_segments=%s'), body['delSegments']) - if profile['sub_type'] == c_const.NETWORK_TYPE_OVERLAY: - encap_profile = (network['id'] + - c_const.ENCAPSULATION_PROFILE_SUFFIX) - encap_dict = {'name': encap_profile, - 'addMappings': ( - self._get_encap_segments(context, - add_segments)), - 'delMappings': ( - self._get_encap_segments(context, - del_segments))} - n1kvclient.update_encapsulation_profile(context, encap_profile, - encap_dict) - n1kvclient.update_network_segment(network['id'], body) - - def _send_delete_network_request(self, context, network): - """ - Send delete network request to VSM. - - Delete bridge domain if network is of type Overlay. - Delete encapsulation profile if network is of type OVERLAY Trunk. 
- :param context: neutron api request context - :param network: network dictionary - """ - LOG.debug(_('_send_delete_network_request: %s'), network['id']) - n1kvclient = n1kv_client.Client() - session = context.session - if network[providernet.NETWORK_TYPE] == c_const.NETWORK_TYPE_OVERLAY: - name = network['id'] + c_const.BRIDGE_DOMAIN_SUFFIX - n1kvclient.delete_bridge_domain(name) - elif network[providernet.NETWORK_TYPE] == c_const.NETWORK_TYPE_TRUNK: - profile = self.get_network_profile( - context, network[n1kv.PROFILE_ID]) - if profile['sub_type'] == c_const.NETWORK_TYPE_OVERLAY: - profile_name = (network['id'] + - c_const.ENCAPSULATION_PROFILE_SUFFIX) - n1kvclient.delete_encapsulation_profile(profile_name) - elif (network[providernet.NETWORK_TYPE] == - c_const.NETWORK_TYPE_MULTI_SEGMENT): - encap_dict = n1kv_db_v2.get_multi_segment_encap_dict(session, - network['id']) - for profile in encap_dict: - profile_dict = {'name': profile, - 'addSegments': [], - 'delSegments': []} - for segment_pair in encap_dict[profile]: - mapping_dict = {} - (segment1, segment2) = segment_pair - self._extend_mapping_dict(context, - mapping_dict, segment1) - self._extend_mapping_dict(context, - mapping_dict, segment2) - profile_dict['delSegments'].append(mapping_dict) - n1kvclient.update_encapsulation_profile(context, profile, - profile_dict) - n1kvclient.delete_network_segment(network['id']) - - def _send_create_subnet_request(self, context, subnet): - """ - Send create subnet request to VSM. - - :param context: neutron api request context - :param subnet: subnet dictionary - """ - LOG.debug(_('_send_create_subnet_request: %s'), subnet['id']) - n1kvclient = n1kv_client.Client() - n1kvclient.create_ip_pool(subnet) - - def _send_update_subnet_request(self, subnet): - """ - Send update subnet request to VSM. 
- - :param subnet: subnet dictionary - """ - LOG.debug(_('_send_update_subnet_request: %s'), subnet['name']) - n1kvclient = n1kv_client.Client() - n1kvclient.update_ip_pool(subnet) - - def _send_delete_subnet_request(self, context, subnet): - """ - Send delete subnet request to VSM. - - :param context: neutron api request context - :param subnet: subnet dictionary - """ - LOG.debug(_('_send_delete_subnet_request: %s'), subnet['name']) - body = {'ipPool': subnet['id'], 'deleteSubnet': True} - n1kvclient = n1kv_client.Client() - n1kvclient.update_network_segment(subnet['network_id'], body=body) - n1kvclient.delete_ip_pool(subnet['id']) - - def _send_create_port_request(self, - context, - port, - port_count, - policy_profile, - vm_network_name): - """ - Send create port request to VSM. - - Create a VM network for a network and policy profile combination. - If the VM network already exists, bind this port to the existing - VM network on the VSM. - :param context: neutron api request context - :param port: port dictionary - :param port_count: integer representing the number of ports in one - VM Network - :param policy_profile: object of type policy profile - :param vm_network_name: string representing the name of the VM - network - """ - LOG.debug(_('_send_create_port_request: %s'), port) - n1kvclient = n1kv_client.Client() - if port_count == 1: - n1kvclient.create_vm_network(port, - vm_network_name, - policy_profile) - else: - n1kvclient.create_n1kv_port(port, vm_network_name) - - def _send_update_port_request(self, port_id, mac_address, vm_network_name): - """ - Send update port request to VSM. 
- - :param port_id: UUID representing port to update - :param mac_address: string representing the mac address - :param vm_network_name: VM network name to which the port is bound - """ - LOG.debug(_('_send_update_port_request: %s'), port_id) - body = {'portId': port_id, - 'macAddress': mac_address} - n1kvclient = n1kv_client.Client() - n1kvclient.update_n1kv_port(vm_network_name, port_id, body) - - def _send_delete_port_request(self, context, port, vm_network): - """ - Send delete port request to VSM. - - Delete the port on the VSM. If it is the last port on the VM Network, - delete the VM Network. - :param context: neutron api request context - :param port: port object which is to be deleted - :param vm_network: VM network object with which the port is associated - """ - LOG.debug(_('_send_delete_port_request: %s'), port['id']) - n1kvclient = n1kv_client.Client() - n1kvclient.delete_n1kv_port(vm_network['name'], port['id']) - if vm_network['port_count'] == 0: - n1kvclient.delete_vm_network(vm_network['name']) - - def _get_segmentation_id(self, context, id): - """ - Retrieve segmentation ID for a given network. - - :param context: neutron api request context - :param id: UUID of the network - :returns: segmentation ID for the network - """ - session = context.session - binding = n1kv_db_v2.get_network_binding(session, id) - return binding.segmentation_id - - def create_network(self, context, network): - """ - Create network based on network profile. 
- - :param context: neutron api request context - :param network: network dictionary - :returns: network object - """ - (network_type, physical_network, - segmentation_id) = self._process_provider_create(context, - network['network']) - profile_id = self._process_network_profile(context, network['network']) - segment_pairs = None - LOG.debug(_('Create network: profile_id=%s'), profile_id) - session = context.session - with session.begin(subtransactions=True): - if not network_type: - # tenant network - (physical_network, network_type, segmentation_id, - multicast_ip) = n1kv_db_v2.alloc_network(session, - profile_id) - LOG.debug(_('Physical_network %(phy_net)s, ' - 'seg_type %(net_type)s, ' - 'seg_id %(seg_id)s, ' - 'multicast_ip %(multicast_ip)s'), - {'phy_net': physical_network, - 'net_type': network_type, - 'seg_id': segmentation_id, - 'multicast_ip': multicast_ip}) - if network_type == c_const.NETWORK_TYPE_MULTI_SEGMENT: - segment_pairs = ( - self._parse_multi_segments(context, network['network'], - n1kv.SEGMENT_ADD)) - LOG.debug(_('Seg list %s '), segment_pairs) - elif network_type == c_const.NETWORK_TYPE_TRUNK: - network_profile = self.get_network_profile(context, - profile_id) - segment_pairs = ( - self._parse_trunk_segments(context, network['network'], - n1kv.SEGMENT_ADD, - physical_network, - network_profile['sub_type'] - )) - LOG.debug(_('Seg list %s '), segment_pairs) - else: - if not segmentation_id: - raise n_exc.TenantNetworksDisabled() - else: - # provider network - if network_type == c_const.NETWORK_TYPE_VLAN: - network_profile = self.get_network_profile(context, - profile_id) - seg_min, seg_max = self._get_segment_range( - network_profile['segment_range']) - if not seg_min <= segmentation_id <= seg_max: - raise cisco_exceptions.VlanIDOutsidePool - n1kv_db_v2.reserve_specific_vlan(session, - physical_network, - segmentation_id) - multicast_ip = "0.0.0.0" - net = super(N1kvNeutronPluginV2, self).create_network(context, - network) - 
n1kv_db_v2.add_network_binding(session, - net['id'], - network_type, - physical_network, - segmentation_id, - multicast_ip, - profile_id, - segment_pairs) - self._process_l3_create(context, net, network['network']) - self._extend_network_dict_provider(context, net) - self._extend_network_dict_profile(context, net) - try: - if network_type == c_const.NETWORK_TYPE_MULTI_SEGMENT: - self._send_add_multi_segment_request(context, net['id'], - segment_pairs) - else: - self._send_create_network_request(context, net, segment_pairs) - except(cisco_exceptions.VSMError, - cisco_exceptions.VSMConnectionFailed): - super(N1kvNeutronPluginV2, self).delete_network(context, net['id']) - else: - LOG.debug(_("Created network: %s"), net['id']) - return net - - def update_network(self, context, id, network): - """ - Update network parameters. - - :param context: neutron api request context - :param id: UUID representing the network to update - :returns: updated network object - """ - self._check_provider_update(context, network['network']) - add_segments = [] - del_segments = [] - - session = context.session - with session.begin(subtransactions=True): - net = super(N1kvNeutronPluginV2, self).update_network(context, id, - network) - self._process_l3_update(context, net, network['network']) - binding = n1kv_db_v2.get_network_binding(session, id) - if binding.network_type == c_const.NETWORK_TYPE_MULTI_SEGMENT: - add_segments = ( - self._parse_multi_segments(context, network['network'], - n1kv.SEGMENT_ADD)) - n1kv_db_v2.add_multi_segment_binding(session, - net['id'], add_segments) - del_segments = ( - self._parse_multi_segments(context, network['network'], - n1kv.SEGMENT_DEL)) - self._send_add_multi_segment_request(context, net['id'], - add_segments) - self._send_del_multi_segment_request(context, net['id'], - del_segments) - n1kv_db_v2.del_multi_segment_binding(session, - net['id'], del_segments) - elif binding.network_type == c_const.NETWORK_TYPE_TRUNK: - network_profile = 
self.get_network_profile(context, - binding.profile_id) - add_segments = ( - self._parse_trunk_segments(context, network['network'], - n1kv.SEGMENT_ADD, - binding.physical_network, - network_profile['sub_type'])) - n1kv_db_v2.add_trunk_segment_binding(session, - net['id'], add_segments) - del_segments = ( - self._parse_trunk_segments(context, network['network'], - n1kv.SEGMENT_DEL, - binding.physical_network, - network_profile['sub_type'])) - n1kv_db_v2.del_trunk_segment_binding(session, - net['id'], del_segments) - self._extend_network_dict_provider(context, net) - self._extend_network_dict_profile(context, net) - if binding.network_type != c_const.NETWORK_TYPE_MULTI_SEGMENT: - self._send_update_network_request(context, net, add_segments, - del_segments) - LOG.debug(_("Updated network: %s"), net['id']) - return net - - def delete_network(self, context, id): - """ - Delete a network. - - :param context: neutron api request context - :param id: UUID representing the network to delete - """ - session = context.session - with session.begin(subtransactions=True): - binding = n1kv_db_v2.get_network_binding(session, id) - network = self.get_network(context, id) - if n1kv_db_v2.is_trunk_member(session, id): - msg = _("Cannot delete network '%s' " - "that is member of a trunk segment") % network['name'] - raise n_exc.InvalidInput(error_message=msg) - if n1kv_db_v2.is_multi_segment_member(session, id): - msg = _("Cannot delete network '%s' that is a member of a " - "multi-segment network") % network['name'] - raise n_exc.InvalidInput(error_message=msg) - if binding.network_type == c_const.NETWORK_TYPE_OVERLAY: - n1kv_db_v2.release_vxlan(session, binding.segmentation_id) - elif binding.network_type == c_const.NETWORK_TYPE_VLAN: - n1kv_db_v2.release_vlan(session, binding.physical_network, - binding.segmentation_id) - self._process_l3_delete(context, id) - super(N1kvNeutronPluginV2, self).delete_network(context, id) - # the network_binding record is deleted via cascade from - 
# the network record, so explicit removal is not necessary - self._send_delete_network_request(context, network) - LOG.debug(_("Deleted network: %s"), id) - - def get_network(self, context, id, fields=None): - """ - Retrieve a Network. - - :param context: neutron api request context - :param id: UUID representing the network to fetch - :returns: requested network dictionary - """ - LOG.debug(_("Get network: %s"), id) - net = super(N1kvNeutronPluginV2, self).get_network(context, id, None) - self._extend_network_dict_provider(context, net) - self._extend_network_dict_profile(context, net) - self._extend_network_dict_member_segments(context, net) - return self._fields(net, fields) - - def get_networks(self, context, filters=None, fields=None): - """ - Retrieve a list of networks. - - :param context: neutron api request context - :param filters: a dictionary with keys that are valid keys for a - network object. Values in this dictiontary are an - iterable containing values that will be used for an - exact match comparison for that value. Each result - returned by this function will have matched one of the - values for each key in filters - :params fields: a list of strings that are valid keys in a network - dictionary. Only these fields will be returned. - :returns: list of network dictionaries. - """ - LOG.debug(_("Get networks")) - nets = super(N1kvNeutronPluginV2, self).get_networks(context, filters, - None) - for net in nets: - self._extend_network_dict_provider(context, net) - self._extend_network_dict_profile(context, net) - - return [self._fields(net, fields) for net in nets] - - def create_port(self, context, port): - """ - Create neutron port. - - Create a port. Use a default policy profile for ports created for dhcp - and router interface. Default policy profile name is configured in the - /etc/neutron/cisco_plugins.ini file. 
- - :param context: neutron api request context - :param port: port dictionary - :returns: port object - """ - p_profile = None - port_count = None - vm_network_name = None - profile_id_set = False - - # Set the network policy profile id for auto generated L3/DHCP ports - if ('device_id' in port['port'] and port['port']['device_owner'] in - [constants.DEVICE_OWNER_DHCP, constants.DEVICE_OWNER_ROUTER_INTF, - constants.DEVICE_OWNER_ROUTER_GW, - constants.DEVICE_OWNER_FLOATINGIP]): - p_profile_name = c_conf.CISCO_N1K.network_node_policy_profile - p_profile = self._get_policy_profile_by_name(p_profile_name) - if p_profile: - port['port']['n1kv:profile_id'] = p_profile['id'] - - if n1kv.PROFILE_ID in port['port']: - profile_id = port['port'].get(n1kv.PROFILE_ID) - profile_id_set = attributes.is_attr_set(profile_id) - - # Set the default policy profile id for ports if no id is set - if not profile_id_set: - p_profile_name = c_conf.CISCO_N1K.default_policy_profile - p_profile = self._get_policy_profile_by_name(p_profile_name) - if p_profile: - port['port']['n1kv:profile_id'] = p_profile['id'] - profile_id_set = True - - profile_id = self._process_policy_profile(context, - port['port']) - LOG.debug(_('Create port: profile_id=%s'), profile_id) - session = context.session - with session.begin(subtransactions=True): - pt = super(N1kvNeutronPluginV2, self).create_port(context, - port) - n1kv_db_v2.add_port_binding(session, pt['id'], profile_id) - self._extend_port_dict_profile(context, pt) - try: - vm_network = n1kv_db_v2.get_vm_network( - context.session, - profile_id, - pt['network_id']) - except cisco_exceptions.VMNetworkNotFound: - # Create a VM Network if no VM network exists. - vm_network_name = "%s%s_%s" % (c_const.VM_NETWORK_NAME_PREFIX, - profile_id, - pt['network_id']) - port_count = 1 - n1kv_db_v2.add_vm_network(context.session, - vm_network_name, - profile_id, - pt['network_id'], - port_count) - else: - # Update port count of the VM network. 
- vm_network_name = vm_network['name'] - port_count = vm_network['port_count'] + 1 - n1kv_db_v2.update_vm_network_port_count(context.session, - vm_network_name, - port_count) - self._process_portbindings_create_and_update(context, - port['port'], - pt) - # Extract policy profile for VM network create in VSM. - if not p_profile: - p_profile = n1kv_db_v2.get_policy_profile(session, profile_id) - try: - self._send_create_port_request(context, - pt, - port_count, - p_profile, - vm_network_name) - except(cisco_exceptions.VSMError, - cisco_exceptions.VSMConnectionFailed): - super(N1kvNeutronPluginV2, self).delete_port(context, pt['id']) - else: - LOG.debug(_("Created port: %s"), pt) - return pt - - def update_port(self, context, id, port): - """ - Update port parameters. - - :param context: neutron api request context - :param id: UUID representing the port to update - :returns: updated port object - """ - LOG.debug(_("Update port: %s"), id) - with context.session.begin(subtransactions=True): - updated_port = super(N1kvNeutronPluginV2, - self).update_port(context, id, port) - self._process_portbindings_create_and_update(context, - port['port'], - updated_port) - self._extend_port_dict_profile(context, updated_port) - return updated_port - - def delete_port(self, context, id, l3_port_check=True): - """ - Delete a port. - - :param context: neutron api request context - :param id: UUID representing the port to delete - """ - # if needed, check to see if this is a port owned by - # and l3-router. If so, we should prevent deletion. 
- if l3_port_check: - self.prevent_l3_port_deletion(context, id) - with context.session.begin(subtransactions=True): - port = self.get_port(context, id) - vm_network = n1kv_db_v2.get_vm_network(context.session, - port[n1kv.PROFILE_ID], - port['network_id']) - vm_network['port_count'] -= 1 - n1kv_db_v2.update_vm_network_port_count(context.session, - vm_network['name'], - vm_network['port_count']) - if vm_network['port_count'] == 0: - n1kv_db_v2.delete_vm_network(context.session, - port[n1kv.PROFILE_ID], - port['network_id']) - self.disassociate_floatingips(context, id) - super(N1kvNeutronPluginV2, self).delete_port(context, id) - self._send_delete_port_request(context, port, vm_network) - - def get_port(self, context, id, fields=None): - """ - Retrieve a port. - :param context: neutron api request context - :param id: UUID representing the port to retrieve - :param fields: a list of strings that are valid keys in a port - dictionary. Only these fields will be returned. - :returns: port dictionary - """ - LOG.debug(_("Get port: %s"), id) - port = super(N1kvNeutronPluginV2, self).get_port(context, id, None) - self._extend_port_dict_profile(context, port) - return self._fields(port, fields) - - def get_ports(self, context, filters=None, fields=None): - """ - Retrieve a list of ports. - - :param context: neutron api request context - :param filters: a dictionary with keys that are valid keys for a - port object. Values in this dictiontary are an - iterable containing values that will be used for an - exact match comparison for that value. Each result - returned by this function will have matched one of the - values for each key in filters - :params fields: a list of strings that are valid keys in a port - dictionary. Only these fields will be returned. 
- :returns: list of port dictionaries - """ - LOG.debug(_("Get ports")) - ports = super(N1kvNeutronPluginV2, self).get_ports(context, filters, - None) - for port in ports: - self._extend_port_dict_profile(context, port) - - return [self._fields(port, fields) for port in ports] - - def create_subnet(self, context, subnet): - """ - Create subnet for a given network. - - :param context: neutron api request context - :param subnet: subnet dictionary - :returns: subnet object - """ - LOG.debug(_('Create subnet')) - sub = super(N1kvNeutronPluginV2, self).create_subnet(context, subnet) - try: - self._send_create_subnet_request(context, sub) - except(cisco_exceptions.VSMError, - cisco_exceptions.VSMConnectionFailed): - super(N1kvNeutronPluginV2, self).delete_subnet(context, sub['id']) - else: - LOG.debug(_("Created subnet: %s"), sub['id']) - return sub - - def update_subnet(self, context, id, subnet): - """ - Update a subnet. - - :param context: neutron api request context - :param id: UUID representing subnet to update - :returns: updated subnet object - """ - LOG.debug(_('Update subnet')) - sub = super(N1kvNeutronPluginV2, self).update_subnet(context, - id, - subnet) - self._send_update_subnet_request(sub) - return sub - - def delete_subnet(self, context, id): - """ - Delete a subnet. - - :param context: neutron api request context - :param id: UUID representing subnet to delete - :returns: deleted subnet object - """ - LOG.debug(_('Delete subnet: %s'), id) - subnet = self.get_subnet(context, id) - self._send_delete_subnet_request(context, subnet) - return super(N1kvNeutronPluginV2, self).delete_subnet(context, id) - - def get_subnet(self, context, id, fields=None): - """ - Retrieve a subnet. - - :param context: neutron api request context - :param id: UUID representing subnet to retrieve - :params fields: a list of strings that are valid keys in a subnet - dictionary. Only these fields will be returned. 
- :returns: subnet object - """ - LOG.debug(_("Get subnet: %s"), id) - subnet = super(N1kvNeutronPluginV2, self).get_subnet(context, id, - None) - return self._fields(subnet, fields) - - def get_subnets(self, context, filters=None, fields=None): - """ - Retrieve a list of subnets. - - :param context: neutron api request context - :param filters: a dictionary with keys that are valid keys for a - subnet object. Values in this dictiontary are an - iterable containing values that will be used for an - exact match comparison for that value. Each result - returned by this function will have matched one of the - values for each key in filters - :params fields: a list of strings that are valid keys in a subnet - dictionary. Only these fields will be returned. - :returns: list of dictionaries of subnets - """ - LOG.debug(_("Get subnets")) - subnets = super(N1kvNeutronPluginV2, self).get_subnets(context, - filters, - None) - return [self._fields(subnet, fields) for subnet in subnets] - - def create_network_profile(self, context, network_profile): - """ - Create a network profile. - - Create a network profile, which represents a pool of networks - belonging to one type (VLAN or Overlay). On creation of network - profile, we retrieve the admin tenant-id which we use to replace - the previously stored fake tenant-id in tenant-profile bindings. 
- :param context: neutron api request context - :param network_profile: network profile dictionary - :returns: network profile object - """ - self._replace_fake_tenant_id_with_real(context) - with context.session.begin(subtransactions=True): - net_p = super(N1kvNeutronPluginV2, - self).create_network_profile(context, - network_profile) - try: - self._send_create_logical_network_request(net_p, - context.tenant_id) - except(cisco_exceptions.VSMError, - cisco_exceptions.VSMConnectionFailed): - n1kv_db_v2.delete_profile_binding(context.session, - context.tenant_id, - net_p['id']) - try: - self._send_create_network_profile_request(context, net_p) - except(cisco_exceptions.VSMError, - cisco_exceptions.VSMConnectionFailed): - n1kv_db_v2.delete_profile_binding(context.session, - context.tenant_id, - net_p['id']) - self._send_delete_logical_network_request(net_p) - return net_p - - def delete_network_profile(self, context, id): - """ - Delete a network profile. - - :param context: neutron api request context - :param id: UUID of the network profile to delete - :returns: deleted network profile object - """ - with context.session.begin(subtransactions=True): - net_p = super(N1kvNeutronPluginV2, - self).delete_network_profile(context, id) - self._send_delete_network_profile_request(net_p) - self._send_delete_logical_network_request(net_p) - - def update_network_profile(self, context, net_profile_id, network_profile): - """ - Update a network profile. - - :param context: neutron api request context - :param net_profile_id: UUID of the network profile to update - :param network_profile: dictionary containing network profile object - """ - session = context.session - with session.begin(subtransactions=True): - net_p = (super(N1kvNeutronPluginV2, self). - update_network_profile(context, - net_profile_id, - network_profile)) - self._send_update_network_profile_request(net_p) - return net_p - - def create_router(self, context, router): - """ - Handle creation of router. 
- - Schedule router to L3 agent as part of the create handling. - :param context: neutron api request context - :param router: router dictionary - :returns: router object - """ - session = context.session - with session.begin(subtransactions=True): - rtr = (super(N1kvNeutronPluginV2, self). - create_router(context, router)) - LOG.debug(_("Scheduling router %s"), rtr['id']) - self.schedule_router(context, rtr['id']) - return rtr diff --git a/neutron/plugins/cisco/network_plugin.py b/neutron/plugins/cisco/network_plugin.py deleted file mode 100644 index ee35fec81..000000000 --- a/neutron/plugins/cisco/network_plugin.py +++ /dev/null @@ -1,176 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2012 Cisco Systems, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Sumit Naiksatam, Cisco Systems, Inc. 
- -import logging - -import webob.exc as wexc - -from neutron.api import extensions as neutron_extensions -from neutron.api.v2 import base -from neutron.db import db_base_plugin_v2 -from neutron.openstack.common import importutils -from neutron.plugins.cisco.common import cisco_exceptions as cexc -from neutron.plugins.cisco.common import config -from neutron.plugins.cisco.db import network_db_v2 as cdb -from neutron.plugins.cisco import extensions - -LOG = logging.getLogger(__name__) - - -class PluginV2(db_base_plugin_v2.NeutronDbPluginV2): - """Meta-Plugin with v2 API support for multiple sub-plugins.""" - _supported_extension_aliases = ["credential", "Cisco qos"] - _methods_to_delegate = ['create_network', - 'delete_network', 'update_network', 'get_network', - 'get_networks', - 'create_port', 'delete_port', - 'update_port', 'get_port', 'get_ports', - 'create_subnet', - 'delete_subnet', 'update_subnet', - 'get_subnet', 'get_subnets', ] - - CISCO_FAULT_MAP = { - cexc.CredentialAlreadyExists: wexc.HTTPBadRequest, - cexc.CredentialNameNotFound: wexc.HTTPNotFound, - cexc.CredentialNotFound: wexc.HTTPNotFound, - cexc.NetworkSegmentIDNotFound: wexc.HTTPNotFound, - cexc.NetworkVlanBindingAlreadyExists: wexc.HTTPBadRequest, - cexc.NexusComputeHostNotConfigured: wexc.HTTPNotFound, - cexc.NexusConfigFailed: wexc.HTTPBadRequest, - cexc.NexusConnectFailed: wexc.HTTPServiceUnavailable, - cexc.NexusPortBindingNotFound: wexc.HTTPNotFound, - cexc.NoMoreNics: wexc.HTTPBadRequest, - cexc.PortIdForNexusSvi: wexc.HTTPBadRequest, - cexc.PortVnicBindingAlreadyExists: wexc.HTTPBadRequest, - cexc.PortVnicNotFound: wexc.HTTPNotFound, - cexc.QosNameAlreadyExists: wexc.HTTPBadRequest, - cexc.QosNotFound: wexc.HTTPNotFound, - cexc.SubnetNotSpecified: wexc.HTTPBadRequest, - cexc.VlanIDNotAvailable: wexc.HTTPNotFound, - cexc.VlanIDNotFound: wexc.HTTPNotFound, - } - - @property - def supported_extension_aliases(self): - if not hasattr(self, '_aliases'): - aliases = 
self._supported_extension_aliases[:] - if hasattr(self._model, "supported_extension_aliases"): - aliases.extend(self._model.supported_extension_aliases) - self._aliases = aliases - return self._aliases - - def __init__(self): - """Load the model class.""" - self._model_name = config.CISCO.model_class - self._model = importutils.import_object(self._model_name) - native_bulk_attr_name = ("_%s__native_bulk_support" - % self._model.__class__.__name__) - self.__native_bulk_support = getattr(self._model, - native_bulk_attr_name, False) - - neutron_extensions.append_api_extensions_path(extensions.__path__) - - # Extend the fault map - self._extend_fault_map() - - LOG.debug(_("Plugin initialization complete")) - - def __getattribute__(self, name): - """Delegate core API calls to the model class. - - Core API calls are delegated directly to the configured model class. - Note: Bulking calls will be handled by this class, and turned into - non-bulking calls to be considered for delegation. - """ - methods = object.__getattribute__(self, "_methods_to_delegate") - if name in methods: - return getattr(object.__getattribute__(self, "_model"), - name) - else: - return object.__getattribute__(self, name) - - def __getattr__(self, name): - """Delegate calls to the extensions. - - This delegates the calls to the extensions explicitly implemented by - the model. - """ - if hasattr(self._model, name): - return getattr(self._model, name) - else: - # Must make sure we re-raise the error that led us here, since - # otherwise getattr() and even hasattr() doesn't work correctly. - raise AttributeError( - _("'%(model)s' object has no attribute '%(name)s'") % - {'model': self._model_name, 'name': name}) - - def _extend_fault_map(self): - """Extend the Neutron Fault Map for Cisco exceptions. - - Map exceptions which are specific to the Cisco Plugin - to standard HTTP exceptions. 
- - """ - base.FAULT_MAP.update(self.CISCO_FAULT_MAP) - - """ - Extension API implementation - """ - def get_all_qoss(self, tenant_id): - """Get all QoS levels.""" - LOG.debug(_("get_all_qoss() called")) - qoslist = cdb.get_all_qoss(tenant_id) - return qoslist - - def get_qos_details(self, tenant_id, qos_id): - """Get QoS Details.""" - LOG.debug(_("get_qos_details() called")) - return cdb.get_qos(tenant_id, qos_id) - - def create_qos(self, tenant_id, qos_name, qos_desc): - """Create a QoS level.""" - LOG.debug(_("create_qos() called")) - qos = cdb.add_qos(tenant_id, qos_name, str(qos_desc)) - return qos - - def delete_qos(self, tenant_id, qos_id): - """Delete a QoS level.""" - LOG.debug(_("delete_qos() called")) - return cdb.remove_qos(tenant_id, qos_id) - - def rename_qos(self, tenant_id, qos_id, new_name): - """Rename QoS level.""" - LOG.debug(_("rename_qos() called")) - return cdb.update_qos(tenant_id, qos_id, new_name) - - def get_all_credentials(self): - """Get all credentials.""" - LOG.debug(_("get_all_credentials() called")) - credential_list = cdb.get_all_credentials() - return credential_list - - def get_credential_details(self, credential_id): - """Get a particular credential.""" - LOG.debug(_("get_credential_details() called")) - return cdb.get_credential(credential_id) - - def rename_credential(self, credential_id, new_name, new_password): - """Rename the particular credential resource.""" - LOG.debug(_("rename_credential() called")) - return cdb.update_credential(credential_id, new_name, - new_password=new_password) diff --git a/neutron/plugins/cisco/nexus/__init__.py b/neutron/plugins/cisco/nexus/__init__.py deleted file mode 100644 index 963eb547f..000000000 --- a/neutron/plugins/cisco/nexus/__init__.py +++ /dev/null @@ -1,21 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2011 Cisco Systems, Inc. All rights reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Sumit Naiksatam, Cisco Systems, Inc. -# @author: Edgar Magana, Cisco Systems, Inc. -""" -Init module for Nexus Driver -""" diff --git a/neutron/plugins/cisco/nexus/cisco_nexus_network_driver_v2.py b/neutron/plugins/cisco/nexus/cisco_nexus_network_driver_v2.py deleted file mode 100644 index bef145f03..000000000 --- a/neutron/plugins/cisco/nexus/cisco_nexus_network_driver_v2.py +++ /dev/null @@ -1,196 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2011 Cisco Systems, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Debojyoti Dutta, Cisco Systems, Inc. -# @author: Edgar Magana, Cisco Systems Inc. 
-# -""" -Implements a Nexus-OS NETCONF over SSHv2 API Client -""" - -import logging - -from ncclient import manager - -from neutron.openstack.common import excutils -from neutron.plugins.cisco.common import cisco_constants as const -from neutron.plugins.cisco.common import cisco_credentials_v2 as cred -from neutron.plugins.cisco.common import cisco_exceptions as cexc -from neutron.plugins.cisco.common import config as conf -from neutron.plugins.cisco.db import nexus_db_v2 -from neutron.plugins.cisco.nexus import cisco_nexus_snippets as snipp - -LOG = logging.getLogger(__name__) - - -class CiscoNEXUSDriver(): - """Nexus Driver Main Class.""" - def __init__(self): - cisco_switches = conf.get_device_dictionary() - self.nexus_switches = dict(((key[1], key[2]), val) - for key, val in cisco_switches.items() - if key[0] == 'NEXUS_SWITCH') - self.credentials = {} - self.connections = {} - - def _edit_config(self, nexus_host, target='running', config='', - allowed_exc_strs=None): - """Modify switch config for a target config type. - - :param nexus_host: IP address of switch to configure - :param target: Target config type - :param config: Configuration string in XML format - :param allowed_exc_strs: Exceptions which have any of these strings - as a subset of their exception message - (str(exception)) can be ignored - - :raises: NexusConfigFailed - - """ - if not allowed_exc_strs: - allowed_exc_strs = [] - mgr = self.nxos_connect(nexus_host) - try: - mgr.edit_config(target, config=config) - except Exception as e: - for exc_str in allowed_exc_strs: - if exc_str in str(e): - break - else: - # Raise a Neutron exception. Include a description of - # the original ncclient exception. 
No need to preserve T/B - raise cexc.NexusConfigFailed(config=config, exc=e) - - def get_credential(self, nexus_ip): - if nexus_ip not in self.credentials: - nexus_username = cred.Store.get_username(nexus_ip) - nexus_password = cred.Store.get_password(nexus_ip) - self.credentials[nexus_ip] = { - const.USERNAME: nexus_username, - const.PASSWORD: nexus_password - } - return self.credentials[nexus_ip] - - def nxos_connect(self, nexus_host): - """Make SSH connection to the Nexus Switch.""" - if getattr(self.connections.get(nexus_host), 'connected', None): - return self.connections[nexus_host] - - nexus_ssh_port = int(self.nexus_switches[nexus_host, 'ssh_port']) - nexus_creds = self.get_credential(nexus_host) - nexus_user = nexus_creds[const.USERNAME] - nexus_password = nexus_creds[const.PASSWORD] - try: - man = manager.connect(host=nexus_host, - port=nexus_ssh_port, - username=nexus_user, - password=nexus_password) - self.connections[nexus_host] = man - except Exception as e: - # Raise a Neutron exception. Include a description of - # the original ncclient exception. No need to preserve T/B. - raise cexc.NexusConnectFailed(nexus_host=nexus_host, exc=e) - - return self.connections[nexus_host] - - def create_xml_snippet(self, cutomized_config): - """Create XML snippet. - - Creates the Proper XML structure for the Nexus Switch Configuration. - """ - conf_xml_snippet = snipp.EXEC_CONF_SNIPPET % (cutomized_config) - return conf_xml_snippet - - def create_vlan(self, nexus_host, vlanid, vlanname): - """Create a VLAN on Nexus Switch given the VLAN ID and Name.""" - confstr = self.create_xml_snippet( - snipp.CMD_VLAN_CONF_SNIPPET % (vlanid, vlanname)) - self._edit_config(nexus_host, target='running', config=confstr) - - # Enable VLAN active and no-shutdown states. Some versions of - # Nexus switch do not allow state changes for the extended VLAN - # range (1006-4094), but these errors can be ignored (default - # values are appropriate). 
- state_config = [snipp.CMD_VLAN_ACTIVE_SNIPPET, - snipp.CMD_VLAN_NO_SHUTDOWN_SNIPPET] - for snippet in state_config: - try: - confstr = self.create_xml_snippet(snippet % vlanid) - self._edit_config( - nexus_host, - target='running', - config=confstr, - allowed_exc_strs=["Can't modify state for extended", - "Command is only allowed on VLAN"]) - except cexc.NexusConfigFailed: - with excutils.save_and_reraise_exception(): - self.delete_vlan(nexus_host, vlanid) - - def delete_vlan(self, nexus_host, vlanid): - """Delete a VLAN on Nexus Switch given the VLAN ID.""" - confstr = snipp.CMD_NO_VLAN_CONF_SNIPPET % vlanid - confstr = self.create_xml_snippet(confstr) - self._edit_config(nexus_host, target='running', config=confstr) - - def enable_vlan_on_trunk_int(self, nexus_host, vlanid, etype, interface): - """Enable a VLAN on a trunk interface.""" - # If one or more VLANs are already configured on this interface, - # include the 'add' keyword. - if nexus_db_v2.get_port_switch_bindings('%s:%s' % (etype, interface), - nexus_host): - snippet = snipp.CMD_INT_VLAN_ADD_SNIPPET - else: - snippet = snipp.CMD_INT_VLAN_SNIPPET - confstr = snippet % (etype, interface, vlanid, etype) - confstr = self.create_xml_snippet(confstr) - LOG.debug(_("NexusDriver: %s"), confstr) - self._edit_config(nexus_host, target='running', config=confstr) - - def disable_vlan_on_trunk_int(self, nexus_host, vlanid, etype, interface): - """Disable a VLAN on a trunk interface.""" - confstr = snipp.CMD_NO_VLAN_INT_SNIPPET % (etype, interface, - vlanid, etype) - confstr = self.create_xml_snippet(confstr) - LOG.debug(_("NexusDriver: %s"), confstr) - self._edit_config(nexus_host, target='running', config=confstr) - - def create_and_trunk_vlan(self, nexus_host, vlan_id, vlan_name, - etype, nexus_port): - """Create VLAN and trunk it on the specified ports.""" - self.create_vlan(nexus_host, vlan_id, vlan_name) - LOG.debug(_("NexusDriver created VLAN: %s"), vlan_id) - if nexus_port: - 
self.enable_vlan_on_trunk_int(nexus_host, vlan_id, - etype, nexus_port) - - def delete_and_untrunk_vlan(self, nexus_host, vlan_id, etype, nexus_port): - """Delete VLAN and untrunk it from the specified ports.""" - self.delete_vlan(nexus_host, vlan_id) - if nexus_port: - self.disable_vlan_on_trunk_int(nexus_host, vlan_id, - etype, nexus_port) - - def create_vlan_svi(self, nexus_host, vlan_id, gateway_ip): - confstr = snipp.CMD_VLAN_SVI_SNIPPET % (vlan_id, gateway_ip) - confstr = self.create_xml_snippet(confstr) - LOG.debug(_("NexusDriver: %s"), confstr) - self._edit_config(nexus_host, target='running', config=confstr) - - def delete_vlan_svi(self, nexus_host, vlan_id): - confstr = snipp.CMD_NO_VLAN_SVI_SNIPPET % vlan_id - confstr = self.create_xml_snippet(confstr) - LOG.debug(_("NexusDriver: %s"), confstr) - self._edit_config(nexus_host, target='running', config=confstr) diff --git a/neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py b/neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py deleted file mode 100644 index e9e34811a..000000000 --- a/neutron/plugins/cisco/nexus/cisco_nexus_plugin_v2.py +++ /dev/null @@ -1,347 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2012 Cisco Systems, Inc. -# All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Sumit Naiksatam, Cisco Systems, Inc. -# @author: Edgar Magana, Cisco Systems, Inc. -# @author: Arvind Somya, Cisco Systems, Inc. 
(asomya@cisco.com) -# - -""" -PlugIn for Nexus OS driver -""" - -import logging - -from neutron.openstack.common import excutils -from neutron.openstack.common import importutils -from neutron.plugins.cisco.common import cisco_constants as const -from neutron.plugins.cisco.common import cisco_exceptions as cisco_exc -from neutron.plugins.cisco.common import config as conf -from neutron.plugins.cisco.db import network_db_v2 as cdb -from neutron.plugins.cisco.db import nexus_db_v2 as nxos_db -from neutron.plugins.cisco import l2device_plugin_base - - -LOG = logging.getLogger(__name__) - - -class NexusPlugin(l2device_plugin_base.L2DevicePluginBase): - """Nexus PlugIn Main Class.""" - _networks = {} - - def __init__(self): - """Extract configuration parameters from the configuration file.""" - self._client = importutils.import_object(conf.CISCO.nexus_driver) - LOG.debug(_("Loaded driver %s"), conf.CISCO.nexus_driver) - self._nexus_switches = conf.get_device_dictionary() - - def create_network(self, network, attachment): - """Create or update a network when an attachment is changed. - - This method is not invoked at the usual plugin create_network() time. - Instead, it is invoked on create/update port. - - :param network: Network on which the port operation is happening - :param attachment: Details about the owner of the port - - Create a VLAN in the appropriate switch/port, and configure the - appropriate interfaces for this VLAN. - """ - LOG.debug(_("NexusPlugin:create_network() called")) - # Grab the switch IPs and ports for this host - host_connections = [] - host = attachment['host_name'] - for switch_type, switch_ip, attr in self._nexus_switches: - if str(attr) == str(host): - port = self._nexus_switches[switch_type, switch_ip, attr] - # Get ether type for port, assume an ethernet type - # if none specified. 
- if ':' in port: - etype, port_id = port.split(':') - else: - etype, port_id = 'ethernet', port - host_connections.append((switch_ip, etype, port_id)) - if not host_connections: - raise cisco_exc.NexusComputeHostNotConfigured(host=host) - - vlan_id = network[const.NET_VLAN_ID] - vlan_name = network[const.NET_VLAN_NAME] - auto_create = True - auto_trunk = True - if cdb.is_provider_vlan(vlan_id): - vlan_name = ''.join([conf.CISCO.provider_vlan_name_prefix, - str(vlan_id)]) - auto_create = conf.CISCO.provider_vlan_auto_create - auto_trunk = conf.CISCO.provider_vlan_auto_trunk - - # Check if this network is already in the DB - for switch_ip, etype, port_id in host_connections: - vlan_created = False - vlan_trunked = False - eport_id = '%s:%s' % (etype, port_id) - # Check for switch vlan bindings - try: - # This vlan has already been created on this switch - # via another operation, like SVI bindings. - nxos_db.get_nexusvlan_binding(vlan_id, switch_ip) - vlan_created = True - auto_create = False - except cisco_exc.NexusPortBindingNotFound: - # No changes, proceed as normal - pass - - try: - nxos_db.get_port_vlan_switch_binding(eport_id, vlan_id, - switch_ip) - except cisco_exc.NexusPortBindingNotFound: - if auto_create and auto_trunk: - # Create vlan and trunk vlan on the port - LOG.debug(_("Nexus: create & trunk vlan %s"), vlan_name) - self._client.create_and_trunk_vlan( - switch_ip, vlan_id, vlan_name, etype, port_id) - vlan_created = True - vlan_trunked = True - elif auto_create: - # Create vlan but do not trunk it on the port - LOG.debug(_("Nexus: create vlan %s"), vlan_name) - self._client.create_vlan(switch_ip, vlan_id, vlan_name) - vlan_created = True - elif auto_trunk: - # Only trunk vlan on the port - LOG.debug(_("Nexus: trunk vlan %s"), vlan_name) - self._client.enable_vlan_on_trunk_int( - switch_ip, vlan_id, etype, port_id) - vlan_trunked = True - - try: - instance = attachment[const.INSTANCE_ID] - nxos_db.add_nexusport_binding(eport_id, str(vlan_id), - 
switch_ip, instance) - except Exception: - with excutils.save_and_reraise_exception(): - # Add binding failed, roll back any vlan creation/enabling - if vlan_created and vlan_trunked: - LOG.debug(_("Nexus: delete & untrunk vlan %s"), - vlan_name) - self._client.delete_and_untrunk_vlan(switch_ip, - vlan_id, - etype, port_id) - elif vlan_created: - LOG.debug(_("Nexus: delete vlan %s"), vlan_name) - self._client.delete_vlan(switch_ip, vlan_id) - elif vlan_trunked: - LOG.debug(_("Nexus: untrunk vlan %s"), vlan_name) - self._client.disable_vlan_on_trunk_int(switch_ip, - vlan_id, - etype, - port_id) - - net_id = network[const.NET_ID] - new_net_dict = {const.NET_ID: net_id, - const.NET_NAME: network[const.NET_NAME], - const.NET_PORTS: {}, - const.NET_VLAN_NAME: vlan_name, - const.NET_VLAN_ID: vlan_id} - self._networks[net_id] = new_net_dict - return new_net_dict - - def add_router_interface(self, vlan_name, vlan_id, subnet_id, - gateway_ip, router_id): - """Create VLAN SVI on the Nexus switch.""" - # Find a switch to create the SVI on - switch_ip = self._find_switch_for_svi() - if not switch_ip: - raise cisco_exc.NoNexusSviSwitch() - - # Check if this vlan exists on the switch already - try: - nxos_db.get_nexusvlan_binding(vlan_id, switch_ip) - except cisco_exc.NexusPortBindingNotFound: - # Create vlan and trunk vlan on the port - self._client.create_and_trunk_vlan( - switch_ip, vlan_id, vlan_name, etype=None, nexus_port=None) - # Check if a router interface has already been created - try: - nxos_db.get_nexusvm_bindings(vlan_id, router_id) - raise cisco_exc.SubnetInterfacePresent(subnet_id=subnet_id, - router_id=router_id) - except cisco_exc.NexusPortBindingNotFound: - self._client.create_vlan_svi(switch_ip, vlan_id, gateway_ip) - nxos_db.add_nexusport_binding('router', str(vlan_id), - switch_ip, router_id) - - return True - - def remove_router_interface(self, vlan_id, router_id): - """Remove VLAN SVI from the Nexus Switch.""" - # Grab switch_ip from database - switch_ip 
= nxos_db.get_nexusvm_bindings(vlan_id, - router_id)[0].switch_ip - - # Delete the SVI interface from the switch - self._client.delete_vlan_svi(switch_ip, vlan_id) - - # Invoke delete_port to delete this row - # And delete vlan if required - return self.delete_port(router_id, vlan_id) - - def _find_switch_for_svi(self): - """Get a switch to create the SVI on.""" - LOG.debug(_("Grabbing a switch to create SVI")) - nexus_switches = self._client.nexus_switches - if conf.CISCO.svi_round_robin: - LOG.debug(_("Using round robin to create SVI")) - switch_dict = dict( - (switch_ip, 0) for switch_ip, _ in nexus_switches) - try: - bindings = nxos_db.get_nexussvi_bindings() - # Build a switch dictionary with weights - for binding in bindings: - switch_ip = binding.switch_ip - if switch_ip not in switch_dict: - switch_dict[switch_ip] = 1 - else: - switch_dict[switch_ip] += 1 - # Search for the lowest value in the dict - if switch_dict: - switch_ip = min(switch_dict, key=switch_dict.get) - return switch_ip - except cisco_exc.NexusPortBindingNotFound: - pass - - LOG.debug(_("No round robin or zero weights, using first switch")) - # Return the first switch in the config - return conf.first_device_ip - - def delete_network(self, tenant_id, net_id, **kwargs): - """Delete network. - - Not applicable to Nexus plugin. Defined here to satisfy abstract - method requirements. - """ - LOG.debug(_("NexusPlugin:delete_network() called")) # pragma no cover - - def update_network(self, tenant_id, net_id, **kwargs): - """Update the properties of a particular Virtual Network. - - Not applicable to Nexus plugin. Defined here to satisfy abstract - method requirements. - """ - LOG.debug(_("NexusPlugin:update_network() called")) # pragma no cover - - def create_port(self, tenant_id, net_id, port_state, port_id, **kwargs): - """Create port. - - Not applicable to Nexus plugin. Defined here to satisfy abstract - method requirements. 
- """ - LOG.debug(_("NexusPlugin:create_port() called")) # pragma no cover - - def delete_port(self, device_id, vlan_id): - """Delete port. - - Delete port bindings from the database and scan whether the network - is still required on the interfaces trunked. - """ - LOG.debug(_("NexusPlugin:delete_port() called")) - # Delete DB row(s) for this port - try: - rows = nxos_db.get_nexusvm_bindings(vlan_id, device_id) - except cisco_exc.NexusPortBindingNotFound: - return - - auto_delete = True - auto_untrunk = True - if cdb.is_provider_vlan(vlan_id): - auto_delete = conf.CISCO.provider_vlan_auto_create - auto_untrunk = conf.CISCO.provider_vlan_auto_trunk - LOG.debug(_("delete_network(): provider vlan %s"), vlan_id) - - instance_id = False - for row in rows: - instance_id = row['instance_id'] - switch_ip = row.switch_ip - etype, nexus_port = '', '' - if row['port_id'] == 'router': - etype, nexus_port = 'vlan', row['port_id'] - auto_untrunk = False - else: - etype, nexus_port = row['port_id'].split(':') - - nxos_db.remove_nexusport_binding(row.port_id, row.vlan_id, - row.switch_ip, - row.instance_id) - # Check whether there are any remaining instances using this - # vlan on this Nexus port. - try: - nxos_db.get_port_vlan_switch_binding(row.port_id, - row.vlan_id, - row.switch_ip) - except cisco_exc.NexusPortBindingNotFound: - try: - if nexus_port and auto_untrunk: - # Untrunk the vlan from this Nexus interface - self._client.disable_vlan_on_trunk_int( - switch_ip, row.vlan_id, etype, nexus_port) - - # Check whether there are any remaining instances - # using this vlan on the Nexus switch. - if auto_delete: - try: - nxos_db.get_nexusvlan_binding(row.vlan_id, - row.switch_ip) - except cisco_exc.NexusPortBindingNotFound: - # Delete this vlan from this switch - self._client.delete_vlan(switch_ip, row.vlan_id) - except Exception: - # The delete vlan operation on the Nexus failed, - # so this delete_port request has failed. 
For - # consistency, roll back the Nexus database to what - # it was before this request. - with excutils.save_and_reraise_exception(): - nxos_db.add_nexusport_binding(row.port_id, - row.vlan_id, - row.switch_ip, - row.instance_id) - - return instance_id - - def update_port(self, tenant_id, net_id, port_id, port_state, **kwargs): - """Update port. - - Not applicable to Nexus plugin. Defined here to satisfy abstract - method requirements. - """ - LOG.debug(_("NexusPlugin:update_port() called")) # pragma no cover - - def plug_interface(self, tenant_id, net_id, port_id, remote_interface_id, - **kwargs): - """Plug interfaces. - - Not applicable to Nexus plugin. Defined here to satisfy abstract - method requirements. - """ - LOG.debug(_("NexusPlugin:plug_interface() called")) # pragma no cover - - def unplug_interface(self, tenant_id, net_id, port_id, **kwargs): - """Unplug interface. - - Not applicable to Nexus plugin. Defined here to satisfy abstract - method requirements. - """ - LOG.debug(_("NexusPlugin:unplug_interface() called") - ) # pragma no cover diff --git a/neutron/plugins/cisco/nexus/cisco_nexus_snippets.py b/neutron/plugins/cisco/nexus/cisco_nexus_snippets.py deleted file mode 100644 index 90d265443..000000000 --- a/neutron/plugins/cisco/nexus/cisco_nexus_snippets.py +++ /dev/null @@ -1,180 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2011 Cisco Systems, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# -# @author: Edgar Magana, Cisco Systems, Inc. -# @author: Arvind Somya (asomya@cisco.com) Cisco Systems, Inc. - -""" -Nexus-OS XML-based configuration snippets -""" - -import logging - - -LOG = logging.getLogger(__name__) - - -# The following are standard strings, messages used to communicate with Nexus, -EXEC_CONF_SNIPPET = """ - - - <__XML__MODE__exec_configure>%s - - - -""" - -CMD_VLAN_CONF_SNIPPET = """ - - - <__XML__PARAM_value>%s - <__XML__MODE_vlan> - - %s - - - - -""" - -CMD_VLAN_ACTIVE_SNIPPET = """ - - - <__XML__PARAM_value>%s - <__XML__MODE_vlan> - - active - - - - -""" - -CMD_VLAN_NO_SHUTDOWN_SNIPPET = """ - - - <__XML__PARAM_value>%s - <__XML__MODE_vlan> - - - - - - -""" - -CMD_NO_VLAN_CONF_SNIPPET = """ - - - - <__XML__PARAM_value>%s - - - -""" - -CMD_INT_VLAN_HEADER = """ - - <%s> - %s - <__XML__MODE_if-ethernet-switch> - - - - """ - -CMD_VLAN_ID = """ - %s""" - -CMD_VLAN_ADD_ID = """ - %s - """ % CMD_VLAN_ID - -CMD_INT_VLAN_TRAILER = """ - - - - - - - -""" - -CMD_INT_VLAN_SNIPPET = (CMD_INT_VLAN_HEADER + - CMD_VLAN_ID + - CMD_INT_VLAN_TRAILER) - -CMD_INT_VLAN_ADD_SNIPPET = (CMD_INT_VLAN_HEADER + - CMD_VLAN_ADD_ID + - CMD_INT_VLAN_TRAILER) - -CMD_NO_VLAN_INT_SNIPPET = """ - - <%s> - %s - <__XML__MODE_if-ethernet-switch> - - - - - - - %s - - - - - - - - -""" - -FILTER_SHOW_VLAN_BRIEF_SNIPPET = """ - - - - - -""" - -CMD_VLAN_SVI_SNIPPET = """ - - - %s - <__XML__MODE_vlan> - - - - -
-
%s
-
-
- -
-
-""" - -CMD_NO_VLAN_SVI_SNIPPET = """ - - - - %s - - - -""" diff --git a/neutron/plugins/cisco/test/__init__.py b/neutron/plugins/cisco/test/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/neutron/plugins/cisco/test/nexus/__init__.py b/neutron/plugins/cisco/test/nexus/__init__.py deleted file mode 100644 index a68ed41ea..000000000 --- a/neutron/plugins/cisco/test/nexus/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2012 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import __builtin__ -setattr(__builtin__, '_', lambda x: x) diff --git a/neutron/plugins/cisco/test/nexus/fake_nexus_driver.py b/neutron/plugins/cisco/test/nexus/fake_nexus_driver.py deleted file mode 100644 index b40cbef14..000000000 --- a/neutron/plugins/cisco/test/nexus/fake_nexus_driver.py +++ /dev/null @@ -1,101 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2012 Cisco Systems, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Sumit Naiksatam, Cisco Systems, Inc. -# @author: Rohit Agarwalla, Cisco Systems, Inc. - - -class CiscoNEXUSFakeDriver(): - """Nexus Driver Fake Class.""" - - def __init__(self): - pass - - def nxos_connect(self, nexus_host, nexus_ssh_port, nexus_user, - nexus_password): - """Make the fake connection to the Nexus Switch.""" - pass - - def create_xml_snippet(self, cutomized_config): - """Create XML snippet. - - Creates the Proper XML structure for the Nexus Switch - Configuration. - """ - pass - - def enable_vlan(self, mgr, vlanid, vlanname): - """Create a VLAN on Nexus Switch given the VLAN ID and Name.""" - pass - - def disable_vlan(self, mgr, vlanid): - """Delete a VLAN on Nexus Switch given the VLAN ID.""" - pass - - def disable_switch_port(self, mgr, interface): - """Disable trunk mode an interface on Nexus Switch.""" - pass - - def enable_vlan_on_trunk_int(self, mgr, etype, interface, vlanid): - """Enable vlan on trunk interface. - - Enable trunk mode vlan access an interface on Nexus Switch given - VLANID. - """ - pass - - def disable_vlan_on_trunk_int(self, mgr, interface, vlanid): - """Disables vlan in trunk interface. - - Enables trunk mode vlan access an interface on Nexus Switch given - VLANID. - """ - pass - - def create_vlan(self, vlan_name, vlan_id, nexus_host, nexus_user, - nexus_password, nexus_ports, nexus_ssh_port, vlan_ids): - """Create VLAN and enable it on interface. - - Creates a VLAN and Enable on trunk mode an interface on Nexus Switch - given the VLAN ID and Name and Interface Number. - """ - pass - - def delete_vlan(self, vlan_id, nexus_host, nexus_user, nexus_password, - nexus_ports, nexus_ssh_port): - """Delete VLAN. - - Delete a VLAN and Disables trunk mode an interface on Nexus Switch - given the VLAN ID and Interface Number. 
- """ - pass - - def build_vlans_cmd(self): - """Build a string with all the VLANs on the same Switch.""" - pass - - def add_vlan_int(self, vlan_id, nexus_host, nexus_user, nexus_password, - nexus_ports, nexus_ssh_port, vlan_ids=None): - """Add a vlan from interfaces on the Nexus switch given the VLAN ID.""" - pass - - def remove_vlan_int(self, vlan_id, nexus_host, nexus_user, nexus_password, - nexus_ports, nexus_ssh_port): - """Remove vlan from interfaces. - - Removes a vlan from interfaces on the Nexus switch given the VLAN ID. - """ - pass diff --git a/neutron/plugins/embrane/README b/neutron/plugins/embrane/README deleted file mode 100644 index 15ad1abbd..000000000 --- a/neutron/plugins/embrane/README +++ /dev/null @@ -1,9 +0,0 @@ -Embrane Neutron Plugin - -This plugin interfaces OpenStack Neutron with Embrane's heleos platform, which -provides layer 3-7 network services for cloud environments. - -L2 connectivity is leveraged by one of the supported existing plugins. - -For more details on use, configuration and implementation please refer to: -http://wiki.openstack.org/wiki/Neutron/EmbraneNeutronPlugin \ No newline at end of file diff --git a/neutron/plugins/embrane/__init__.py b/neutron/plugins/embrane/__init__.py deleted file mode 100644 index 1fac4725b..000000000 --- a/neutron/plugins/embrane/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2013 Embrane, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Ivar Lazzaro, Embrane, Inc. diff --git a/neutron/plugins/embrane/agent/__init__.py b/neutron/plugins/embrane/agent/__init__.py deleted file mode 100644 index 1fac4725b..000000000 --- a/neutron/plugins/embrane/agent/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2013 Embrane, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Ivar Lazzaro, Embrane, Inc. diff --git a/neutron/plugins/embrane/agent/dispatcher.py b/neutron/plugins/embrane/agent/dispatcher.py deleted file mode 100644 index 121abe9ac..000000000 --- a/neutron/plugins/embrane/agent/dispatcher.py +++ /dev/null @@ -1,134 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2013 Embrane, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# -# @author: Ivar Lazzaro, Embrane, Inc. - -from eventlet import greenthread -from eventlet import queue -from heleosapi import constants as h_con -from heleosapi import exceptions as h_exc - -from neutron.openstack.common import log as logging -from neutron.plugins.embrane.agent.operations import router_operations -from neutron.plugins.embrane.common import constants as p_con -from neutron.plugins.embrane.common import contexts as ctx - -LOG = logging.getLogger(__name__) - - -class Dispatcher(object): - - def __init__(self, plugin, async=True): - self._async = async - self._plugin = plugin - self.sync_items = dict() - - def dispatch_l3(self, d_context, args=(), kwargs={}): - item = d_context.item - event = d_context.event - n_context = d_context.n_context - chain = d_context.chain - - item_id = item["id"] - handlers = router_operations.handlers - if event in handlers: - for f in handlers[event]: - first_run = False - if item_id not in self.sync_items: - self.sync_items[item_id] = (queue.Queue(),) - first_run = True - self.sync_items[item_id][0].put( - ctx.OperationContext(event, n_context, item, chain, f, - args, kwargs)) - t = None - if first_run: - t = greenthread.spawn(self._consume_l3, - item_id, - self.sync_items[item_id][0], - self._plugin, - self._async) - self.sync_items[item_id] += (t,) - if not self._async: - t = self.sync_items[item_id][1] - t.wait() - - def _consume_l3(self, sync_item, sync_queue, plugin, a_sync): - current_state = None - while True: - try: - # If the DVA is deleted, the thread (and the associated queue) - # can die as well - if current_state == p_con.Status.DELETED: - del self.sync_items[sync_item] - return - try: - # If synchronous op, empty the queue as fast as possible - operation_context = sync_queue.get( - block=a_sync, - timeout=p_con.QUEUE_TIMEOUT) - except queue.Empty: - del self.sync_items[sync_item] - return - # Execute the preliminary operations - (operation_context.chain and - operation_context.chain.execute_all()) - # 
Execute the main operation, a transient state is maintained - # so that the consumer can decide if it has - # to be burned to the DB - transient_state = None - try: - dva_state = operation_context.function( - plugin._esm_api, - operation_context.n_context.tenant_id, - operation_context.item, - *operation_context.args, - **operation_context.kwargs) - if dva_state == p_con.Status.DELETED: - transient_state = dva_state - else: - if not dva_state: - transient_state = p_con.Status.ERROR - elif dva_state == h_con.DvaState.POWER_ON: - transient_state = p_con.Status.ACTIVE - else: - transient_state = p_con.Status.READY - - except (h_exc.PendingDva, h_exc.DvaNotFound, - h_exc.BrokenInterface, h_exc.DvaCreationFailed, - h_exc.DvaCreationPending, h_exc.BrokenDva, - h_exc.ConfigurationFailed) as ex: - LOG.warning(p_con.error_map[type(ex)] % ex.message) - transient_state = p_con.Status.ERROR - except h_exc.DvaDeleteFailed as ex: - LOG.warning(p_con.error_map[type(ex)] % ex.message) - transient_state = p_con.Status.DELETED - finally: - # if the returned transient state is None, no operations - # are required on the DVA status - if transient_state: - if transient_state == p_con.Status.DELETED: - current_state = plugin._delete_router( - operation_context.n_context, - operation_context.item["id"]) - # Error state cannot be reverted - elif transient_state != p_con.Status.ERROR: - current_state = plugin._update_neutron_state( - operation_context.n_context, - operation_context.item, - transient_state) - except Exception: - LOG.exception(_("Unhandled exception occurred")) diff --git a/neutron/plugins/embrane/agent/operations/__init__.py b/neutron/plugins/embrane/agent/operations/__init__.py deleted file mode 100644 index 1fac4725b..000000000 --- a/neutron/plugins/embrane/agent/operations/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2013 Embrane, Inc. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Ivar Lazzaro, Embrane, Inc. diff --git a/neutron/plugins/embrane/agent/operations/router_operations.py b/neutron/plugins/embrane/agent/operations/router_operations.py deleted file mode 100644 index a9d35bfd4..000000000 --- a/neutron/plugins/embrane/agent/operations/router_operations.py +++ /dev/null @@ -1,156 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2013 Embrane, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Ivar Lazzaro, Embrane, Inc. 
- -import functools - -from heleosapi import exceptions as h_exc - -from neutron.openstack.common import log as logging -from neutron.plugins.embrane.common import constants as p_con - -LOG = logging.getLogger(__name__) -handlers = dict() - - -def handler(event, handler): - def wrap(f): - if event not in handler.keys(): - new_func_list = [f] - handler[event] = new_func_list - else: - handler[event].append(f) - - @functools.wraps(f) - def wrapped_f(*args, **kwargs): - return f(*args, **kwargs) - return wrapped_f - return wrap - - -@handler(p_con.Events.CREATE_ROUTER, handlers) -def _create_dva_and_assign_address(api, tenant_id, neutron_router, - flavor, utif_info=None, - ip_allocation_info=None): - """Creates a new router, and assign the gateway interface if any.""" - - dva = api.create_router(tenant_id=tenant_id, - router_id=neutron_router["id"], - name=neutron_router["name"], - flavor=flavor, - up=neutron_router["admin_state_up"]) - try: - if utif_info: - api.grow_interface(utif_info, neutron_router["admin_state_up"], - tenant_id, neutron_router["id"]) - if ip_allocation_info: - dva = api.allocate_address(neutron_router["id"], - neutron_router["admin_state_up"], - ip_allocation_info) - except h_exc.PreliminaryOperationsFailed as ex: - raise h_exc.BrokenInterface(err_msg=ex.message) - - state = api.extract_dva_state(dva) - return state - - -@handler(p_con.Events.UPDATE_ROUTER, handlers) -def _update_dva_and_assign_address(api, tenant_id, neutron_router, - utif_info=None, ip_allocation_info=None, - routes_info=[]): - name = neutron_router["name"] - up = neutron_router["admin_state_up"] - r_id = neutron_router["id"] - if ip_allocation_info or routes_info: - up = True - dva = api.update_dva(tenant_id=tenant_id, router_id=r_id, name=name, - up=up, utif_info=utif_info) - if ip_allocation_info: - api.allocate_address(r_id, up, ip_allocation_info) - - if routes_info: - api.delete_extra_routes(r_id, up) - api.set_extra_routes(r_id, neutron_router["admin_state_up"], - 
routes_info) - - return api.extract_dva_state(dva) - - -@handler(p_con.Events.DELETE_ROUTER, handlers) -def _delete_dva(api, tenant_id, neutron_router): - try: - api.delete_dva(tenant_id, neutron_router["id"]) - except h_exc.DvaNotFound: - LOG.warning(_("The router %s had no physical representation," - "likely already deleted"), neutron_router["id"]) - return p_con.Status.DELETED - - -@handler(p_con.Events.GROW_ROUTER_IF, handlers) -def _grow_dva_iface_and_assign_address(api, tenant_id, neutron_router, - utif_info=None, - ip_allocation_info=None): - try: - dva = api.grow_interface(utif_info, neutron_router["admin_state_up"], - tenant_id, neutron_router["id"]) - if ip_allocation_info: - dva = api.allocate_address(neutron_router["id"], - neutron_router["admin_state_up"], - ip_allocation_info) - except h_exc.PreliminaryOperationsFailed as ex: - raise h_exc.BrokenInterface(err_msg=ex.message) - - state = api.extract_dva_state(dva) - return state - - -@handler(p_con.Events.SHRINK_ROUTER_IF, handlers) -def _shrink_dva_iface(api, tenant_id, neutron_router, port_id): - try: - dva = api.shrink_interface(tenant_id, neutron_router["id"], - neutron_router["admin_state_up"], port_id) - except h_exc.InterfaceNotFound: - LOG.warning(_("Interface %s not found in the heleos back-end," - "likely already deleted"), port_id) - return (p_con.Status.ACTIVE if neutron_router["admin_state_up"] else - p_con.Status.READY) - except h_exc.PreliminaryOperationsFailed as ex: - raise h_exc.BrokenInterface(err_msg=ex.message) - state = api.extract_dva_state(dva) - return state - - -@handler(p_con.Events.SET_NAT_RULE, handlers) -def _create_nat_rule(api, tenant_id, neutron_router, nat_info=None): - - dva = api.create_nat_entry(neutron_router["id"], - neutron_router["admin_state_up"], nat_info) - - state = api.extract_dva_state(dva) - return state - - -@handler(p_con.Events.RESET_NAT_RULE, handlers) -def _delete_nat_rule(api, tenant_id, neutron_router, floating_ip_id): - - dva = 
api.remove_nat_entry(neutron_router["id"], - neutron_router["admin_state_up"], - floating_ip_id) - - state = api.extract_dva_state(dva) - return state diff --git a/neutron/plugins/embrane/base_plugin.py b/neutron/plugins/embrane/base_plugin.py deleted file mode 100644 index 33d213888..000000000 --- a/neutron/plugins/embrane/base_plugin.py +++ /dev/null @@ -1,375 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2013 Embrane, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Ivar Lazzaro, Embrane, Inc. 
- -from heleosapi import backend_operations as h_op -from heleosapi import constants as h_con -from heleosapi import exceptions as h_exc -from oslo.config import cfg -from sqlalchemy.orm import exc - -from neutron.common import constants as l3_constants -from neutron.common import exceptions as neutron_exc -from neutron.db import extraroute_db -from neutron.db import l3_db -from neutron.db import models_v2 -from neutron.extensions import l3 -from neutron.openstack.common import log as logging -from neutron.plugins.embrane.agent import dispatcher -from neutron.plugins.embrane.common import config # noqa -from neutron.plugins.embrane.common import constants as p_con -from neutron.plugins.embrane.common import contexts as embrane_ctx -from neutron.plugins.embrane.common import operation -from neutron.plugins.embrane.common import utils - -LOG = logging.getLogger(__name__) -conf = cfg.CONF.heleos - - -class EmbranePlugin(object): - """Embrane Neutron plugin. - - uses the heleos(c) platform and a support L2 plugin to leverage networking - in cloud environments. 
- - """ - _l3super = extraroute_db.ExtraRoute_db_mixin - - def __init__(self): - pass - - def _run_embrane_config(self): - # read configurations - config_esm_mgmt = conf.esm_mgmt - config_admin_username = conf.admin_username - config_admin_password = conf.admin_password - config_router_image_id = conf.router_image - config_security_zones = {h_con.SzType.IB: conf.inband_id, - h_con.SzType.OOB: conf.oob_id, - h_con.SzType.MGMT: conf.mgmt_id, - h_con.SzType.DUMMY: conf.dummy_utif_id} - config_resource_pool = conf.resource_pool_id - self._embrane_async = conf.async_requests - self._esm_api = h_op.BackendOperations( - esm_mgmt=config_esm_mgmt, - admin_username=config_admin_username, - admin_password=config_admin_password, - router_image_id=config_router_image_id, - security_zones=config_security_zones, - resource_pool=config_resource_pool) - self._dispatcher = dispatcher.Dispatcher(self, self._embrane_async) - - def _make_router_dict(self, *args, **kwargs): - return self._l3super._make_router_dict(self, *args, **kwargs) - - def _delete_router(self, context, router_id): - self._l3super.delete_router(self, context, router_id) - - def _update_db_router_state(self, context, neutron_router, dva_state): - if not dva_state: - new_state = p_con.Status.ERROR - elif dva_state == h_con.DvaState.POWER_ON: - new_state = p_con.Status.ACTIVE - else: - new_state = p_con.Status.READY - self._set_db_router_state(context, neutron_router, new_state) - return new_state - - def _set_db_router_state(self, context, neutron_router, new_state): - return utils.set_db_item_state(context, neutron_router, new_state) - - def _update_db_interfaces_state(self, context, neutron_router): - router_ports = self.get_ports(context, - {"device_id": [neutron_router["id"]]}) - self._esm_api.update_ports_status(neutron_router["id"], router_ports) - for port in router_ports: - db_port = self._get_port(context, port["id"]) - db_port["status"] = port["status"] - context.session.merge(db_port) - - def 
_update_neutron_state(self, context, neutron_router, state): - try: - self._update_db_interfaces_state(context, neutron_router) - except Exception: - LOG.exception(_("Unhandled exception occurred")) - return self._set_db_router_state(context, neutron_router, state) - - def _retrieve_prefix_from_port(self, context, neutron_port): - subnet_id = neutron_port["fixed_ips"][0]["subnet_id"] - subnet = utils.retrieve_subnet(context, subnet_id) - prefix = subnet["cidr"].split("/")[1] - return prefix - - # L3 extension - def create_router(self, context, router): - r = router["router"] - self._get_tenant_id_for_create(context, r) - db_router = self._l3super.create_router(self, context, router) - neutron_router = self._get_router(context, db_router['id']) - gw_port = neutron_router.gw_port - # For now, only small flavor is used - utif_info = (self._plugin_support.retrieve_utif_info(context, - gw_port) - if gw_port else None) - ip_allocation_info = (utils.retrieve_ip_allocation_info(context, - gw_port) - if gw_port else None) - neutron_router = self._l3super._get_router(self, context, - neutron_router["id"]) - neutron_router["status"] = p_con.Status.CREATING - self._dispatcher.dispatch_l3( - d_context=embrane_ctx.DispatcherContext( - p_con.Events.CREATE_ROUTER, neutron_router, context, None), - args=(h_con.Flavor.SMALL, utif_info, ip_allocation_info)) - return self._make_router_dict(neutron_router) - - def update_router(self, context, id, router): - db_router = self._l3super.update_router(self, context, id, router) - neutron_router = self._get_router(context, db_router['id']) - gw_port = neutron_router.gw_port - utif_info = (self._plugin_support.retrieve_utif_info(context, - gw_port) - if gw_port else None) - ip_allocation_info = (utils.retrieve_ip_allocation_info(context, - gw_port) - if gw_port else None) - - routes_info = router["router"].get("routes") - - neutron_router = self._l3super._get_router(self, context, id) - state_change = operation.Operation( - 
self._set_db_router_state, - args=(context, neutron_router, p_con.Status.UPDATING)) - self._dispatcher.dispatch_l3( - d_context=embrane_ctx.DispatcherContext( - p_con.Events.UPDATE_ROUTER, neutron_router, context, - state_change), - args=(utif_info, ip_allocation_info, routes_info)) - return self._make_router_dict(neutron_router) - - def get_router(self, context, id, fields=None): - """Ensures that id does exist in the ESM.""" - neutron_router = self._get_router(context, id) - - try: - if neutron_router["status"] != p_con.Status.CREATING: - self._esm_api.get_dva(id) - except h_exc.DvaNotFound: - - LOG.error(_("The following routers have not physical match: %s"), - id) - self._set_db_router_state(context, neutron_router, - p_con.Status.ERROR) - - LOG.debug(_("Requested router: %s"), neutron_router) - return self._make_router_dict(neutron_router, fields) - - def get_routers(self, context, filters=None, fields=None, sorts=None, - limit=None, marker=None, page_reverse=False): - """Retrieves the router list defined by the incoming filters.""" - router_query = self._apply_filters_to_query( - self._model_query(context, l3_db.Router), - l3_db.Router, filters) - id_list = [x["id"] for x in router_query - if x["status"] != p_con.Status.CREATING] - try: - self._esm_api.get_dvas(id_list) - except h_exc.DvaNotFound: - LOG.error(_("The following routers have not physical match: %s"), - repr(id_list)) - error_routers = [] - for id in id_list: - try: - error_routers.append(self._get_router(context, id)) - except l3.RouterNotFound: - pass - for error_router in error_routers: - self._set_db_router_state(context, error_router, - p_con.Status.ERROR) - return [self._make_router_dict(router, fields) - for router in router_query] - - def delete_router(self, context, id): - """Deletes the DVA with the specific router id.""" - # Copy of the parent validation code, shouldn't the base modules - # provide functions for validating operations? 
- device_owner_router_intf = l3_constants.DEVICE_OWNER_ROUTER_INTF - fips = self.get_floatingips_count(context.elevated(), - filters={"router_id": [id]}) - if fips: - raise l3.RouterInUse(router_id=id) - - device_filter = {"device_id": [id], - "device_owner": [device_owner_router_intf]} - ports = self.get_ports_count(context.elevated(), - filters=device_filter) - if ports: - raise l3.RouterInUse(router_id=id) - neutron_router = self._get_router(context, id) - state_change = operation.Operation(self._set_db_router_state, - args=(context, neutron_router, - p_con.Status.DELETING)) - self._dispatcher.dispatch_l3( - d_context=embrane_ctx.DispatcherContext( - p_con.Events.DELETE_ROUTER, neutron_router, context, - state_change), args=()) - LOG.debug(_("Deleting router=%s"), neutron_router) - return neutron_router - - def add_router_interface(self, context, router_id, interface_info): - """Grows DVA interface in the specified subnet.""" - neutron_router = self._get_router(context, router_id) - rport_qry = context.session.query(models_v2.Port) - ports = rport_qry.filter_by( - device_id=router_id).all() - if len(ports) >= p_con.UTIF_LIMIT: - raise neutron_exc.BadRequest( - resource=router_id, - msg=("this router doesn't support more than " - + str(p_con.UTIF_LIMIT) + " interfaces")) - neutron_router_iface = self._l3super.add_router_interface( - self, context, router_id, interface_info) - port = self._get_port(context, neutron_router_iface["port_id"]) - utif_info = self._plugin_support.retrieve_utif_info(context, port) - ip_allocation_info = utils.retrieve_ip_allocation_info(context, - port) - state_change = operation.Operation(self._set_db_router_state, - args=(context, neutron_router, - p_con.Status.UPDATING)) - self._dispatcher.dispatch_l3( - d_context=embrane_ctx.DispatcherContext( - p_con.Events.GROW_ROUTER_IF, neutron_router, context, - state_change), - args=(utif_info, ip_allocation_info)) - return neutron_router_iface - - def remove_router_interface(self, context, 
router_id, interface_info): - port_id = None - if "port_id" in interface_info: - port_id = interface_info["port_id"] - elif "subnet_id" in interface_info: - subnet_id = interface_info["subnet_id"] - subnet = utils.retrieve_subnet(context, subnet_id) - rport_qry = context.session.query(models_v2.Port) - ports = rport_qry.filter_by( - device_id=router_id, - device_owner=l3_constants.DEVICE_OWNER_ROUTER_INTF, - network_id=subnet["network_id"]) - for p in ports: - if p["fixed_ips"][0]["subnet_id"] == subnet_id: - port_id = p["id"] - break - neutron_router = self._get_router(context, router_id) - self._l3super.remove_router_interface(self, context, router_id, - interface_info) - state_change = operation.Operation(self._set_db_router_state, - args=(context, neutron_router, - p_con.Status.UPDATING)) - self._dispatcher.dispatch_l3( - d_context=embrane_ctx.DispatcherContext( - p_con.Events.SHRINK_ROUTER_IF, neutron_router, context, - state_change), - args=(port_id,)) - - def create_floatingip(self, context, floatingip): - result = self._l3super.create_floatingip( - self, context, floatingip) - - if result["port_id"]: - neutron_router = self._get_router(context, result["router_id"]) - db_fixed_port = self._get_port(context, result["port_id"]) - fixed_prefix = self._retrieve_prefix_from_port(context, - db_fixed_port) - db_floating_port = neutron_router["gw_port"] - floating_prefix = self._retrieve_prefix_from_port( - context, db_floating_port) - nat_info = utils.retrieve_nat_info(context, result, - fixed_prefix, - floating_prefix, - neutron_router) - state_change = operation.Operation( - self._set_db_router_state, - args=(context, neutron_router, p_con.Status.UPDATING)) - - self._dispatcher.dispatch_l3( - d_context=embrane_ctx.DispatcherContext( - p_con.Events.SET_NAT_RULE, neutron_router, context, - state_change), - args=(nat_info,)) - return result - - def update_floatingip(self, context, id, floatingip): - db_fip = self._l3super.get_floatingip(self, context, id) - result = 
self._l3super.update_floatingip(self, context, id, - floatingip) - - if db_fip["port_id"] and db_fip["port_id"] != result["port_id"]: - neutron_router = self._get_router(context, db_fip["router_id"]) - fip_id = db_fip["id"] - state_change = operation.Operation( - self._set_db_router_state, - args=(context, neutron_router, p_con.Status.UPDATING)) - - self._dispatcher.dispatch_l3( - d_context=embrane_ctx.DispatcherContext( - p_con.Events.RESET_NAT_RULE, neutron_router, context, - state_change), - args=(fip_id,)) - if result["port_id"]: - neutron_router = self._get_router(context, result["router_id"]) - db_fixed_port = self._get_port(context, result["port_id"]) - fixed_prefix = self._retrieve_prefix_from_port(context, - db_fixed_port) - db_floating_port = neutron_router["gw_port"] - floating_prefix = self._retrieve_prefix_from_port( - context, db_floating_port) - nat_info = utils.retrieve_nat_info(context, result, - fixed_prefix, - floating_prefix, - neutron_router) - state_change = operation.Operation( - self._set_db_router_state, - args=(context, neutron_router, p_con.Status.UPDATING)) - - self._dispatcher.dispatch_l3( - d_context=embrane_ctx.DispatcherContext( - p_con.Events.SET_NAT_RULE, neutron_router, context, - state_change), - args=(nat_info,)) - return result - - def disassociate_floatingips(self, context, port_id): - try: - fip_qry = context.session.query(l3_db.FloatingIP) - floating_ip = fip_qry.filter_by(fixed_port_id=port_id).one() - router_id = floating_ip["router_id"] - except exc.NoResultFound: - return - self._l3super.disassociate_floatingips(self, context, port_id) - if router_id: - neutron_router = self._get_router(context, router_id) - fip_id = floating_ip["id"] - state_change = operation.Operation( - self._set_db_router_state, - args=(context, neutron_router, p_con.Status.UPDATING)) - - self._dispatcher.dispatch_l3( - d_context=embrane_ctx.DispatcherContext( - p_con.Events.RESET_NAT_RULE, neutron_router, context, - state_change), - args=(fip_id,)) 
diff --git a/neutron/plugins/embrane/common/__init__.py b/neutron/plugins/embrane/common/__init__.py deleted file mode 100644 index 1fac4725b..000000000 --- a/neutron/plugins/embrane/common/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2013 Embrane, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Ivar Lazzaro, Embrane, Inc. diff --git a/neutron/plugins/embrane/common/config.py b/neutron/plugins/embrane/common/config.py deleted file mode 100644 index 54c9153f3..000000000 --- a/neutron/plugins/embrane/common/config.py +++ /dev/null @@ -1,49 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2013 Embrane, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Ivar Lazzaro, Embrane, Inc. 
- -from oslo.config import cfg - - -heleos_opts = [ - cfg.StrOpt('esm_mgmt', - help=_('ESM management root address')), - cfg.StrOpt('admin_username', default='admin', - help=_('ESM admin username.')), - cfg.StrOpt('admin_password', - secret=True, - help=_('ESM admin password.')), - cfg.StrOpt('router_image', - help=_('Router image id (Embrane FW/VPN)')), - cfg.StrOpt('inband_id', - help=_('In band Security Zone id')), - cfg.StrOpt('oob_id', - help=_('Out of band Security Zone id')), - cfg.StrOpt('mgmt_id', - help=_('Management Security Zone id')), - cfg.StrOpt('dummy_utif_id', - help=_('Dummy user traffic Security Zone id')), - cfg.StrOpt('resource_pool_id', default='default', - help=_('Shared resource pool id')), - cfg.BoolOpt('async_requests', default=True, - help=_('Define if the requests have ' - 'run asynchronously or not')), -] - - -cfg.CONF.register_opts(heleos_opts, "heleos") diff --git a/neutron/plugins/embrane/common/constants.py b/neutron/plugins/embrane/common/constants.py deleted file mode 100644 index 65f3818a2..000000000 --- a/neutron/plugins/embrane/common/constants.py +++ /dev/null @@ -1,72 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2013 Embrane, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Ivar Lazzaro, Embrane, Inc. 
- -from heleosapi import exceptions as h_exc - -from neutron.plugins.common import constants - - -# Router specific constants -UTIF_LIMIT = 7 -QUEUE_TIMEOUT = 300 - - -class Status: - # Transient - CREATING = constants.PENDING_CREATE - UPDATING = constants.PENDING_UPDATE - DELETING = constants.PENDING_DELETE - # Final - ACTIVE = constants.ACTIVE - ERROR = constants.ERROR - READY = constants.INACTIVE - DELETED = "DELETED" # not visible - - -class Events: - CREATE_ROUTER = "create_router" - UPDATE_ROUTER = "update_router" - DELETE_ROUTER = "delete_router" - GROW_ROUTER_IF = "grow_router_if" - SHRINK_ROUTER_IF = "shrink_router_if" - SET_NAT_RULE = "set_nat_rule" - RESET_NAT_RULE = "reset_nat_rule" - -_DVA_PENDING_ERROR_MSG = _("Dva is pending for the following reason: %s") -_DVA_NOT_FOUNT_ERROR_MSG = _("Dva can't be found to execute the operation, " - "probably was cancelled through the heleos UI") -_DVA_BROKEN_ERROR_MSG = _("Dva seems to be broken for reason %s") -_DVA_BROKEN_INTERFACE_ERROR_MSG = _("Dva interface seems to be broken " - "for reason %s") -_DVA_CREATION_FAILED_ERROR_MSG = _("Dva creation failed reason %s") -_DVA_CREATION_PENDING_ERROR_MSG = _("Dva creation is in pending state " - "for reason %s") -_CFG_FAILED_ERROR_MSG = _("Dva configuration failed for reason %s") -_DVA_DEL_FAILED_ERROR_MSG = _("Failed to delete the backend " - "router for reason %s. 
Please remove " - "it manually through the heleos UI") - -error_map = {h_exc.PendingDva: _DVA_PENDING_ERROR_MSG, - h_exc.DvaNotFound: _DVA_NOT_FOUNT_ERROR_MSG, - h_exc.BrokenDva: _DVA_BROKEN_ERROR_MSG, - h_exc.BrokenInterface: _DVA_BROKEN_INTERFACE_ERROR_MSG, - h_exc.DvaCreationFailed: _DVA_CREATION_FAILED_ERROR_MSG, - h_exc.DvaCreationPending: _DVA_CREATION_PENDING_ERROR_MSG, - h_exc.ConfigurationFailed: _CFG_FAILED_ERROR_MSG, - h_exc.DvaDeleteFailed: _DVA_DEL_FAILED_ERROR_MSG} diff --git a/neutron/plugins/embrane/common/contexts.py b/neutron/plugins/embrane/common/contexts.py deleted file mode 100644 index f35a02427..000000000 --- a/neutron/plugins/embrane/common/contexts.py +++ /dev/null @@ -1,40 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2013 Embrane, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Ivar Lazzaro, Embrane, Inc. - - -class DispatcherContext(object): - - def __init__(self, event, item, neutron_context, chain=None): - self.event = event - self.item = item - self.n_context = neutron_context - self.chain = chain - - -class OperationContext(DispatcherContext): - """Operational context. 
- - contains all the parameters needed to execute a status aware operation - - """ - def __init__(self, event, context, item, chain, function, args, kwargs): - super(OperationContext, self).__init__(event, item, context, chain) - self.function = function - self.args = args - self.kwargs = kwargs diff --git a/neutron/plugins/embrane/common/exceptions.py b/neutron/plugins/embrane/common/exceptions.py deleted file mode 100644 index f7cfa7b24..000000000 --- a/neutron/plugins/embrane/common/exceptions.py +++ /dev/null @@ -1,28 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2013 Embrane, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Ivar Lazzaro, Embrane, Inc. - -from neutron.common import exceptions as neutron_exec - - -class EmbranePluginException(neutron_exec.NeutronException): - message = _("An unexpected error occurred:%(err_msg)s") - - -class UnsupportedException(EmbranePluginException): - message = _("%(err_msg)s") diff --git a/neutron/plugins/embrane/common/operation.py b/neutron/plugins/embrane/common/operation.py deleted file mode 100644 index 39fa413e2..000000000 --- a/neutron/plugins/embrane/common/operation.py +++ /dev/null @@ -1,51 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2013 Embrane, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Ivar Lazzaro, Embrane, Inc. - - -class Operation(object): - """Defines a series of operations which shall be executed in order. - - the operations expected are procedures, return values are discarded - - """ - - def __init__(self, procedure, args=(), kwargs={}, nextop=None): - self._procedure = procedure - self.args = args[:] - self.kwargs = dict(kwargs) - self.nextop = nextop - - def execute(self): - args = self.args - self._procedure(*args, **self.kwargs) - return self.nextop - - def execute_all(self): - nextop = self.execute() - while nextop: - nextop = self.execute_all() - - def has_next(self): - return self.nextop is not None - - def add_bottom_operation(self, operation): - op = self - while op.has_next(): - op = op.nextop - op.nextop = operation diff --git a/neutron/plugins/embrane/common/utils.py b/neutron/plugins/embrane/common/utils.py deleted file mode 100644 index 5fa20eb59..000000000 --- a/neutron/plugins/embrane/common/utils.py +++ /dev/null @@ -1,73 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2013 Embrane, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Ivar Lazzaro, Embrane, Inc. - -from heleosapi import info as h_info - -from neutron.common import constants -from neutron.db import models_v2 -from neutron.openstack.common import log as logging - -LOG = logging.getLogger(__name__) - - -def set_db_item_state(context, neutron_item, new_state): - with context.session.begin(subtransactions=True): - if neutron_item["status"] != new_state: - neutron_item["status"] = new_state - context.session.merge(neutron_item) - - -def retrieve_subnet(context, subnet_id): - return (context.session.query( - models_v2.Subnet).filter(models_v2.Subnet.id == subnet_id).one()) - - -def retrieve_ip_allocation_info(context, neutron_port): - """Retrieves ip allocation info for a specific port if any.""" - - try: - subnet_id = neutron_port["fixed_ips"][0]["subnet_id"] - except (KeyError, IndexError): - LOG.info(_("No ip allocation set")) - return - subnet = retrieve_subnet(context, subnet_id) - allocated_ip = neutron_port["fixed_ips"][0]["ip_address"] - is_gw_port = (neutron_port["device_owner"] == - constants.DEVICE_OWNER_ROUTER_GW) - gateway_ip = subnet["gateway_ip"] - - ip_allocation_info = h_info.IpAllocationInfo( - is_gw=is_gw_port, - ip_version=subnet["ip_version"], - prefix=subnet["cidr"].split("/")[1], - ip_address=allocated_ip, - port_id=neutron_port["id"], - gateway_ip=gateway_ip) - - return ip_allocation_info - - -def retrieve_nat_info(context, fip, fixed_prefix, floating_prefix, router): - nat_info = h_info.NatInfo(source_address=fip["floating_ip_address"], - source_prefix=floating_prefix, - destination_address=fip["fixed_ip_address"], - destination_prefix=fixed_prefix, - floating_ip_id=fip["id"], - fixed_port_id=fip["port_id"]) - return nat_info diff --git a/neutron/plugins/embrane/l2base/__init__.py b/neutron/plugins/embrane/l2base/__init__.py deleted file mode 100644 index 1fac4725b..000000000 --- 
a/neutron/plugins/embrane/l2base/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2013 Embrane, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Ivar Lazzaro, Embrane, Inc. diff --git a/neutron/plugins/embrane/l2base/fake/__init__.py b/neutron/plugins/embrane/l2base/fake/__init__.py deleted file mode 100644 index 1fac4725b..000000000 --- a/neutron/plugins/embrane/l2base/fake/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2013 Embrane, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Ivar Lazzaro, Embrane, Inc. 
diff --git a/neutron/plugins/embrane/l2base/fake/fake_l2_plugin.py b/neutron/plugins/embrane/l2base/fake/fake_l2_plugin.py deleted file mode 100644 index 5cf68df28..000000000 --- a/neutron/plugins/embrane/l2base/fake/fake_l2_plugin.py +++ /dev/null @@ -1,24 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2013 Embrane, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Ivar Lazzaro, Embrane, Inc. - -from neutron.db import db_base_plugin_v2 - - -class FakeL2Plugin(db_base_plugin_v2.NeutronDbPluginV2): - supported_extension_aliases = [] diff --git a/neutron/plugins/embrane/l2base/fake/fakeplugin_support.py b/neutron/plugins/embrane/l2base/fake/fakeplugin_support.py deleted file mode 100644 index 7818d28de..000000000 --- a/neutron/plugins/embrane/l2base/fake/fakeplugin_support.py +++ /dev/null @@ -1,45 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2013 Embrane, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Ivar Lazzaro, Embrane, Inc. - -from heleosapi import info as h_info - -from neutron.common import constants -from neutron import manager -from neutron.plugins.embrane.l2base import support_base as base - - -class FakePluginSupport(base.SupportBase): - - def __init__(self): - super(FakePluginSupport, self).__init__() - - def retrieve_utif_info(self, context, neutron_port): - plugin = manager.NeutronManager.get_plugin() - network_id = neutron_port["network_id"] - network = plugin._get_network(context, network_id) - is_gw = (neutron_port["device_owner"] == - constants.DEVICE_OWNER_ROUTER_GW) - result = h_info.UtifInfo(vlan=0, - network_name=network["name"], - network_id=network["id"], - is_gw=is_gw, - owner_tenant=network["tenant_id"], - port_id=neutron_port["id"], - mac_address=neutron_port["mac_address"]) - return result diff --git a/neutron/plugins/embrane/l2base/openvswitch/__init__.py b/neutron/plugins/embrane/l2base/openvswitch/__init__.py deleted file mode 100644 index 1fac4725b..000000000 --- a/neutron/plugins/embrane/l2base/openvswitch/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2013 Embrane, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Ivar Lazzaro, Embrane, Inc. 
diff --git a/neutron/plugins/embrane/l2base/openvswitch/openvswitch_support.py b/neutron/plugins/embrane/l2base/openvswitch/openvswitch_support.py deleted file mode 100644 index f37a6b81a..000000000 --- a/neutron/plugins/embrane/l2base/openvswitch/openvswitch_support.py +++ /dev/null @@ -1,58 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2013 Embrane, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Ivar Lazzaro, Embrane, Inc. - -from heleosapi import info as h_info - -from neutron.common import constants -from neutron import manager -from neutron.plugins.embrane.l2base import support_base as base -from neutron.plugins.embrane.l2base import support_exceptions as exc -from neutron.plugins.openvswitch import ovs_db_v2 - - -class OpenvswitchSupport(base.SupportBase): - """OpenVSwitch plugin support. 
- - Obtains the informations needed to build the user security zones - - """ - - def __init__(self): - super(OpenvswitchSupport, self).__init__() - - def retrieve_utif_info(self, context, neutron_port): - plugin = manager.NeutronManager.get_plugin() - session = context.session - network_id = neutron_port["network_id"] - network_binding = ovs_db_v2.get_network_binding(session, network_id) - if not network_binding["segmentation_id"]: - raise exc.UtifInfoError( - err_msg=_("No segmentation_id found for the network, " - "please be sure that tenant_network_type is vlan")) - network = plugin._get_network(context, network_id) - is_gw = (neutron_port["device_owner"] == - constants.DEVICE_OWNER_ROUTER_GW) - result = h_info.UtifInfo(vlan=network_binding["segmentation_id"], - network_name=network["name"], - network_id=network["id"], - is_gw=is_gw, - owner_tenant=network["tenant_id"], - port_id=neutron_port["id"], - mac_address=neutron_port["mac_address"]) - return result diff --git a/neutron/plugins/embrane/l2base/support_base.py b/neutron/plugins/embrane/l2base/support_base.py deleted file mode 100644 index a2b7e5342..000000000 --- a/neutron/plugins/embrane/l2base/support_base.py +++ /dev/null @@ -1,50 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2013 Embrane, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Ivar Lazzaro, Embrane, Inc. 
- -import abc - -import six - - -@six.add_metaclass(abc.ABCMeta) -class SupportBase(object): - """abstract support class. - - Defines the methods a plugin support should implement to be used as - the L2 base for Embrane plugin. - - """ - - @abc.abstractmethod - def __init__(self): - pass - - @abc.abstractmethod - def retrieve_utif_info(self, context, neutron_port=None, network=None): - """Retrieve specific network info. - - each plugin support, querying its own DB, can collect all the - information needed by the ESM in order to create the - user traffic security zone. - - :param interface_info: the foo parameter - :param context: neutron request context - :returns: heleosapi.info.UtifInfo -- specific network info - :raises: UtifInfoError - """ diff --git a/neutron/plugins/embrane/l2base/support_exceptions.py b/neutron/plugins/embrane/l2base/support_exceptions.py deleted file mode 100644 index 1c5c01322..000000000 --- a/neutron/plugins/embrane/l2base/support_exceptions.py +++ /dev/null @@ -1,25 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2013 Embrane, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Ivar Lazzaro, Embrane, Inc. 
- -from neutron.plugins.embrane.common import exceptions as embrane_exc - - -class UtifInfoError(embrane_exc.EmbranePluginException): - message = _("Cannot retrieve utif info for the following reason: " - "%(err_msg)s") diff --git a/neutron/plugins/embrane/plugins/__init__.py b/neutron/plugins/embrane/plugins/__init__.py deleted file mode 100644 index 1fac4725b..000000000 --- a/neutron/plugins/embrane/plugins/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2013 Embrane, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Ivar Lazzaro, Embrane, Inc. diff --git a/neutron/plugins/embrane/plugins/embrane_fake_plugin.py b/neutron/plugins/embrane/plugins/embrane_fake_plugin.py deleted file mode 100644 index 69d972c54..000000000 --- a/neutron/plugins/embrane/plugins/embrane_fake_plugin.py +++ /dev/null @@ -1,34 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2013 Embrane, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Ivar Lazzaro, Embrane, Inc. - -from neutron.db import extraroute_db -from neutron.plugins.embrane import base_plugin as base -from neutron.plugins.embrane.l2base.fake import fake_l2_plugin as l2 -from neutron.plugins.embrane.l2base.fake import fakeplugin_support as sup - - -class EmbraneFakePlugin(base.EmbranePlugin, extraroute_db.ExtraRoute_db_mixin, - l2.FakeL2Plugin): - _plugin_support = sup.FakePluginSupport() - - def __init__(self): - '''First run plugin specific initialization, then Embrane's.''' - self.supported_extension_aliases += ["extraroute", "router"] - l2.FakeL2Plugin.__init__(self) - self._run_embrane_config() diff --git a/neutron/plugins/embrane/plugins/embrane_ovs_plugin.py b/neutron/plugins/embrane/plugins/embrane_ovs_plugin.py deleted file mode 100644 index d4d5ac180..000000000 --- a/neutron/plugins/embrane/plugins/embrane_ovs_plugin.py +++ /dev/null @@ -1,38 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2013 Embrane, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Ivar Lazzaro, Embrane, Inc. 
- -from neutron.plugins.embrane import base_plugin as base -from neutron.plugins.embrane.l2base.openvswitch import openvswitch_support -from neutron.plugins.openvswitch import ovs_neutron_plugin as l2 - - -class EmbraneOvsPlugin(base.EmbranePlugin, l2.OVSNeutronPluginV2): - '''EmbraneOvsPlugin. - - This plugin uses OpenVSwitch specific L2 plugin for providing L2 networks - and the base EmbranePlugin for L3. - - ''' - _plugin_support = openvswitch_support.OpenvswitchSupport() - - def __init__(self): - '''First run plugin specific initialization, then Embrane's.''' - self._supported_extension_aliases.remove("l3_agent_scheduler") - l2.OVSNeutronPluginV2.__init__(self) - self._run_embrane_config() diff --git a/neutron/plugins/hyperv/__init__.py b/neutron/plugins/hyperv/__init__.py deleted file mode 100644 index 7ef4e09fa..000000000 --- a/neutron/plugins/hyperv/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2013 Cloudbase Solutions SRL -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. diff --git a/neutron/plugins/hyperv/agent/__init__.py b/neutron/plugins/hyperv/agent/__init__.py deleted file mode 100644 index 7ef4e09fa..000000000 --- a/neutron/plugins/hyperv/agent/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2013 Cloudbase Solutions SRL -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. diff --git a/neutron/plugins/hyperv/agent/hyperv_neutron_agent.py b/neutron/plugins/hyperv/agent/hyperv_neutron_agent.py deleted file mode 100644 index 07a5ed776..000000000 --- a/neutron/plugins/hyperv/agent/hyperv_neutron_agent.py +++ /dev/null @@ -1,475 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -#Copyright 2013 Cloudbase Solutions SRL -#Copyright 2013 Pedro Navarro Perez -#All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# @author: Pedro Navarro Perez -# @author: Alessandro Pilotti, Cloudbase Solutions Srl - -import platform -import re -import sys -import time - -import eventlet -eventlet.monkey_patch() - -from oslo.config import cfg - -from neutron.agent.common import config -from neutron.agent import rpc as agent_rpc -from neutron.agent import securitygroups_rpc as sg_rpc -from neutron.common import config as common_config -from neutron.common import constants as n_const -from neutron.common import rpc_compat -from neutron.common import topics -from neutron import context -from neutron.openstack.common import log as logging -from neutron.openstack.common import loopingcall -from neutron.plugins.common import constants as p_const -from neutron.plugins.hyperv.agent import utils -from neutron.plugins.hyperv.agent import utilsfactory -from neutron.plugins.hyperv.common import constants - -LOG = logging.getLogger(__name__) - -agent_opts = [ - cfg.ListOpt( - 'physical_network_vswitch_mappings', - default=[], - help=_('List of : ' - 'where the physical networks can be expressed with ' - 'wildcards, e.g.: ."*:external"')), - cfg.StrOpt( - 'local_network_vswitch', - default='private', - help=_('Private vswitch name used for local networks')), - cfg.IntOpt('polling_interval', default=2, - help=_("The number of seconds the agent will wait between " - "polling for local device changes.")), - cfg.BoolOpt('enable_metrics_collection', - default=False, - help=_('Enables metrics collections for switch ports by using ' - 'Hyper-V\'s metric APIs. Collected data can by ' - 'retrieved by other apps and services, e.g.: ' - 'Ceilometer. Requires Hyper-V / Windows Server 2012 ' - 'and above')), - cfg.IntOpt('metrics_max_retries', - default=100, - help=_('Specifies the maximum number of retries to enable ' - 'Hyper-V\'s port metrics collection. 
The agent will try ' - 'to enable the feature once every polling_interval ' - 'period for at most metrics_max_retries or until it ' - 'succeedes.')) -] - - -CONF = cfg.CONF -CONF.register_opts(agent_opts, "AGENT") -config.register_agent_state_opts_helper(cfg.CONF) - - -class HyperVSecurityAgent(rpc_compat.RpcCallback, - sg_rpc.SecurityGroupAgentRpcMixin): - # Set RPC API version to 1.1 by default. - RPC_API_VERSION = '1.1' - - def __init__(self, context, plugin_rpc): - super(HyperVSecurityAgent, self).__init__() - self.context = context - self.plugin_rpc = plugin_rpc - - if sg_rpc.is_firewall_enabled(): - self.init_firewall() - self._setup_rpc() - - def _setup_rpc(self): - self.topic = topics.AGENT - self.endpoints = [HyperVSecurityCallbackMixin(self)] - consumers = [[topics.SECURITY_GROUP, topics.UPDATE]] - - self.connection = agent_rpc.create_consumers(self.endpoints, - self.topic, - consumers) - - -class HyperVSecurityCallbackMixin(rpc_compat.RpcCallback, - sg_rpc.SecurityGroupAgentRpcCallbackMixin): - # Set RPC API version to 1.1 by default. - RPC_API_VERSION = '1.1' - - def __init__(self, sg_agent): - super(HyperVSecurityCallbackMixin, self).__init__() - self.sg_agent = sg_agent - - -class HyperVPluginApi(agent_rpc.PluginApi, - sg_rpc.SecurityGroupServerRpcApiMixin): - pass - - -class HyperVNeutronAgent(rpc_compat.RpcCallback): - # Set RPC API version to 1.0 by default. 
- RPC_API_VERSION = '1.0' - - def __init__(self): - super(HyperVNeutronAgent, self).__init__() - self._utils = utilsfactory.get_hypervutils() - self._polling_interval = CONF.AGENT.polling_interval - self._load_physical_network_mappings() - self._network_vswitch_map = {} - self._port_metric_retries = {} - self._set_agent_state() - self._setup_rpc() - - def _set_agent_state(self): - self.agent_state = { - 'binary': 'neutron-hyperv-agent', - 'host': cfg.CONF.host, - 'topic': n_const.L2_AGENT_TOPIC, - 'configurations': {'vswitch_mappings': - self._physical_network_mappings}, - 'agent_type': n_const.AGENT_TYPE_HYPERV, - 'start_flag': True} - - def _report_state(self): - try: - self.state_rpc.report_state(self.context, - self.agent_state) - self.agent_state.pop('start_flag', None) - except Exception as ex: - LOG.exception(_("Failed reporting state! %s"), ex) - - def _setup_rpc(self): - self.agent_id = 'hyperv_%s' % platform.node() - self.topic = topics.AGENT - self.plugin_rpc = HyperVPluginApi(topics.PLUGIN) - - self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN) - - # RPC network init - self.context = context.get_admin_context_without_session() - # Handle updates from service - self.endpoints = [self] - # Define the listening consumers for the agent - consumers = [[topics.PORT, topics.UPDATE], - [topics.NETWORK, topics.DELETE], - [topics.PORT, topics.DELETE], - [constants.TUNNEL, topics.UPDATE]] - self.connection = agent_rpc.create_consumers(self.endpoints, - self.topic, - consumers) - - self.sec_groups_agent = HyperVSecurityAgent( - self.context, self.plugin_rpc) - report_interval = CONF.AGENT.report_interval - if report_interval: - heartbeat = loopingcall.FixedIntervalLoopingCall( - self._report_state) - heartbeat.start(interval=report_interval) - - def _load_physical_network_mappings(self): - self._physical_network_mappings = {} - for mapping in CONF.AGENT.physical_network_vswitch_mappings: - parts = mapping.split(':') - if len(parts) != 2: - 
LOG.debug(_('Invalid physical network mapping: %s'), mapping) - else: - pattern = re.escape(parts[0].strip()).replace('\\*', '.*') - vswitch = parts[1].strip() - self._physical_network_mappings[pattern] = vswitch - - def _get_vswitch_for_physical_network(self, phys_network_name): - for pattern in self._physical_network_mappings: - if phys_network_name is None: - phys_network_name = '' - if re.match(pattern, phys_network_name): - return self._physical_network_mappings[pattern] - # Not found in the mappings, the vswitch has the same name - return phys_network_name - - def _get_network_vswitch_map_by_port_id(self, port_id): - for network_id, map in self._network_vswitch_map.iteritems(): - if port_id in map['ports']: - return (network_id, map) - - def network_delete(self, context, network_id=None): - LOG.debug(_("network_delete received. " - "Deleting network %s"), network_id) - # The network may not be defined on this agent - if network_id in self._network_vswitch_map: - self._reclaim_local_network(network_id) - else: - LOG.debug(_("Network %s not defined on agent."), network_id) - - def port_delete(self, context, port_id=None): - LOG.debug(_("port_delete received")) - self._port_unbound(port_id) - - def port_update(self, context, port=None, network_type=None, - segmentation_id=None, physical_network=None): - LOG.debug(_("port_update received")) - if CONF.SECURITYGROUP.enable_security_group: - if 'security_groups' in port: - self.sec_groups_agent.refresh_firewall() - - self._treat_vif_port( - port['id'], port['network_id'], - network_type, physical_network, - segmentation_id, port['admin_state_up']) - - def _get_vswitch_name(self, network_type, physical_network): - if network_type != p_const.TYPE_LOCAL: - vswitch_name = self._get_vswitch_for_physical_network( - physical_network) - else: - vswitch_name = CONF.AGENT.local_network_vswitch - return vswitch_name - - def _provision_network(self, port_id, - net_uuid, network_type, - physical_network, - segmentation_id): - 
LOG.info(_("Provisioning network %s"), net_uuid) - - vswitch_name = self._get_vswitch_name(network_type, physical_network) - - if network_type in [p_const.TYPE_VLAN, p_const.TYPE_FLAT]: - #Nothing to do - pass - elif network_type == p_const.TYPE_LOCAL: - #TODO(alexpilotti): Check that the switch type is private - #or create it if not existing - pass - else: - raise utils.HyperVException( - msg=(_("Cannot provision unknown network type %(network_type)s" - " for network %(net_uuid)s") % - dict(network_type=network_type, net_uuid=net_uuid))) - - map = { - 'network_type': network_type, - 'vswitch_name': vswitch_name, - 'ports': [], - 'vlan_id': segmentation_id} - self._network_vswitch_map[net_uuid] = map - - def _reclaim_local_network(self, net_uuid): - LOG.info(_("Reclaiming local network %s"), net_uuid) - del self._network_vswitch_map[net_uuid] - - def _port_bound(self, port_id, - net_uuid, - network_type, - physical_network, - segmentation_id): - LOG.debug(_("Binding port %s"), port_id) - - if net_uuid not in self._network_vswitch_map: - self._provision_network( - port_id, net_uuid, network_type, - physical_network, segmentation_id) - - map = self._network_vswitch_map[net_uuid] - map['ports'].append(port_id) - - self._utils.connect_vnic_to_vswitch(map['vswitch_name'], port_id) - - if network_type == p_const.TYPE_VLAN: - LOG.info(_('Binding VLAN ID %(segmentation_id)s ' - 'to switch port %(port_id)s'), - dict(segmentation_id=segmentation_id, port_id=port_id)) - self._utils.set_vswitch_port_vlan_id( - segmentation_id, - port_id) - elif network_type == p_const.TYPE_FLAT: - #Nothing to do - pass - elif network_type == p_const.TYPE_LOCAL: - #Nothing to do - pass - else: - LOG.error(_('Unsupported network type %s'), network_type) - - if CONF.AGENT.enable_metrics_collection: - self._utils.enable_port_metrics_collection(port_id) - self._port_metric_retries[port_id] = CONF.AGENT.metrics_max_retries - - def _port_unbound(self, port_id): - (net_uuid, map) = 
self._get_network_vswitch_map_by_port_id(port_id) - if net_uuid not in self._network_vswitch_map: - LOG.info(_('Network %s is not avalailable on this agent'), - net_uuid) - return - - LOG.debug(_("Unbinding port %s"), port_id) - self._utils.disconnect_switch_port(map['vswitch_name'], port_id, True) - - if not map['ports']: - self._reclaim_local_network(net_uuid) - - def _port_enable_control_metrics(self): - if not CONF.AGENT.enable_metrics_collection: - return - - for port_id in self._port_metric_retries.keys(): - if self._utils.can_enable_control_metrics(port_id): - self._utils.enable_control_metrics(port_id) - LOG.info(_('Port metrics enabled for port: %s'), port_id) - del self._port_metric_retries[port_id] - elif self._port_metric_retries[port_id] < 1: - self._utils.enable_control_metrics(port_id) - LOG.error(_('Port metrics raw enabling for port: %s'), port_id) - del self._port_metric_retries[port_id] - else: - self._port_metric_retries[port_id] -= 1 - - def _update_ports(self, registered_ports): - ports = self._utils.get_vnic_ids() - if ports == registered_ports: - return - added = ports - registered_ports - removed = registered_ports - ports - return {'current': ports, - 'added': added, - 'removed': removed} - - def _treat_vif_port(self, port_id, network_id, network_type, - physical_network, segmentation_id, - admin_state_up): - if self._utils.vnic_port_exists(port_id): - if admin_state_up: - self._port_bound(port_id, network_id, network_type, - physical_network, segmentation_id) - else: - self._port_unbound(port_id) - else: - LOG.debug(_("No port %s defined on agent."), port_id) - - def _treat_devices_added(self, devices): - resync = False - for device in devices: - LOG.info(_("Adding port %s"), device) - try: - device_details = self.plugin_rpc.get_device_details( - self.context, - device, - self.agent_id) - except Exception as e: - LOG.debug( - _("Unable to get port details for " - "device %(device)s: %(e)s"), - {'device': device, 'e': e}) - resync = True - 
continue - if 'port_id' in device_details: - LOG.info( - _("Port %(device)s updated. Details: %(device_details)s"), - {'device': device, 'device_details': device_details}) - self._treat_vif_port( - device_details['port_id'], - device_details['network_id'], - device_details['network_type'], - device_details['physical_network'], - device_details['segmentation_id'], - device_details['admin_state_up']) - - # check if security groups is enabled. - # if not, teardown the security group rules - if CONF.SECURITYGROUP.enable_security_group: - self.sec_groups_agent.prepare_devices_filter([device]) - else: - self._utils.remove_all_security_rules( - device_details['port_id']) - self.plugin_rpc.update_device_up(self.context, - device, - self.agent_id, - cfg.CONF.host) - return resync - - def _treat_devices_removed(self, devices): - resync = False - for device in devices: - LOG.info(_("Removing port %s"), device) - try: - self.plugin_rpc.update_device_down(self.context, - device, - self.agent_id, - cfg.CONF.host) - except Exception as e: - LOG.debug( - _("Removing port failed for device %(device)s: %(e)s"), - dict(device=device, e=e)) - resync = True - continue - self._port_unbound(device) - return resync - - def _process_network_ports(self, port_info): - resync_a = False - resync_b = False - if 'added' in port_info: - resync_a = self._treat_devices_added(port_info['added']) - if 'removed' in port_info: - resync_b = self._treat_devices_removed(port_info['removed']) - # If one of the above operations fails => resync with plugin - return (resync_a | resync_b) - - def daemon_loop(self): - sync = True - ports = set() - - while True: - try: - start = time.time() - if sync: - LOG.info(_("Agent out of sync with plugin!")) - ports.clear() - sync = False - - port_info = self._update_ports(ports) - - # notify plugin about port deltas - if port_info: - LOG.debug(_("Agent loop has new devices!")) - # If treat devices fails - must resync with plugin - sync = 
self._process_network_ports(port_info) - ports = port_info['current'] - - self._port_enable_control_metrics() - except Exception as e: - LOG.exception(_("Error in agent event loop: %s"), e) - sync = True - - # sleep till end of polling interval - elapsed = (time.time() - start) - if (elapsed < self._polling_interval): - time.sleep(self._polling_interval - elapsed) - else: - LOG.debug(_("Loop iteration exceeded interval " - "(%(polling_interval)s vs. %(elapsed)s)"), - {'polling_interval': self._polling_interval, - 'elapsed': elapsed}) - - -def main(): - common_config.init(sys.argv[1:]) - common_config.setup_logging(cfg.CONF) - - plugin = HyperVNeutronAgent() - - # Start everything. - LOG.info(_("Agent initialized successfully, now running... ")) - plugin.daemon_loop() diff --git a/neutron/plugins/hyperv/agent/security_groups_driver.py b/neutron/plugins/hyperv/agent/security_groups_driver.py deleted file mode 100644 index 755ab5270..000000000 --- a/neutron/plugins/hyperv/agent/security_groups_driver.py +++ /dev/null @@ -1,146 +0,0 @@ -#Copyright 2014 Cloudbase Solutions SRL -#All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# @author: Claudiu Belu, Cloudbase Solutions Srl - -from neutron.agent import firewall -from neutron.openstack.common import log as logging -from neutron.plugins.hyperv.agent import utilsfactory -from neutron.plugins.hyperv.agent import utilsv2 - -LOG = logging.getLogger(__name__) - - -class HyperVSecurityGroupsDriver(firewall.FirewallDriver): - """Security Groups Driver. - - Security Groups implementation for Hyper-V VMs. - """ - - _ACL_PROP_MAP = { - 'direction': {'ingress': utilsv2.HyperVUtilsV2._ACL_DIR_IN, - 'egress': utilsv2.HyperVUtilsV2._ACL_DIR_OUT}, - 'ethertype': {'IPv4': utilsv2.HyperVUtilsV2._ACL_TYPE_IPV4, - 'IPv6': utilsv2.HyperVUtilsV2._ACL_TYPE_IPV6}, - 'protocol': {'icmp': utilsv2.HyperVUtilsV2._ICMP_PROTOCOL}, - 'default': "ANY", - 'address_default': {'IPv4': '0.0.0.0/0', 'IPv6': '::/0'} - } - - def __init__(self): - self._utils = utilsfactory.get_hypervutils() - self._security_ports = {} - - def prepare_port_filter(self, port): - LOG.debug('Creating port %s rules' % len(port['security_group_rules'])) - - # newly created port, add default rules. 
- if port['device'] not in self._security_ports: - LOG.debug('Creating default reject rules.') - self._utils.create_default_reject_all_rules(port['id']) - - self._security_ports[port['device']] = port - self._create_port_rules(port['id'], port['security_group_rules']) - - def _create_port_rules(self, port_id, rules): - for rule in rules: - param_map = self._create_param_map(rule) - try: - self._utils.create_security_rule(port_id, **param_map) - except Exception as ex: - LOG.error(_('Hyper-V Exception: %(hyperv_exeption)s while ' - 'adding rule: %(rule)s'), - dict(hyperv_exeption=ex, rule=rule)) - - def _remove_port_rules(self, port_id, rules): - for rule in rules: - param_map = self._create_param_map(rule) - try: - self._utils.remove_security_rule(port_id, **param_map) - except Exception as ex: - LOG.error(_('Hyper-V Exception: %(hyperv_exeption)s while ' - 'removing rule: %(rule)s'), - dict(hyperv_exeption=ex, rule=rule)) - - def _create_param_map(self, rule): - if 'port_range_min' in rule and 'port_range_max' in rule: - local_port = '%s-%s' % (rule['port_range_min'], - rule['port_range_max']) - else: - local_port = self._ACL_PROP_MAP['default'] - - return { - 'direction': self._ACL_PROP_MAP['direction'][rule['direction']], - 'acl_type': self._ACL_PROP_MAP['ethertype'][rule['ethertype']], - 'local_port': local_port, - 'protocol': self._get_rule_protocol(rule), - 'remote_address': self._get_rule_remote_address(rule) - } - - def apply_port_filter(self, port): - LOG.info(_('Aplying port filter.')) - - def update_port_filter(self, port): - LOG.info(_('Updating port rules.')) - - if port['device'] not in self._security_ports: - self.prepare_port_filter(port) - return - - old_port = self._security_ports[port['device']] - rules = old_port['security_group_rules'] - param_port_rules = port['security_group_rules'] - - new_rules = [r for r in param_port_rules if r not in rules] - remove_rules = [r for r in rules if r not in param_port_rules] - - LOG.info(_("Creating %(new)s 
new rules, removing %(old)s " - "old rules."), - {'new': len(new_rules), - 'old': len(remove_rules)}) - - self._remove_port_rules(old_port['id'], remove_rules) - self._create_port_rules(port['id'], new_rules) - - self._security_ports[port['device']] = port - - def remove_port_filter(self, port): - LOG.info(_('Removing port filter')) - self._security_ports.pop(port['device'], None) - - @property - def ports(self): - return self._security_ports - - def _get_rule_remote_address(self, rule): - if rule['direction'] is 'ingress': - ip_prefix = 'source_ip_prefix' - else: - ip_prefix = 'dest_ip_prefix' - - if ip_prefix in rule: - return rule[ip_prefix] - return self._ACL_PROP_MAP['address_default'][rule['ethertype']] - - def _get_rule_protocol(self, rule): - protocol = self._get_rule_prop_or_default(rule, 'protocol') - if protocol in self._ACL_PROP_MAP['protocol'].keys(): - return self._ACL_PROP_MAP['protocol'][protocol] - - return protocol - - def _get_rule_prop_or_default(self, rule, prop): - if prop in rule: - return rule[prop] - return self._ACL_PROP_MAP['default'] diff --git a/neutron/plugins/hyperv/agent/utils.py b/neutron/plugins/hyperv/agent/utils.py deleted file mode 100644 index 31439f0b0..000000000 --- a/neutron/plugins/hyperv/agent/utils.py +++ /dev/null @@ -1,256 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2013 Cloudbase Solutions SRL -# Copyright 2013 Pedro Navarro Perez -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -# @author: Pedro Navarro Perez -# @author: Alessandro Pilotti, Cloudbase Solutions Srl - -import sys -import time - -from oslo.config import cfg - -from neutron.common import exceptions as n_exc -from neutron.openstack.common import log as logging - -# Check needed for unit testing on Unix -if sys.platform == 'win32': - import wmi - -CONF = cfg.CONF -LOG = logging.getLogger(__name__) - - -class HyperVException(n_exc.NeutronException): - message = _('HyperVException: %(msg)s') - -WMI_JOB_STATE_STARTED = 4096 -WMI_JOB_STATE_RUNNING = 4 -WMI_JOB_STATE_COMPLETED = 7 - - -class HyperVUtils(object): - - _ETHERNET_SWITCH_PORT = 'Msvm_SwitchPort' - - _wmi_namespace = '//./root/virtualization' - - def __init__(self): - self._wmi_conn = None - - @property - def _conn(self): - if self._wmi_conn is None: - self._wmi_conn = wmi.WMI(moniker=self._wmi_namespace) - return self._wmi_conn - - def get_switch_ports(self, vswitch_name): - vswitch = self._get_vswitch(vswitch_name) - vswitch_ports = vswitch.associators( - wmi_result_class=self._ETHERNET_SWITCH_PORT) - return set(p.Name for p in vswitch_ports) - - def vnic_port_exists(self, port_id): - try: - self._get_vnic_settings(port_id) - except Exception: - return False - return True - - def get_vnic_ids(self): - return set( - p.ElementName - for p in self._conn.Msvm_SyntheticEthernetPortSettingData() - if p.ElementName is not None) - - def _get_vnic_settings(self, vnic_name): - vnic_settings = self._conn.Msvm_SyntheticEthernetPortSettingData( - ElementName=vnic_name) - if not vnic_settings: - raise HyperVException(msg=_('Vnic not found: %s') % vnic_name) - return vnic_settings[0] - - def connect_vnic_to_vswitch(self, vswitch_name, switch_port_name): - vnic_settings = self._get_vnic_settings(switch_port_name) - if not vnic_settings.Connection or not vnic_settings.Connection[0]: - port = self.get_port_by_id(switch_port_name, 
vswitch_name) - if port: - port_path = port.Path_() - else: - port_path = self._create_switch_port( - vswitch_name, switch_port_name) - vnic_settings.Connection = [port_path] - self._modify_virt_resource(vnic_settings) - - def _get_vm_from_res_setting_data(self, res_setting_data): - sd = res_setting_data.associators( - wmi_result_class='Msvm_VirtualSystemSettingData') - vm = sd[0].associators( - wmi_result_class='Msvm_ComputerSystem') - return vm[0] - - def _modify_virt_resource(self, res_setting_data): - vm = self._get_vm_from_res_setting_data(res_setting_data) - - vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0] - (job_path, ret_val) = vs_man_svc.ModifyVirtualSystemResources( - vm.Path_(), [res_setting_data.GetText_(1)]) - self._check_job_status(ret_val, job_path) - - def _check_job_status(self, ret_val, jobpath): - """Poll WMI job state for completion.""" - if not ret_val: - return - elif ret_val not in [WMI_JOB_STATE_STARTED, WMI_JOB_STATE_RUNNING]: - raise HyperVException(msg=_('Job failed with error %d') % ret_val) - - job_wmi_path = jobpath.replace('\\', '/') - job = wmi.WMI(moniker=job_wmi_path) - - while job.JobState == WMI_JOB_STATE_RUNNING: - time.sleep(0.1) - job = wmi.WMI(moniker=job_wmi_path) - if job.JobState != WMI_JOB_STATE_COMPLETED: - job_state = job.JobState - if job.path().Class == "Msvm_ConcreteJob": - err_sum_desc = job.ErrorSummaryDescription - err_desc = job.ErrorDescription - err_code = job.ErrorCode - data = {'job_state': job_state, - 'err_sum_desc': err_sum_desc, - 'err_desc': err_desc, - 'err_code': err_code} - raise HyperVException( - msg=_("WMI job failed with status %(job_state)d. " - "Error details: %(err_sum_desc)s - %(err_desc)s - " - "Error code: %(err_code)d") % data) - else: - (error, ret_val) = job.GetError() - if not ret_val and error: - data = {'job_state': job_state, - 'error': error} - raise HyperVException( - msg=_("WMI job failed with status %(job_state)d. 
" - "Error details: %(error)s") % data) - else: - raise HyperVException( - msg=_("WMI job failed with status %d. " - "No error description available") % job_state) - - desc = job.Description - elap = job.ElapsedTime - LOG.debug(_("WMI job succeeded: %(desc)s, Elapsed=%(elap)s"), - {'desc': desc, 'elap': elap}) - - def _create_switch_port(self, vswitch_name, switch_port_name): - """Creates a switch port.""" - switch_svc = self._conn.Msvm_VirtualSwitchManagementService()[0] - vswitch_path = self._get_vswitch(vswitch_name).path_() - (new_port, ret_val) = switch_svc.CreateSwitchPort( - Name=switch_port_name, - FriendlyName=switch_port_name, - ScopeOfResidence="", - VirtualSwitch=vswitch_path) - if ret_val != 0: - raise HyperVException( - msg=_('Failed creating port for %s') % vswitch_name) - return new_port - - def disconnect_switch_port( - self, vswitch_name, switch_port_name, delete_port): - """Disconnects the switch port.""" - switch_svc = self._conn.Msvm_VirtualSwitchManagementService()[0] - switch_port_path = self._get_switch_port_path_by_name( - switch_port_name) - if not switch_port_path: - # Port not found. It happens when the VM was already deleted. 
- return - - (ret_val, ) = switch_svc.DisconnectSwitchPort( - SwitchPort=switch_port_path) - if ret_val != 0: - data = {'switch_port_name': switch_port_name, - 'vswitch_name': vswitch_name, - 'ret_val': ret_val} - raise HyperVException( - msg=_('Failed to disconnect port %(switch_port_name)s ' - 'from switch %(vswitch_name)s ' - 'with error %(ret_val)s') % data) - if delete_port: - (ret_val, ) = switch_svc.DeleteSwitchPort( - SwitchPort=switch_port_path) - if ret_val != 0: - data = {'switch_port_name': switch_port_name, - 'vswitch_name': vswitch_name, - 'ret_val': ret_val} - raise HyperVException( - msg=_('Failed to delete port %(switch_port_name)s ' - 'from switch %(vswitch_name)s ' - 'with error %(ret_val)s') % data) - - def _get_vswitch(self, vswitch_name): - vswitch = self._conn.Msvm_VirtualSwitch(ElementName=vswitch_name) - if not vswitch: - raise HyperVException(msg=_('VSwitch not found: %s') % - vswitch_name) - return vswitch[0] - - def _get_vswitch_external_port(self, vswitch): - vswitch_ports = vswitch.associators( - wmi_result_class=self._ETHERNET_SWITCH_PORT) - for vswitch_port in vswitch_ports: - lan_endpoints = vswitch_port.associators( - wmi_result_class='Msvm_SwitchLanEndpoint') - if lan_endpoints: - ext_port = lan_endpoints[0].associators( - wmi_result_class='Msvm_ExternalEthernetPort') - if ext_port: - return vswitch_port - - def set_vswitch_port_vlan_id(self, vlan_id, switch_port_name): - vlan_endpoint_settings = self._conn.Msvm_VLANEndpointSettingData( - ElementName=switch_port_name)[0] - if vlan_endpoint_settings.AccessVLAN != vlan_id: - vlan_endpoint_settings.AccessVLAN = vlan_id - vlan_endpoint_settings.put() - - def _get_switch_port_path_by_name(self, switch_port_name): - vswitch = self._conn.Msvm_SwitchPort(ElementName=switch_port_name) - if vswitch: - return vswitch[0].path_() - - def get_vswitch_id(self, vswitch_name): - vswitch = self._get_vswitch(vswitch_name) - return vswitch.Name - - def get_port_by_id(self, port_id, vswitch_name): - 
vswitch = self._get_vswitch(vswitch_name) - switch_ports = vswitch.associators( - wmi_result_class=self._ETHERNET_SWITCH_PORT) - for switch_port in switch_ports: - if (switch_port.ElementName == port_id): - return switch_port - - def enable_port_metrics_collection(self, switch_port_name): - raise NotImplementedError(_("Metrics collection is not supported on " - "this version of Hyper-V")) - - def enable_control_metrics(self, switch_port_name): - raise NotImplementedError(_("Metrics collection is not supported on " - "this version of Hyper-V")) - - def can_enable_control_metrics(self, switch_port_name): - return False diff --git a/neutron/plugins/hyperv/agent/utilsfactory.py b/neutron/plugins/hyperv/agent/utilsfactory.py deleted file mode 100644 index 5698255c3..000000000 --- a/neutron/plugins/hyperv/agent/utilsfactory.py +++ /dev/null @@ -1,72 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2013 Cloudbase Solutions SRL -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# @author: Claudiu Belu, Cloudbase Solutions Srl - -import sys - -from oslo.config import cfg - -from neutron.openstack.common import log as logging -from neutron.plugins.hyperv.agent import utils -from neutron.plugins.hyperv.agent import utilsv2 - -# Check needed for unit testing on Unix -if sys.platform == 'win32': - import wmi - -hyper_opts = [ - cfg.BoolOpt('force_hyperv_utils_v1', - default=False, - help=_('Force V1 WMI utility classes')), -] - -CONF = cfg.CONF -CONF.register_opts(hyper_opts, 'hyperv') - -LOG = logging.getLogger(__name__) - - -def _get_windows_version(): - return wmi.WMI(moniker='//./root/cimv2').Win32_OperatingSystem()[0].Version - - -def _check_min_windows_version(major, minor, build=0): - version_str = _get_windows_version() - return map(int, version_str.split('.')) >= [major, minor, build] - - -def get_hypervutils(): - # V1 virtualization namespace features are supported up to - # Windows Server / Hyper-V Server 2012 - # V2 virtualization namespace features are supported starting with - # Windows Server / Hyper-V Server 2012 - # Windows Server / Hyper-V Server 2012 R2 uses the V2 namespace and - # introduces additional features - - force_v1_flag = CONF.hyperv.force_hyperv_utils_v1 - if _check_min_windows_version(6, 3): - if force_v1_flag: - LOG.warning(_('V1 virtualization namespace no longer supported on ' - 'Windows Server / Hyper-V Server 2012 R2 or above.')) - cls = utilsv2.HyperVUtilsV2R2 - elif not force_v1_flag and _check_min_windows_version(6, 2): - cls = utilsv2.HyperVUtilsV2 - else: - cls = utils.HyperVUtils - LOG.debug(_("Loading class: %(module_name)s.%(class_name)s"), - {'module_name': cls.__module__, 'class_name': cls.__name__}) - return cls() diff --git a/neutron/plugins/hyperv/agent/utilsv2.py b/neutron/plugins/hyperv/agent/utilsv2.py deleted file mode 100644 index a55839487..000000000 --- a/neutron/plugins/hyperv/agent/utilsv2.py +++ /dev/null @@ -1,439 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 
# Hyper-V networking utilities based on the WMI virtualization *v2*
# namespace (Windows Server / Hyper-V Server 2012 and later).

from neutron.plugins.hyperv.agent import utils


class HyperVUtilsV2(utils.HyperVUtils):
    """V2 WMI namespace implementation of the Hyper-V network utilities.

    Fixes over the previous revision: integer WMI attribute values were
    compared with ``is`` / ``is not`` (identity), which only happens to
    work for CPython's cached small ints; they are now compared with
    ``==`` / ``!=`` (equality).
    """

    _EXTERNAL_PORT = 'Msvm_ExternalEthernetPort'
    _ETHERNET_SWITCH_PORT = 'Msvm_EthernetSwitchPort'
    _PORT_ALLOC_SET_DATA = 'Msvm_EthernetPortAllocationSettingData'
    _PORT_VLAN_SET_DATA = 'Msvm_EthernetSwitchPortVlanSettingData'
    _PORT_SECURITY_SET_DATA = 'Msvm_EthernetSwitchPortSecuritySettingData'
    _PORT_ALLOC_ACL_SET_DATA = 'Msvm_EthernetSwitchPortAclSettingData'
    _PORT_EXT_ACL_SET_DATA = _PORT_ALLOC_ACL_SET_DATA
    _LAN_ENDPOINT = 'Msvm_LANEndpoint'
    _STATE_DISABLED = 3
    _OPERATION_MODE_ACCESS = 1

    _VIRTUAL_SYSTEM_SETTING_DATA = 'Msvm_VirtualSystemSettingData'
    _VM_SUMMARY_ENABLED_STATE = 100
    _HYPERV_VM_STATE_ENABLED = 2

    _ACL_DIR_IN = 1
    _ACL_DIR_OUT = 2

    _ACL_TYPE_IPV4 = 2
    _ACL_TYPE_IPV6 = 3

    _ACL_ACTION_ALLOW = 1
    _ACL_ACTION_DENY = 2
    _ACL_ACTION_METER = 3

    _METRIC_ENABLED = 2
    _NET_IN_METRIC_NAME = 'Filtered Incoming Network Traffic'
    _NET_OUT_METRIC_NAME = 'Filtered Outgoing Network Traffic'

    _ACL_APPLICABILITY_LOCAL = 1
    _ACL_APPLICABILITY_REMOTE = 2

    _ACL_DEFAULT = 'ANY'
    _IPV4_ANY = '0.0.0.0/0'
    _IPV6_ANY = '::/0'
    _TCP_PROTOCOL = 'tcp'
    _UDP_PROTOCOL = 'udp'
    _ICMP_PROTOCOL = '1'
    _MAX_WEIGHT = 65500

    # 2 directions x 2 address types = 4 ACLs
    _REJECT_ACLS_COUNT = 4

    _wmi_namespace = '//./root/virtualization/v2'

    def __init__(self):
        super(HyperVUtilsV2, self).__init__()

    def connect_vnic_to_vswitch(self, vswitch_name, switch_port_name):
        """Connect (or reconnect) a vNIC to the named vswitch."""
        vnic = self._get_vnic_settings(switch_port_name)
        vswitch = self._get_vswitch(vswitch_name)

        port, found = self._get_switch_port_allocation(switch_port_name, True)
        port.HostResource = [vswitch.path_()]
        port.Parent = vnic.path_()
        if not found:
            vm = self._get_vm_from_res_setting_data(vnic)
            self._add_virt_resource(vm, port)
        else:
            self._modify_virt_resource(port)

    def _modify_virt_resource(self, res_setting_data):
        vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
        (job_path, out_set_data, ret_val) = vs_man_svc.ModifyResourceSettings(
            ResourceSettings=[res_setting_data.GetText_(1)])
        self._check_job_status(ret_val, job_path)

    def _add_virt_resource(self, vm, res_setting_data):
        vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
        (job_path, out_set_data, ret_val) = vs_man_svc.AddResourceSettings(
            vm.path_(), [res_setting_data.GetText_(1)])
        self._check_job_status(ret_val, job_path)

    def _remove_virt_resource(self, res_setting_data):
        vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
        (job, ret_val) = vs_man_svc.RemoveResourceSettings(
            ResourceSettings=[res_setting_data.path_()])
        self._check_job_status(ret_val, job)

    def _add_virt_feature(self, element, res_setting_data):
        vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
        (job_path, out_set_data, ret_val) = vs_man_svc.AddFeatureSettings(
            element.path_(), [res_setting_data.GetText_(1)])
        self._check_job_status(ret_val, job_path)

    def _remove_virt_feature(self, feature_resource):
        self._remove_multiple_virt_features([feature_resource])

    def _remove_multiple_virt_features(self, feature_resources):
        vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
        (job_path, ret_val) = vs_man_svc.RemoveFeatureSettings(
            FeatureSettings=[f.path_() for f in feature_resources])
        self._check_job_status(ret_val, job_path)

    def disconnect_switch_port(
            self, vswitch_name, switch_port_name, delete_port):
        """Disconnects the switch port."""
        sw_port, found = self._get_switch_port_allocation(switch_port_name)
        if not sw_port:
            # Port not found. It happens when the VM was already deleted.
            return

        if delete_port:
            self._remove_virt_resource(sw_port)
        else:
            sw_port.EnabledState = self._STATE_DISABLED
            self._modify_virt_resource(sw_port)

    def _get_vswitch(self, vswitch_name):
        """Return the vswitch WMI object; raise if it does not exist."""
        vswitch = self._conn.Msvm_VirtualEthernetSwitch(
            ElementName=vswitch_name)
        if not len(vswitch):
            raise utils.HyperVException(msg=_('VSwitch not found: %s') %
                                        vswitch_name)
        return vswitch[0]

    def _get_vswitch_external_port(self, vswitch):
        """Return the vswitch port bound to an external NIC, if any."""
        vswitch_ports = vswitch.associators(
            wmi_result_class=self._ETHERNET_SWITCH_PORT)
        for vswitch_port in vswitch_ports:
            lan_endpoints = vswitch_port.associators(
                wmi_result_class=self._LAN_ENDPOINT)
            if len(lan_endpoints):
                lan_endpoints = lan_endpoints[0].associators(
                    wmi_result_class=self._LAN_ENDPOINT)
                if len(lan_endpoints):
                    ext_port = lan_endpoints[0].associators(
                        wmi_result_class=self._EXTERNAL_PORT)
                    if ext_port:
                        return vswitch_port

    def set_vswitch_port_vlan_id(self, vlan_id, switch_port_name):
        """Set an access-mode VLAN id on the given switch port."""
        port_alloc, found = self._get_switch_port_allocation(switch_port_name)
        if not found:
            raise utils.HyperVException(
                msg=_('Port Allocation not found: %s') % switch_port_name)

        vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
        vlan_settings = self._get_vlan_setting_data_from_port_alloc(port_alloc)
        if vlan_settings:
            # Removing the feature because it cannot be modified
            # due to a wmi exception.
            (job_path, ret_val) = vs_man_svc.RemoveFeatureSettings(
                FeatureSettings=[vlan_settings.path_()])
            self._check_job_status(ret_val, job_path)

        (vlan_settings, found) = self._get_vlan_setting_data(switch_port_name)
        vlan_settings.AccessVlanId = vlan_id
        vlan_settings.OperationMode = self._OPERATION_MODE_ACCESS
        (job_path, out, ret_val) = vs_man_svc.AddFeatureSettings(
            port_alloc.path_(), [vlan_settings.GetText_(1)])
        self._check_job_status(ret_val, job_path)

    def _get_vlan_setting_data_from_port_alloc(self, port_alloc):
        return self._get_first_item(port_alloc.associators(
            wmi_result_class=self._PORT_VLAN_SET_DATA))

    def _get_vlan_setting_data(self, switch_port_name, create=True):
        return self._get_setting_data(
            self._PORT_VLAN_SET_DATA,
            switch_port_name, create)

    def _get_switch_port_allocation(self, switch_port_name, create=False):
        return self._get_setting_data(
            self._PORT_ALLOC_SET_DATA,
            switch_port_name, create)

    def _get_setting_data(self, class_name, element_name, create=True):
        """Return (setting_data, found); optionally create a default one."""
        element_name = element_name.replace("'", '"')
        q = self._conn.query("SELECT * FROM %(class_name)s WHERE "
                             "ElementName = '%(element_name)s'" %
                             {"class_name": class_name,
                              "element_name": element_name})
        data = self._get_first_item(q)
        found = data is not None
        if not data and create:
            data = self._get_default_setting_data(class_name)
            data.ElementName = element_name
        return data, found

    def _get_default_setting_data(self, class_name):
        return self._conn.query("SELECT * FROM %s WHERE InstanceID "
                                "LIKE '%%\\Default'" % class_name)[0]

    def _get_first_item(self, obj):
        if obj:
            return obj[0]

    def enable_port_metrics_collection(self, switch_port_name):
        """Attach meter ACLs (both directions, IPv4+IPv6) to the port."""
        port, found = self._get_switch_port_allocation(switch_port_name, False)
        if not found:
            return

        # Add the ACLs only if they don't already exist
        acls = port.associators(wmi_result_class=self._PORT_ALLOC_ACL_SET_DATA)
        for acl_type in [self._ACL_TYPE_IPV4, self._ACL_TYPE_IPV6]:
            for acl_dir in [self._ACL_DIR_IN, self._ACL_DIR_OUT]:
                _acls = self._filter_acls(
                    acls, self._ACL_ACTION_METER, acl_dir, acl_type)

                if not _acls:
                    acl = self._create_acl(
                        acl_dir, acl_type, self._ACL_ACTION_METER)
                    self._add_virt_feature(port, acl)

    def enable_control_metrics(self, switch_port_name):
        """Enable collection of the in/out network traffic metrics."""
        port, found = self._get_switch_port_allocation(switch_port_name, False)
        if not found:
            return

        metric_svc = self._conn.Msvm_MetricService()[0]
        metric_names = [self._NET_IN_METRIC_NAME, self._NET_OUT_METRIC_NAME]

        for metric_name in metric_names:
            metric_def = self._conn.CIM_BaseMetricDefinition(Name=metric_name)
            if metric_def:
                metric_svc.ControlMetrics(
                    Subject=port.path_(),
                    Definition=metric_def[0].path_(),
                    MetricCollectionEnabled=self._METRIC_ENABLED)

    def can_enable_control_metrics(self, switch_port_name):
        """Return True when the port's VM is started and meter ACLs exist."""
        port, found = self._get_switch_port_allocation(switch_port_name, False)
        if not found:
            return False

        if not self._is_port_vm_started(port):
            return False

        # The meter ACLs for both directions must exist first.
        acls = port.associators(wmi_result_class=self._PORT_ALLOC_ACL_SET_DATA)
        acls = [a for a in acls if a.Action == self._ACL_ACTION_METER]
        if len(acls) < 2:
            return False
        return True

    def _is_port_vm_started(self, port):
        vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
        vmsettings = port.associators(
            wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA)
        # See http://msdn.microsoft.com/en-us/library/cc160706%28VS.85%29.aspx
        (ret_val, summary_info) = vs_man_svc.GetSummaryInformation(
            [self._VM_SUMMARY_ENABLED_STATE],
            [v.path_() for v in vmsettings])
        if ret_val or not summary_info:
            raise utils.HyperVException(msg=_('Cannot get VM summary data '
                                              'for: %s') % port.ElementName)

        # FIX: value equality, not identity ('is'), for the integer state.
        return summary_info[0].EnabledState == self._HYPERV_VM_STATE_ENABLED

    def create_security_rule(self, switch_port_name, direction, acl_type,
                             local_port, protocol, remote_address):
        """Add an ALLOW ACL matching the given security group rule."""
        port, found = self._get_switch_port_allocation(switch_port_name, False)
        if not found:
            return

        # Add the ACLs only if they don't already exist
        acls = port.associators(wmi_result_class=self._PORT_EXT_ACL_SET_DATA)
        weight = self._get_new_weight(acls)
        self._bind_security_rule(
            port, direction, acl_type, self._ACL_ACTION_ALLOW, local_port,
            protocol, remote_address, weight)

    def remove_security_rule(self, switch_port_name, direction, acl_type,
                             local_port, protocol, remote_address):
        """Remove the ACL(s) matching the given security group rule."""
        port, found = self._get_switch_port_allocation(switch_port_name, False)
        if not found:
            # Port not found. It happens when the VM was already deleted.
            return

        acls = port.associators(wmi_result_class=self._PORT_EXT_ACL_SET_DATA)
        filtered_acls = self._filter_security_acls(
            acls, self._ACL_ACTION_ALLOW, direction, acl_type, local_port,
            protocol, remote_address)

        for acl in filtered_acls:
            self._remove_virt_feature(acl)

    def remove_all_security_rules(self, switch_port_name):
        """Remove all non-meter ACLs from the port."""
        port, found = self._get_switch_port_allocation(switch_port_name, False)
        if not found:
            # Port not found. It happens when the VM was already deleted.
            return

        acls = port.associators(wmi_result_class=self._PORT_EXT_ACL_SET_DATA)
        # FIX: value inequality, not 'is not', for the integer action code.
        filtered_acls = [a for a in acls if
                         a.Action != self._ACL_ACTION_METER]

        if filtered_acls:
            self._remove_multiple_virt_features(filtered_acls)

    def create_default_reject_all_rules(self, switch_port_name):
        """Install the default DENY ACL set (both dirs, IPv4+IPv6)."""
        port, found = self._get_switch_port_allocation(switch_port_name, False)
        if not found:
            raise utils.HyperVException(
                msg=_('Port Allocation not found: %s') % switch_port_name)

        acls = port.associators(wmi_result_class=self._PORT_EXT_ACL_SET_DATA)
        filtered_acls = [v for v in acls if v.Action == self._ACL_ACTION_DENY]

        if len(filtered_acls) >= self._REJECT_ACLS_COUNT:
            # Full reject set already present; nothing to do.
            return

        for acl in filtered_acls:
            self._remove_virt_feature(acl)

        weight = 0
        ipv4_pair = (self._ACL_TYPE_IPV4, self._IPV4_ANY)
        ipv6_pair = (self._ACL_TYPE_IPV6, self._IPV6_ANY)
        for direction in [self._ACL_DIR_IN, self._ACL_DIR_OUT]:
            for acl_type, address in [ipv4_pair, ipv6_pair]:
                for protocol in [self._TCP_PROTOCOL,
                                 self._UDP_PROTOCOL,
                                 self._ICMP_PROTOCOL]:
                    self._bind_security_rule(
                        port, direction, acl_type, self._ACL_ACTION_DENY,
                        self._ACL_DEFAULT, protocol, address, weight)
                    weight += 1

    def _bind_security_rule(self, port, direction, acl_type, action,
                            local_port, protocol, remote_address, weight):
        # Replace any existing equivalent ACLs before adding the new one.
        acls = port.associators(wmi_result_class=self._PORT_EXT_ACL_SET_DATA)
        filtered_acls = self._filter_security_acls(
            acls, action, direction, acl_type, local_port, protocol,
            remote_address)

        for acl in filtered_acls:
            self._remove_virt_feature(acl)

        acl = self._create_security_acl(
            direction, acl_type, action, local_port, protocol, remote_address,
            weight)

        self._add_virt_feature(port, acl)

    def _create_acl(self, direction, acl_type, action):
        acl = self._get_default_setting_data(self._PORT_ALLOC_ACL_SET_DATA)
        acl.set(Direction=direction,
                AclType=acl_type,
                Action=action,
                Applicability=self._ACL_APPLICABILITY_LOCAL)
        return acl

    def _create_security_acl(self, direction, acl_type, action, local_port,
                             protocol, remote_ip_address, weight):
        # NOTE: the V2 ACL data does not carry local_port/protocol/weight;
        # those parameters are honored by the V2R2 override.
        acl = self._create_acl(direction, acl_type, action)
        (remote_address, remote_prefix_length) = remote_ip_address.split('/')
        acl.set(Applicability=self._ACL_APPLICABILITY_REMOTE,
                RemoteAddress=remote_address,
                RemoteAddressPrefixLength=remote_prefix_length)
        return acl

    def _filter_acls(self, acls, action, direction, acl_type, remote_addr=""):
        return [v for v in acls
                if v.Action == action and
                v.Direction == direction and
                v.AclType == acl_type and
                v.RemoteAddress == remote_addr]

    def _filter_security_acls(self, acls, acl_action, direction, acl_type,
                              local_port, protocol, remote_addr=""):
        # NOTE: matches both ALLOW and DENY regardless of acl_action;
        # local_port/protocol are unused here (see the V2R2 override).
        (remote_address, remote_prefix_length) = remote_addr.split('/')
        remote_prefix_length = int(remote_prefix_length)

        return [v for v in acls
                if v.Direction == direction and
                v.Action in [self._ACL_ACTION_ALLOW, self._ACL_ACTION_DENY] and
                v.AclType == acl_type and
                v.RemoteAddress == remote_address and
                v.RemoteAddressPrefixLength == remote_prefix_length]

    def _get_new_weight(self, acls):
        return 0


class HyperVUtilsV2R2(HyperVUtilsV2):
    """Windows Server / Hyper-V Server 2012 R2 variant.

    Uses the extended ACL setting data, which supports local port,
    protocol and weight attributes.
    """

    _PORT_EXT_ACL_SET_DATA = 'Msvm_EthernetSwitchPortExtendedAclSettingData'
    _MAX_WEIGHT = 65500

    # 2 directions x 2 address types x 3 protocols = 12 ACLs
    _REJECT_ACLS_COUNT = 12

    def _create_security_acl(self, direction, acl_type, action, local_port,
                             protocol, remote_addr, weight):
        acl = self._get_default_setting_data(self._PORT_EXT_ACL_SET_DATA)
        acl.set(Direction=direction,
                Action=action,
                LocalPort=str(local_port),
                Protocol=protocol,
                RemoteIPAddress=remote_addr,
                IdleSessionTimeout=0,
                Weight=weight)
        return acl

    def _filter_security_acls(self, acls, action, direction, acl_type,
                              local_port, protocol, remote_addr=""):
        return [v for v in acls
                if v.Action == action and
                v.Direction == direction and
                v.LocalPort == str(local_port) and
                v.Protocol == protocol and
                v.RemoteIPAddress == remote_addr]

    def _get_new_weight(self, acls):
        """Pick an unused weight below the current minimum non-DENY ACL."""
        # FIX: value inequality, not 'is not', for the integer action code.
        acls = [a for a in acls if a.Action != self._ACL_ACTION_DENY]
        if not acls:
            return self._MAX_WEIGHT - 1

        weights = [a.Weight for a in acls]
        min_weight = min(weights)
        for weight in range(min_weight, self._MAX_WEIGHT):
            if weight not in weights:
                return weight

        return min_weight - 1
# Plugin-side RPC notifier for the Hyper-V L2 agents.

from neutron.common import rpc_compat
from neutron.common import topics
from neutron.openstack.common import log as logging
from neutron.plugins.hyperv.common import constants

LOG = logging.getLogger(__name__)


class AgentNotifierApi(rpc_compat.RpcProxy):
    """Plugin side of the Hyper-V plugin <-> agent RPC API.

    Fans notifications out to every agent listening on the per-resource
    topics derived from the base agent topic.

    API version history:
        1.0 - Initial version.
    """

    BASE_RPC_API_VERSION = '1.0'

    def __init__(self, topic):
        super(AgentNotifierApi, self).__init__(
            topic=topic, default_version=self.BASE_RPC_API_VERSION)
        # Pre-compute the fanout topic for each notification type.
        topic_name = topics.get_topic_name
        self.topic_network_delete = topic_name(topic, topics.NETWORK,
                                               topics.DELETE)
        self.topic_port_update = topic_name(topic, topics.PORT, topics.UPDATE)
        self.topic_port_delete = topic_name(topic, topics.PORT, topics.DELETE)
        self.topic_tunnel_update = topic_name(topic, constants.TUNNEL,
                                              topics.UPDATE)

    def network_delete(self, context, network_id):
        """Notify agents that a network was deleted."""
        msg = self.make_msg('network_delete', network_id=network_id)
        self.fanout_cast(context, msg, topic=self.topic_network_delete)

    def port_update(self, context, port, network_type, segmentation_id,
                    physical_network):
        """Notify agents that a port was updated."""
        msg = self.make_msg('port_update',
                            port=port,
                            network_type=network_type,
                            segmentation_id=segmentation_id,
                            physical_network=physical_network)
        self.fanout_cast(context, msg, topic=self.topic_port_update)

    def port_delete(self, context, port_id):
        """Notify agents that a port was deleted."""
        msg = self.make_msg('port_delete', port_id=port_id)
        self.fanout_cast(context, msg, topic=self.topic_port_delete)

    def tunnel_update(self, context, tunnel_ip, tunnel_id):
        """Notify agents of a tunnel endpoint update."""
        msg = self.make_msg('tunnel_update',
                            tunnel_ip=tunnel_ip,
                            tunnel_id=tunnel_id)
        self.fanout_cast(context, msg, topic=self.topic_tunnel_update)
@@ -1,16 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2013 Cloudbase Solutions SRL -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. diff --git a/neutron/plugins/hyperv/common/constants.py b/neutron/plugins/hyperv/common/constants.py deleted file mode 100644 index b36d9b559..000000000 --- a/neutron/plugins/hyperv/common/constants.py +++ /dev/null @@ -1,23 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2013 Cloudbase Solutions SRL -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
# Topic used for tunnel notifications between the plugin and the agent.
TUNNEL = 'tunnel'

# Sentinel vlan_id stored in the vlan allocations table to mark a flat
# (untagged) network.
FLAT_VLAN_ID = -1
# Persistence layer for the Hyper-V plugin: VLAN allocation pools and
# network bindings.

from six import moves
from sqlalchemy.orm import exc

from neutron.common import exceptions as n_exc
import neutron.db.api as db_api
from neutron.db import models_v2
from neutron.openstack.common import log as logging
from neutron.plugins.hyperv.common import constants
from neutron.plugins.hyperv import model as hyperv_model

LOG = logging.getLogger(__name__)


class HyperVPluginDB(object):
    """DB accessors for VLAN allocations and network bindings.

    Fix over the previous revision: ``dict.itervalues()`` (Python 2
    only) is replaced with ``dict.values()``, matching the file's use of
    ``six`` for cross-version compatibility.
    """

    def initialize(self):
        db_api.configure_db()

    def reserve_vlan(self, session):
        """Allocate any free (physical_network, vlan_id) pair."""
        with session.begin(subtransactions=True):
            alloc_q = session.query(hyperv_model.VlanAllocation)
            alloc_q = alloc_q.filter_by(allocated=False)
            alloc = alloc_q.first()
            if alloc:
                LOG.debug(_("Reserving vlan %(vlan_id)s on physical network "
                            "%(physical_network)s from pool"),
                          {'vlan_id': alloc.vlan_id,
                           'physical_network': alloc.physical_network})
                alloc.allocated = True
                return (alloc.physical_network, alloc.vlan_id)
        raise n_exc.NoNetworkAvailable()

    def reserve_flat_net(self, session):
        """Allocate any free flat (untagged) physical network."""
        with session.begin(subtransactions=True):
            alloc_q = session.query(hyperv_model.VlanAllocation)
            alloc_q = alloc_q.filter_by(allocated=False,
                                        vlan_id=constants.FLAT_VLAN_ID)
            alloc = alloc_q.first()
            if alloc:
                LOG.debug(_("Reserving flat physical network "
                            "%(physical_network)s from pool"),
                          {'physical_network': alloc.physical_network})
                alloc.allocated = True
                return alloc.physical_network
        raise n_exc.NoNetworkAvailable()

    def reserve_specific_vlan(self, session, physical_network, vlan_id):
        """Allocate a specific (physical_network, vlan_id) pair.

        Raises FlatNetworkInUse / VlanIdInUse when already allocated and
        NoNetworkAvailable when the pair is not in the pool.
        """
        with session.begin(subtransactions=True):
            try:
                alloc_q = session.query(hyperv_model.VlanAllocation)
                alloc_q = alloc_q.filter_by(
                    physical_network=physical_network,
                    vlan_id=vlan_id)
                alloc = alloc_q.one()
                if alloc.allocated:
                    if vlan_id == constants.FLAT_VLAN_ID:
                        raise n_exc.FlatNetworkInUse(
                            physical_network=physical_network)
                    else:
                        raise n_exc.VlanIdInUse(
                            vlan_id=vlan_id,
                            physical_network=physical_network)
                LOG.debug(_("Reserving specific vlan %(vlan_id)s on physical "
                            "network %(physical_network)s from pool"),
                          {'vlan_id': vlan_id,
                           'physical_network': physical_network})
                alloc.allocated = True
            except exc.NoResultFound:
                raise n_exc.NoNetworkAvailable()

    def reserve_specific_flat_net(self, session, physical_network):
        return self.reserve_specific_vlan(session, physical_network,
                                          constants.FLAT_VLAN_ID)

    def add_network_binding(self, session, network_id, network_type,
                            physical_network, segmentation_id):
        with session.begin(subtransactions=True):
            binding = hyperv_model.NetworkBinding(
                network_id, network_type,
                physical_network,
                segmentation_id)
            session.add(binding)

    def get_port(self, port_id):
        """Return the port row, or None if it does not exist."""
        session = db_api.get_session()
        try:
            port = session.query(models_v2.Port).filter_by(id=port_id).one()
        except exc.NoResultFound:
            port = None
        return port

    def get_network_binding(self, session, network_id):
        """Return the network binding row, or None if not found."""
        session = session or db_api.get_session()
        try:
            binding_q = session.query(hyperv_model.NetworkBinding)
            binding_q = binding_q.filter_by(network_id=network_id)
            return binding_q.one()
        except exc.NoResultFound:
            return

    def set_port_status(self, port_id, status):
        """Update a port's status; raise PortNotFound if missing."""
        session = db_api.get_session()
        try:
            port = session.query(models_v2.Port).filter_by(id=port_id).one()
            port['status'] = status
            session.merge(port)
            session.flush()
        except exc.NoResultFound:
            raise n_exc.PortNotFound(port_id=port_id)

    def release_vlan(self, session, physical_network, vlan_id):
        """Mark a (physical_network, vlan_id) pair as unallocated."""
        with session.begin(subtransactions=True):
            try:
                alloc_q = session.query(hyperv_model.VlanAllocation)
                alloc_q = alloc_q.filter_by(physical_network=physical_network,
                                            vlan_id=vlan_id)
                alloc = alloc_q.one()
                alloc.allocated = False
                LOG.debug(_("Releasing vlan %(vlan_id)s on physical network "
                            "%(physical_network)s"),
                          {'vlan_id': vlan_id,
                           'physical_network': physical_network})
            except exc.NoResultFound:
                LOG.warning(_("vlan_id %(vlan_id)s on physical network "
                              "%(physical_network)s not found"),
                            {'vlan_id': vlan_id,
                             'physical_network': physical_network})

    def _add_missing_allocatable_vlans(self, session, vlan_ids,
                                       physical_network):
        # vlan_ids contains only ids not yet present in the table.
        for vlan_id in sorted(vlan_ids):
            alloc = hyperv_model.VlanAllocation(
                physical_network, vlan_id)
            session.add(alloc)

    def _remove_non_allocatable_vlans(self, session,
                                      physical_network,
                                      vlan_ids,
                                      allocations):
        if physical_network in allocations:
            for alloc in allocations[physical_network]:
                try:
                    # see if vlan is allocatable
                    vlan_ids.remove(alloc.vlan_id)
                except KeyError:
                    # it's not allocatable, so check if its allocated
                    if not alloc.allocated:
                        # it's not, so remove it from table
                        LOG.debug(_(
                            "Removing vlan %(vlan_id)s on "
                            "physical network "
                            "%(physical_network)s from pool"),
                            {'vlan_id': alloc.vlan_id,
                             'physical_network': physical_network})
                        session.delete(alloc)
            del allocations[physical_network]

    def _remove_unconfigured_vlans(self, session, allocations):
        # FIX: .values() instead of Python-2-only .itervalues().
        for allocs in allocations.values():
            for alloc in allocs:
                if not alloc.allocated:
                    LOG.debug(_("Removing vlan %(vlan_id)s on physical "
                                "network %(physical_network)s from pool"),
                              {'vlan_id': alloc.vlan_id,
                               'physical_network': alloc.physical_network})
                    session.delete(alloc)

    def sync_vlan_allocations(self, network_vlan_ranges):
        """Synchronize vlan_allocations table with configured VLAN ranges."""

        session = db_api.get_session()
        with session.begin():
            # get existing allocations for all physical networks
            allocations = dict()
            allocs_q = session.query(hyperv_model.VlanAllocation)
            for alloc in allocs_q:
                allocations.setdefault(alloc.physical_network,
                                       set()).add(alloc)

            # process vlan ranges for each configured physical network
            for physical_network, vlan_ranges in network_vlan_ranges.items():
                # determine current configured allocatable vlans for this
                # physical network
                vlan_ids = set()
                for vlan_range in vlan_ranges:
                    vlan_ids |= set(moves.xrange(vlan_range[0],
                                                 vlan_range[1] + 1))

                # remove from table unallocated vlans not currently allocatable
                self._remove_non_allocatable_vlans(session,
                                                   physical_network,
                                                   vlan_ids,
                                                   allocations)

                # add missing allocatable vlans to table
                self._add_missing_allocatable_vlans(session, vlan_ids,
                                                    physical_network)

            # remove from table unallocated vlans for any unconfigured physical
            # networks
            self._remove_unconfigured_vlans(session, allocations)
# Provider-network strategy objects: one per supported tenant network
# type (local, flat, vlan). The plugin dispatches type-specific
# create/delete/dict-extension behavior to these.


class BaseNetworkProvider(object):
    """Default no-op strategy; subclasses override per network type."""

    def __init__(self):
        self._db = hyperv_db.HyperVPluginDB()

    def create_network(self, session, attrs):
        pass

    def delete_network(self, session, binding):
        pass

    def extend_network_dict(self, network, binding):
        pass


class LocalNetworkProvider(BaseNetworkProvider):
    """'local' networks: neither physical_network nor segmentation_id
    may be specified by the caller.
    """

    def create_network(self, session, attrs):
        net_type = attrs.get(provider.NETWORK_TYPE)

        if attributes.is_attr_set(attrs.get(provider.SEGMENTATION_ID)):
            msg = _("segmentation_id specified "
                    "for %s network") % net_type
            raise n_exc.InvalidInput(error_message=msg)
        attrs[provider.SEGMENTATION_ID] = None

        if attributes.is_attr_set(attrs.get(provider.PHYSICAL_NETWORK)):
            msg = _("physical_network specified "
                    "for %s network") % net_type
            raise n_exc.InvalidInput(error_message=msg)
        attrs[provider.PHYSICAL_NETWORK] = None

    def extend_network_dict(self, network, binding):
        network[provider.PHYSICAL_NETWORK] = None
        network[provider.SEGMENTATION_ID] = None


class FlatNetworkProvider(BaseNetworkProvider):
    """'flat' (untagged) networks; tracked in the VLAN allocation table
    using the FLAT_VLAN_ID sentinel.
    """

    def create_network(self, session, attrs):
        net_type = attrs.get(provider.NETWORK_TYPE)

        if attributes.is_attr_set(attrs.get(provider.SEGMENTATION_ID)):
            msg = _("segmentation_id specified "
                    "for %s network") % net_type
            raise n_exc.InvalidInput(error_message=msg)
        attrs[provider.SEGMENTATION_ID] = constants.FLAT_VLAN_ID

        phys_net = attrs.get(provider.PHYSICAL_NETWORK)
        if attributes.is_attr_set(phys_net):
            # Caller picked the network; reserve exactly that one.
            self._db.reserve_specific_flat_net(session, phys_net)
        else:
            # Pick any free flat network from the pool.
            attrs[provider.PHYSICAL_NETWORK] = self._db.reserve_flat_net(
                session)

    def delete_network(self, session, binding):
        self._db.release_vlan(session, binding.physical_network,
                              constants.FLAT_VLAN_ID)

    def extend_network_dict(self, network, binding):
        network[provider.PHYSICAL_NETWORK] = binding.physical_network


class VlanNetworkProvider(BaseNetworkProvider):
    """'vlan' networks: reserves a (physical_network, vlan id) pair."""

    def create_network(self, session, attrs):
        seg_id = attrs.get(provider.SEGMENTATION_ID)
        if attributes.is_attr_set(seg_id):
            # Caller-specified vlan: the physical network is mandatory.
            phys_net = attrs.get(provider.PHYSICAL_NETWORK)
            if not attributes.is_attr_set(phys_net):
                msg = _("physical_network not provided")
                raise n_exc.InvalidInput(error_message=msg)
            self._db.reserve_specific_vlan(session, phys_net, seg_id)
        else:
            # Allocate any free pair from the pool.
            (phys_net, seg_id) = self._db.reserve_vlan(session)
            attrs[provider.SEGMENTATION_ID] = seg_id
            attrs[provider.PHYSICAL_NETWORK] = phys_net

    def delete_network(self, session, binding):
        self._db.release_vlan(
            session, binding.physical_network,
            binding.segmentation_id)

    def extend_network_dict(self, network, binding):
        network[provider.PHYSICAL_NETWORK] = binding.physical_network
        network[provider.SEGMENTATION_ID] = binding.segmentation_id
" - "Agent terminated!") % tenant_network_type - raise n_exc.InvalidInput(error_message=msg) - self._tenant_network_type = tenant_network_type - - def _setup_rpc(self): - # RPC support - self.service_topics = {svc_constants.CORE: topics.PLUGIN, - svc_constants.L3_ROUTER_NAT: topics.L3PLUGIN} - self.conn = rpc_compat.create_connection(new=True) - self.notifier = agent_notifier_api.AgentNotifierApi( - topics.AGENT) - self.endpoints = [rpc_callbacks.HyperVRpcCallbacks(self.notifier), - agents_db.AgentExtRpcCallback()] - for svc_topic in self.service_topics.values(): - self.conn.create_consumer(svc_topic, self.endpoints, fanout=False) - # Consume from all consumers in threads - self.conn.consume_in_threads() - - def _parse_network_vlan_ranges(self): - self._network_vlan_ranges = plugin_utils.parse_network_vlan_ranges( - cfg.CONF.HYPERV.network_vlan_ranges) - LOG.info(_("Network VLAN ranges: %s"), self._network_vlan_ranges) - - def _check_vlan_id_in_range(self, physical_network, vlan_id): - for r in self._network_vlan_ranges[physical_network]: - if vlan_id >= r[0] and vlan_id <= r[1]: - return True - return False - - def _create_network_providers_map(self): - self._network_providers_map = { - svc_constants.TYPE_LOCAL: LocalNetworkProvider(), - svc_constants.TYPE_FLAT: FlatNetworkProvider(), - svc_constants.TYPE_VLAN: VlanNetworkProvider() - } - - def _process_provider_create(self, context, session, attrs): - network_type = attrs.get(provider.NETWORK_TYPE) - network_type_set = attributes.is_attr_set(network_type) - if not network_type_set: - if self._tenant_network_type == svc_constants.TYPE_NONE: - raise n_exc.TenantNetworksDisabled() - network_type = self._tenant_network_type - attrs[provider.NETWORK_TYPE] = network_type - - if network_type not in self._network_providers_map: - msg = _("Network type %s not supported") % network_type - raise n_exc.InvalidInput(error_message=msg) - p = self._network_providers_map[network_type] - # Provider specific network creation - 
p.create_network(session, attrs) - - def create_network(self, context, network): - session = context.session - with session.begin(subtransactions=True): - network_attrs = network['network'] - self._process_provider_create(context, session, network_attrs) - - net = super(HyperVNeutronPlugin, self).create_network( - context, network) - - network_type = network_attrs[provider.NETWORK_TYPE] - physical_network = network_attrs[provider.PHYSICAL_NETWORK] - segmentation_id = network_attrs[provider.SEGMENTATION_ID] - - self._db.add_network_binding( - session, net['id'], network_type, - physical_network, segmentation_id) - - self._process_l3_create(context, net, network['network']) - self._extend_network_dict_provider(context, net) - - LOG.debug(_("Created network: %s"), net['id']) - return net - - def _extend_network_dict_provider(self, context, network): - binding = self._db.get_network_binding( - context.session, network['id']) - network[provider.NETWORK_TYPE] = binding.network_type - p = self._network_providers_map[binding.network_type] - p.extend_network_dict(network, binding) - - def update_network(self, context, id, network): - provider._raise_if_updates_provider_attributes(network['network']) - - session = context.session - with session.begin(subtransactions=True): - net = super(HyperVNeutronPlugin, self).update_network(context, id, - network) - self._process_l3_update(context, net, network['network']) - self._extend_network_dict_provider(context, net) - return net - - def delete_network(self, context, id): - session = context.session - with session.begin(subtransactions=True): - binding = self._db.get_network_binding(session, id) - self._process_l3_delete(context, id) - super(HyperVNeutronPlugin, self).delete_network(context, id) - p = self._network_providers_map[binding.network_type] - p.delete_network(session, binding) - # the network_binding record is deleted via cascade from - # the network record, so explicit removal is not necessary - 
self.notifier.network_delete(context, id) - - def get_network(self, context, id, fields=None): - net = super(HyperVNeutronPlugin, self).get_network(context, id, None) - self._extend_network_dict_provider(context, net) - return self._fields(net, fields) - - def get_networks(self, context, filters=None, fields=None): - nets = super(HyperVNeutronPlugin, self).get_networks( - context, filters, None) - for net in nets: - self._extend_network_dict_provider(context, net) - - return [self._fields(net, fields) for net in nets] - - def create_port(self, context, port): - port_data = port['port'] - port = super(HyperVNeutronPlugin, self).create_port(context, port) - self._process_portbindings_create_and_update(context, - port_data, - port) - return port - - def update_port(self, context, id, port): - original_port = super(HyperVNeutronPlugin, self).get_port( - context, id) - port_data = port['port'] - port = super(HyperVNeutronPlugin, self).update_port(context, id, port) - self._process_portbindings_create_and_update(context, - port_data, - port) - if original_port['admin_state_up'] != port['admin_state_up']: - binding = self._db.get_network_binding( - None, port['network_id']) - self.notifier.port_update(context, port, - binding.network_type, - binding.segmentation_id, - binding.physical_network) - return port - - def delete_port(self, context, id, l3_port_check=True): - # if needed, check to see if this is a port owned by - # and l3-router. If so, we should prevent deletion. 
- if l3_port_check: - self.prevent_l3_port_deletion(context, id) - self.disassociate_floatingips(context, id) - - super(HyperVNeutronPlugin, self).delete_port(context, id) - self.notifier.port_delete(context, id) diff --git a/neutron/plugins/hyperv/model.py b/neutron/plugins/hyperv/model.py deleted file mode 100644 index 808d2e591..000000000 --- a/neutron/plugins/hyperv/model.py +++ /dev/null @@ -1,55 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2013 Cloudbase Solutions SRL -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# @author: Alessandro Pilotti, Cloudbase Solutions Srl - -from sqlalchemy import Boolean, Column, ForeignKey, Integer, String - -from neutron.db import model_base - - -class VlanAllocation(model_base.BASEV2): - """Represents allocation state of vlan_id on physical network.""" - __tablename__ = 'hyperv_vlan_allocations' - - physical_network = Column(String(64), nullable=False, primary_key=True) - vlan_id = Column(Integer, nullable=False, primary_key=True, - autoincrement=False) - allocated = Column(Boolean, nullable=False) - - def __init__(self, physical_network, vlan_id): - self.physical_network = physical_network - self.vlan_id = vlan_id - self.allocated = False - - -class NetworkBinding(model_base.BASEV2): - """Represents binding of virtual network to physical realization.""" - __tablename__ = 'hyperv_network_bindings' - - network_id = Column(String(36), - ForeignKey('networks.id', ondelete="CASCADE"), - primary_key=True) - network_type = Column(String(32), nullable=False) - physical_network = Column(String(64)) - segmentation_id = Column(Integer) - - def __init__(self, network_id, network_type, physical_network, - segmentation_id): - self.network_id = network_id - self.network_type = network_type - self.physical_network = physical_network - self.segmentation_id = segmentation_id diff --git a/neutron/plugins/hyperv/rpc_callbacks.py b/neutron/plugins/hyperv/rpc_callbacks.py deleted file mode 100644 index 874059a58..000000000 --- a/neutron/plugins/hyperv/rpc_callbacks.py +++ /dev/null @@ -1,94 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2013 Cloudbase Solutions SRL -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# @author: Alessandro Pilotti, Cloudbase Solutions Srl - -from neutron.common import constants as q_const -from neutron.common import rpc_compat -from neutron.db import dhcp_rpc_base -from neutron.db import l3_rpc_base -from neutron.openstack.common import log as logging -from neutron.plugins.hyperv import db as hyperv_db - - -LOG = logging.getLogger(__name__) - - -class HyperVRpcCallbacks( - rpc_compat.RpcCallback, - dhcp_rpc_base.DhcpRpcCallbackMixin, - l3_rpc_base.L3RpcCallbackMixin): - - # Set RPC API version to 1.0 by default. - RPC_API_VERSION = '1.1' - - def __init__(self, notifier): - super(HyperVRpcCallbacks, self).__init__() - self.notifier = notifier - self._db = hyperv_db.HyperVPluginDB() - - def get_device_details(self, rpc_context, **kwargs): - """Agent requests device details.""" - agent_id = kwargs.get('agent_id') - device = kwargs.get('device') - LOG.debug(_("Device %(device)s details requested from %(agent_id)s"), - {'device': device, 'agent_id': agent_id}) - port = self._db.get_port(device) - if port: - binding = self._db.get_network_binding(None, port['network_id']) - entry = {'device': device, - 'network_id': port['network_id'], - 'port_id': port['id'], - 'admin_state_up': port['admin_state_up'], - 'network_type': binding.network_type, - 'segmentation_id': binding.segmentation_id, - 'physical_network': binding.physical_network} - # Set the port status to UP - self._db.set_port_status(port['id'], q_const.PORT_STATUS_ACTIVE) - else: - entry = {'device': device} - LOG.debug(_("%s can not be found in database"), device) - return entry - - 
def update_device_down(self, rpc_context, **kwargs): - """Device no longer exists on agent.""" - # TODO(garyk) - live migration and port status - agent_id = kwargs.get('agent_id') - device = kwargs.get('device') - LOG.debug(_("Device %(device)s no longer exists on %(agent_id)s"), - {'device': device, 'agent_id': agent_id}) - port = self._db.get_port(device) - if port: - entry = {'device': device, - 'exists': True} - # Set port status to DOWN - self._db.set_port_status(port['id'], q_const.PORT_STATUS_DOWN) - else: - entry = {'device': device, - 'exists': False} - LOG.debug(_("%s can not be found in database"), device) - return entry - - def tunnel_sync(self, rpc_context, **kwargs): - """Tunnel sync. - - Dummy function for ovs agent running on Linux to - work with Hyper-V plugin and agent. - """ - entry = dict() - entry['tunnels'] = {} - # Return the list of tunnels IP's to the agent - return entry diff --git a/neutron/plugins/ibm/README b/neutron/plugins/ibm/README deleted file mode 100644 index 732fd7776..000000000 --- a/neutron/plugins/ibm/README +++ /dev/null @@ -1,6 +0,0 @@ -IBM SDN-VE Neutron Plugin - -This plugin implements Neutron v2 APIs. - -For more details on how to use it please refer to the following page: -http://wiki.openstack.org/wiki/IBM-Neutron diff --git a/neutron/plugins/ibm/__init__.py b/neutron/plugins/ibm/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/neutron/plugins/ibm/agent/__init__.py b/neutron/plugins/ibm/agent/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/neutron/plugins/ibm/agent/sdnve_neutron_agent.py b/neutron/plugins/ibm/agent/sdnve_neutron_agent.py deleted file mode 100644 index e1c8d3ed7..000000000 --- a/neutron/plugins/ibm/agent/sdnve_neutron_agent.py +++ /dev/null @@ -1,270 +0,0 @@ -# Copyright 2014 IBM Corp. -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Mohammad Banikazemi, IBM Corp. - - -import socket -import sys -import time - -import eventlet -eventlet.monkey_patch() - -from oslo.config import cfg - -from neutron.agent.linux import ip_lib -from neutron.agent.linux import ovs_lib -from neutron.agent import rpc as agent_rpc -from neutron.common import config as common_config -from neutron.common import constants as n_const -from neutron.common import rpc_compat -from neutron.common import topics -from neutron.common import utils as n_utils -from neutron import context -from neutron.openstack.common import log as logging -from neutron.openstack.common import loopingcall -from neutron.plugins.ibm.common import config # noqa -from neutron.plugins.ibm.common import constants - - -LOG = logging.getLogger(__name__) - - -class SdnvePluginApi(agent_rpc.PluginApi): - - def sdnve_info(self, context, info): - return self.call(context, - self.make_msg('sdnve_info', info=info), - topic=self.topic) - - -class SdnveNeutronAgent(rpc_compat.RpcCallback): - - RPC_API_VERSION = '1.1' - - def __init__(self, integ_br, interface_mappings, - info, root_helper, polling_interval, - controller_ip, reset_br, out_of_band): - '''The agent initialization. - - Sets the following parameters and sets up the integration - bridge and physical interfaces if need be. - :param integ_br: name of the integration bridge. - :param interface_mappings: interfaces to physical networks. - :param info: local IP address of this hypervisor. - :param root_helper: utility to use when running shell cmds. 
- :param polling_interval: interval (secs) to poll DB. - :param controller_ip: Ip address of SDN-VE controller. - ''' - - super(SdnveNeutronAgent, self).__init__() - self.root_helper = root_helper - self.int_bridge_name = integ_br - self.controller_ip = controller_ip - self.interface_mappings = interface_mappings - self.polling_interval = polling_interval - self.info = info - self.reset_br = reset_br - self.out_of_band = out_of_band - - self.agent_state = { - 'binary': 'neutron-sdnve-agent', - 'host': cfg.CONF.host, - 'topic': n_const.L2_AGENT_TOPIC, - 'configurations': {'interface_mappings': interface_mappings, - 'reset_br': self.reset_br, - 'out_of_band': self.out_of_band, - 'controller_ip': self.controller_ip}, - 'agent_type': n_const.AGENT_TYPE_SDNVE, - 'start_flag': True} - - if self.int_bridge_name: - self.int_br = self.setup_integration_br(integ_br, reset_br, - out_of_band, - self.controller_ip) - self.setup_physical_interfaces(self.interface_mappings) - else: - self.int_br = None - - self.setup_rpc() - - def _report_state(self): - try: - self.state_rpc.report_state(self.context, - self.agent_state) - self.agent_state.pop('start_flag', None) - except Exception: - LOG.exception(_("Failed reporting state!")) - - def setup_rpc(self): - if self.int_br: - mac = self.int_br.get_local_port_mac() - self.agent_id = '%s%s' % ('sdnve', (mac.replace(":", ""))) - else: - nameaddr = socket.gethostbyname(socket.gethostname()) - self.agent_id = '%s%s' % ('sdnve_', (nameaddr.replace(".", "_"))) - - self.topic = topics.AGENT - self.plugin_rpc = SdnvePluginApi(topics.PLUGIN) - self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN) - - self.context = context.get_admin_context_without_session() - self.endpoints = [self] - consumers = [[constants.INFO, topics.UPDATE]] - - self.connection = agent_rpc.create_consumers(self.endpoints, - self.topic, - consumers) - if self.polling_interval: - heartbeat = loopingcall.FixedIntervalLoopingCall( - self._report_state) - 
heartbeat.start(interval=self.polling_interval) - - # Plugin calls the agents through the following - def info_update(self, context, **kwargs): - LOG.debug(_("info_update received")) - info = kwargs.get('info', {}) - new_controller = info.get('new_controller') - out_of_band = info.get('out_of_band') - if self.int_br and new_controller: - LOG.debug(_("info_update received. New controller" - "is to be set to: %s"), new_controller) - self.int_br.run_vsctl(["set-controller", - self.int_bridge_name, - "tcp:" + new_controller]) - if out_of_band: - LOG.debug(_("info_update received. New controller" - "is set to be out of band")) - self.int_br.set_db_attribute("controller", - self.int_bridge_name, - "connection-mode", - "out-of-band") - - def setup_integration_br(self, bridge_name, reset_br, out_of_band, - controller_ip=None): - '''Sets up the integration bridge. - - Create the bridge and remove all existing flows if reset_br is True. - Otherwise, creates the bridge if not already existing. - :param bridge_name: the name of the integration bridge. - :param reset_br: A boolean to rest the bridge if True. - :param out_of_band: A boolean indicating controller is out of band. - :param controller_ip: IP address to use as the bridge controller. - :returns: the integration bridge - ''' - - int_br = ovs_lib.OVSBridge(bridge_name, self.root_helper) - if reset_br: - int_br.reset_bridge() - int_br.remove_all_flows() - else: - int_br.create() - - # set the controller - if controller_ip: - int_br.run_vsctl( - ["set-controller", bridge_name, "tcp:" + controller_ip]) - if out_of_band: - int_br.set_db_attribute("controller", bridge_name, - "connection-mode", "out-of-band") - - return int_br - - def setup_physical_interfaces(self, interface_mappings): - '''Sets up the physical network interfaces. - - Link physical interfaces to the integration bridge. - :param interface_mappings: map physical net names to interface names. 
- ''' - - for physical_network, interface in interface_mappings.iteritems(): - LOG.info(_("Mapping physical network %(physical_network)s to " - "interface %(interface)s"), - {'physical_network': physical_network, - 'interface': interface}) - # Connect the physical interface to the bridge - if not ip_lib.device_exists(interface, self.root_helper): - LOG.error(_("Interface %(interface)s for physical network " - "%(physical_network)s does not exist. Agent " - "terminated!"), - {'physical_network': physical_network, - 'interface': interface}) - raise SystemExit(1) - self.int_br.add_port(interface) - - def sdnve_info(self): - details = self.plugin_rpc.sdnve_info( - self.context, - {'info': self.info}) - return details - - def rpc_loop(self): - - while True: - start = time.time() - LOG.debug(_("Agent in the rpc loop.")) - - # sleep till end of polling interval - elapsed = (time.time() - start) - if (elapsed < self.polling_interval): - time.sleep(self.polling_interval - elapsed) - else: - LOG.info(_("Loop iteration exceeded interval " - "(%(polling_interval)s vs. 
%(elapsed)s)!"), - {'polling_interval': self.polling_interval, - 'elapsed': elapsed}) - - def daemon_loop(self): - self.rpc_loop() - - -def create_agent_config_map(config): - - interface_mappings = n_utils.parse_mappings( - config.SDNVE.interface_mappings) - - controller_ips = config.SDNVE.controller_ips - LOG.info(_("Controller IPs: %s"), controller_ips) - controller_ip = controller_ips[0] - - return { - 'integ_br': config.SDNVE.integration_bridge, - 'interface_mappings': interface_mappings, - 'controller_ip': controller_ip, - 'info': config.SDNVE.info, - 'root_helper': config.SDNVE_AGENT.root_helper, - 'polling_interval': config.SDNVE_AGENT.polling_interval, - 'reset_br': config.SDNVE.reset_bridge, - 'out_of_band': config.SDNVE.out_of_band} - - -def main(): - cfg.CONF.register_opts(ip_lib.OPTS) - common_config.init(sys.argv[1:]) - common_config.setup_logging(cfg.CONF) - - try: - agent_config = create_agent_config_map(cfg.CONF) - except ValueError as e: - LOG.exception(_("%s Agent terminated!"), e) - raise SystemExit(1) - - plugin = SdnveNeutronAgent(**agent_config) - - # Start everything. - LOG.info(_("Agent initialized successfully, now running... ")) - plugin.daemon_loop() diff --git a/neutron/plugins/ibm/common/__init__.py b/neutron/plugins/ibm/common/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/neutron/plugins/ibm/common/config.py b/neutron/plugins/ibm/common/config.py deleted file mode 100644 index 68e2dbd42..000000000 --- a/neutron/plugins/ibm/common/config.py +++ /dev/null @@ -1,74 +0,0 @@ -# Copyright 2014 IBM Corp. -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Mohammad Banikazemi, IBM Corp. - - -from oslo.config import cfg - - -DEFAULT_INTERFACE_MAPPINGS = [] -DEFAULT_CONTROLLER_IPS = ['127.0.0.1'] - -sdnve_opts = [ - cfg.BoolOpt('use_fake_controller', default=False, - help=_("If set to True uses a fake controller.")), - cfg.StrOpt('base_url', default='/one/nb/v2/', - help=_("Base URL for SDN-VE controller REST API")), - cfg.ListOpt('controller_ips', default=DEFAULT_CONTROLLER_IPS, - help=_("List of IP addresses of SDN-VE controller(s)")), - cfg.StrOpt('info', default='sdnve_info_string', - help=_("SDN-VE RPC subject")), - cfg.StrOpt('port', default='8443', - help=_("SDN-VE controller port number")), - cfg.StrOpt('format', default='json', - help=_("SDN-VE request/response format")), - cfg.StrOpt('userid', default='admin', - help=_("SDN-VE administrator user id")), - cfg.StrOpt('password', default='admin', secret=True, - help=_("SDN-VE administrator password")), - cfg.StrOpt('integration_bridge', - help=_("Integration bridge to use")), - cfg.BoolOpt('reset_bridge', default=True, - help=_("Reset the integration bridge before use")), - cfg.BoolOpt('out_of_band', default=True, - help=_("Indicating if controller is out of band or not")), - cfg.ListOpt('interface_mappings', - default=DEFAULT_INTERFACE_MAPPINGS, - help=_("List of :")), - cfg.StrOpt('default_tenant_type', default='OVERLAY', - help=_("Tenant type: OVERLAY (default) or OF")), - cfg.StrOpt('overlay_signature', default='SDNVE-OVERLAY', - help=_("The string in tenant description that indicates " - "the tenant is a OVERLAY tenant")), - 
cfg.StrOpt('of_signature', default='SDNVE-OF', - help=_("The string in tenant description that indicates " - "the tenant is a OF tenant")), -] - -sdnve_agent_opts = [ - cfg.IntOpt('polling_interval', default=2, - help=_("Agent polling interval if necessary")), - cfg.StrOpt('root_helper', default='sudo', - help=_("Using root helper")), - cfg.BoolOpt('rpc', default=True, - help=_("Whether using rpc")), - -] - - -cfg.CONF.register_opts(sdnve_opts, "SDNVE") -cfg.CONF.register_opts(sdnve_agent_opts, "SDNVE_AGENT") diff --git a/neutron/plugins/ibm/common/constants.py b/neutron/plugins/ibm/common/constants.py deleted file mode 100644 index 3acf9baff..000000000 --- a/neutron/plugins/ibm/common/constants.py +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright 2014 IBM Corp. -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Mohammad Banikazemi, IBM Corp. - - -import httplib - -# Topic for info notifications between the plugin and agent -INFO = 'info' - -TENANT_TYPE_OF = 'OF' -TENANT_TYPE_OVERLAY = 'OVERLAY' - -HTTP_ACCEPTABLE = [httplib.OK, - httplib.CREATED, - httplib.ACCEPTED, - httplib.NO_CONTENT - ] diff --git a/neutron/plugins/ibm/common/exceptions.py b/neutron/plugins/ibm/common/exceptions.py deleted file mode 100644 index d2e5e7ed8..000000000 --- a/neutron/plugins/ibm/common/exceptions.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright 2014 IBM Corp. -# -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Mohammad Banikazemi, IBM Corp. - -from neutron.common import exceptions - - -class SdnveException(exceptions.NeutronException): - message = _("An unexpected error occurred in the SDN-VE Plugin. " - "Here is the error message: %(msg)s") - - -class BadInputException(exceptions.BadRequest): - message = _("The input does not contain nececessary info: %(msg)s") diff --git a/neutron/plugins/ibm/sdnve_api.py b/neutron/plugins/ibm/sdnve_api.py deleted file mode 100644 index 50e689c1c..000000000 --- a/neutron/plugins/ibm/sdnve_api.py +++ /dev/null @@ -1,388 +0,0 @@ -# Copyright 2014 IBM Corp. -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Mohammad Banikazemi, IBM Corp. 
- - -import httplib -import urllib - -import httplib2 -from keystoneclient.v2_0 import client as keyclient -from oslo.config import cfg - -from neutron.api.v2 import attributes -from neutron.openstack.common import log as logging -from neutron.plugins.ibm.common import config # noqa -from neutron.plugins.ibm.common import constants -from neutron import wsgi - -LOG = logging.getLogger(__name__) - -SDNVE_VERSION = '2.0' -SDNVE_ACTION_PREFIX = '/sdnve' -SDNVE_RETRIES = 0 -SDNVE_RETRIY_INTERVAL = 1 -SDNVE_TENANT_TYPE_OVERLAY = u'DOVE' -SDNVE_URL = 'https://%s:%s%s' - - -class RequestHandler(object): - '''Handles processing requests to and responses from controller.''' - - def __init__(self, controller_ips=None, port=None, ssl=None, - base_url=None, userid=None, password=None, - timeout=10, formats=None): - '''Initializes the RequestHandler for communication with controller - - Following keyword arguments are used; if not specified, default - values are used. - :param port: Username for authentication. - :param timeout: Time out for http requests. - :param userid: User id for accessing controller. - :param password: Password for accessing the controller. - :param base_url: The base url for the controller. - :param controller_ips: List of controller IP addresses. - :param formats: Supported formats. 
- ''' - self.port = port or cfg.CONF.SDNVE.port - self.timeout = timeout - self._s_meta = None - self.connection = None - self.httpclient = httplib2.Http( - disable_ssl_certificate_validation=True) - self.cookie = None - - userid = userid or cfg.CONF.SDNVE.userid - password = password or cfg.CONF.SDNVE.password - if (userid and password): - self.httpclient.add_credentials(userid, password) - - self.base_url = base_url or cfg.CONF.SDNVE.base_url - self.controller_ips = controller_ips or cfg.CONF.SDNVE.controller_ips - - LOG.info(_("The IP addr of available SDN-VE controllers: %s"), - self.controller_ips) - self.controller_ip = self.controller_ips[0] - LOG.info(_("The SDN-VE controller IP address: %s"), - self.controller_ip) - - self.new_controller = False - self.format = formats or cfg.CONF.SDNVE.format - - self.version = SDNVE_VERSION - self.action_prefix = SDNVE_ACTION_PREFIX - self.retries = SDNVE_RETRIES - self.retry_interval = SDNVE_RETRIY_INTERVAL - - def serialize(self, data): - '''Serializes a dictionary with a single key.''' - - if isinstance(data, dict): - return wsgi.Serializer().serialize(data, self.content_type()) - elif data: - raise TypeError(_("unable to serialize object type: '%s'") % - type(data)) - - def deserialize(self, data, status_code): - '''Deserializes an xml or json string into a dictionary.''' - - # NOTE(mb): Temporary fix for backend controller requirement - data = data.replace("router_external", "router:external") - - if status_code == httplib.NO_CONTENT: - return data - try: - deserialized_data = wsgi.Serializer( - metadata=self._s_meta).deserialize(data, self.content_type()) - deserialized_data = deserialized_data['body'] - except Exception: - deserialized_data = data - - return deserialized_data - - def content_type(self, format=None): - '''Returns the mime-type for either 'xml' or 'json'.''' - - return 'application/%s' % (format or self.format) - - def delete(self, url, body=None, headers=None, params=None): - return 
self.do_request("DELETE", url, body=body, - headers=headers, params=params) - - def get(self, url, body=None, headers=None, params=None): - return self.do_request("GET", url, body=body, - headers=headers, params=params) - - def post(self, url, body=None, headers=None, params=None): - return self.do_request("POST", url, body=body, - headers=headers, params=params) - - def put(self, url, body=None, headers=None, params=None): - return self.do_request("PUT", url, body=body, - headers=headers, params=params) - - def do_request(self, method, url, body=None, headers=None, - params=None, connection_type=None): - - status_code = -1 - replybody_deserialized = '' - - if body: - body = self.serialize(body) - - self.headers = headers or {'Content-Type': self.content_type()} - if self.cookie: - self.headers['cookie'] = self.cookie - - if self.controller_ip != self.controller_ips[0]: - controllers = [self.controller_ip] - else: - controllers = [] - controllers.extend(self.controller_ips) - - for controller_ip in controllers: - serverurl = SDNVE_URL % (controller_ip, self.port, self.base_url) - myurl = serverurl + url - if params and isinstance(params, dict): - myurl += '?' + urllib.urlencode(params, doseq=1) - - try: - LOG.debug(_("Sending request to SDN-VE. url: " - "%(myurl)s method: %(method)s body: " - "%(body)s header: %(header)s "), - {'myurl': myurl, 'method': method, - 'body': body, 'header': self.headers}) - resp, replybody = self.httpclient.request( - myurl, method=method, body=body, headers=self.headers) - LOG.debug(("Response recd from SDN-VE. 
resp: %(resp)s" - "body: %(body)s"), - {'resp': resp.status, 'body': replybody}) - status_code = resp.status - - except Exception as e: - LOG.error(_("Error: Could not reach server: %(url)s " - "Exception: %(excp)s."), - {'url': myurl, 'excp': e}) - self.cookie = None - continue - - if status_code not in constants.HTTP_ACCEPTABLE: - LOG.debug(_("Error message: %(reply)s -- Status: %(status)s"), - {'reply': replybody, 'status': status_code}) - else: - LOG.debug(_("Received response status: %s"), status_code) - - if resp.get('set-cookie'): - self.cookie = resp['set-cookie'] - replybody_deserialized = self.deserialize( - replybody, - status_code) - LOG.debug(_("Deserialized body: %s"), replybody_deserialized) - if controller_ip != self.controller_ip: - # bcast the change of controller - self.new_controller = True - self.controller_ip = controller_ip - - return (status_code, replybody_deserialized) - - return (httplib.REQUEST_TIMEOUT, 'Could not reach server(s)') - - -class Client(RequestHandler): - '''Client for SDNVE controller.''' - - def __init__(self): - '''Initialize a new SDNVE client.''' - super(Client, self).__init__() - - self.keystoneclient = KeystoneClient() - - resource_path = { - 'network': "ln/networks/", - 'subnet': "ln/subnets/", - 'port': "ln/ports/", - 'tenant': "ln/tenants/", - 'router': "ln/routers/", - 'floatingip': "ln/floatingips/", - } - - def process_request(self, body): - '''Processes requests according to requirements of controller.''' - if self.format == 'json': - body = dict( - (k.replace(':', '_'), v) for k, v in body.items() - if attributes.is_attr_set(v)) - return body - - def sdnve_list(self, resource, **params): - '''Fetches a list of resources.''' - - res = self.resource_path.get(resource, None) - if not res: - LOG.info(_("Bad resource for forming a list request")) - return 0, '' - - return self.get(res, params=params) - - def sdnve_show(self, resource, specific, **params): - '''Fetches information of a certain resource.''' - - res = 
self.resource_path.get(resource, None) - if not res: - LOG.info(_("Bad resource for forming a show request")) - return 0, '' - - return self.get(res + specific, params=params) - - def sdnve_create(self, resource, body): - '''Creates a new resource.''' - - res = self.resource_path.get(resource, None) - if not res: - LOG.info(_("Bad resource for forming a create request")) - return 0, '' - - body = self.process_request(body) - status, data = self.post(res, body=body) - return (status, data) - - def sdnve_update(self, resource, specific, body=None): - '''Updates a resource.''' - - res = self.resource_path.get(resource, None) - if not res: - LOG.info(_("Bad resource for forming a update request")) - return 0, '' - - body = self.process_request(body) - return self.put(res + specific, body=body) - - def sdnve_delete(self, resource, specific): - '''Deletes the specified resource.''' - - res = self.resource_path.get(resource, None) - if not res: - LOG.info(_("Bad resource for forming a delete request")) - return 0, '' - - return self.delete(res + specific) - - def _tenant_id_conversion(self, osid): - return osid - - def sdnve_get_tenant_byid(self, os_tenant_id): - sdnve_tenant_id = self._tenant_id_conversion(os_tenant_id) - resp, content = self.sdnve_show('tenant', sdnve_tenant_id) - if resp in constants.HTTP_ACCEPTABLE: - tenant_id = content.get('id') - tenant_type = content.get('network_type') - if tenant_type == SDNVE_TENANT_TYPE_OVERLAY: - tenant_type = constants.TENANT_TYPE_OVERLAY - return tenant_id, tenant_type - return None, None - - def sdnve_check_and_create_tenant(self, os_tenant_id, network_type=None): - - if not os_tenant_id: - return - tenant_id, tenant_type = self.sdnve_get_tenant_byid(os_tenant_id) - if tenant_id: - if not network_type: - return tenant_id - if tenant_type != network_type: - LOG.info(_("Non matching tenant and network types: " - "%(ttype)s %(ntype)s"), - {'ttype': tenant_type, 'ntype': network_type}) - return - return tenant_id - - # Have to 
create a new tenant - sdnve_tenant_id = self._tenant_id_conversion(os_tenant_id) - if not network_type: - network_type = self.keystoneclient.get_tenant_type(os_tenant_id) - if network_type == constants.TENANT_TYPE_OVERLAY: - network_type = SDNVE_TENANT_TYPE_OVERLAY - - pinn_desc = ("Created by SDN-VE Neutron Plugin, OS project name = " + - self.keystoneclient.get_tenant_name(os_tenant_id)) - - res, content = self.sdnve_create('tenant', - {'id': sdnve_tenant_id, - 'name': os_tenant_id, - 'network_type': network_type, - 'description': pinn_desc}) - if res not in constants.HTTP_ACCEPTABLE: - return - - return sdnve_tenant_id - - def sdnve_get_controller(self): - if self.new_controller: - self.new_controller = False - return self.controller_ip - - -class KeystoneClient(object): - - def __init__(self, username=None, tenant_name=None, password=None, - auth_url=None): - - keystone_conf = cfg.CONF.keystone_authtoken - keystone_auth_url = ('%s://%s:%s/v2.0/' % - (keystone_conf.auth_protocol, - keystone_conf.auth_host, - keystone_conf.auth_port)) - - username = username or keystone_conf.admin_user - tenant_name = tenant_name or keystone_conf.admin_tenant_name - password = password or keystone_conf.admin_password - auth_url = auth_url or keystone_auth_url - - self.overlay_signature = cfg.CONF.SDNVE.overlay_signature - self.of_signature = cfg.CONF.SDNVE.of_signature - self.default_tenant_type = cfg.CONF.SDNVE.default_tenant_type - - self.client = keyclient.Client(username=username, - password=password, - tenant_name=tenant_name, - auth_url=auth_url) - - def get_tenant_byid(self, id): - - try: - return self.client.tenants.get(id) - except Exception: - LOG.exception(_("Did not find tenant: %r"), id) - - def get_tenant_type(self, id): - - tenant = self.get_tenant_byid(id) - if tenant: - description = tenant.description - if description: - if (description.find(self.overlay_signature) >= 0): - return constants.TENANT_TYPE_OVERLAY - if (description.find(self.of_signature) >= 0): - 
return constants.TENANT_TYPE_OF - return self.default_tenant_type - - def get_tenant_name(self, id): - - tenant = self.get_tenant_byid(id) - if tenant: - return tenant.name - return 'not found' diff --git a/neutron/plugins/ibm/sdnve_api_fake.py b/neutron/plugins/ibm/sdnve_api_fake.py deleted file mode 100644 index 74cfc8386..000000000 --- a/neutron/plugins/ibm/sdnve_api_fake.py +++ /dev/null @@ -1,64 +0,0 @@ -# Copyright 2014 IBM Corp. -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Mohammad Banikazemi, IBM Corp. 
- -from neutron.openstack.common import log as logging -from neutron.plugins.ibm.common import constants - -LOG = logging.getLogger(__name__) - -HTTP_OK = 200 - - -class FakeClient(): - - '''Fake Client for SDNVE controller.''' - - def __init__(self, **kwargs): - LOG.info(_('Fake SDNVE controller initialized')) - - def sdnve_list(self, resource, **_params): - LOG.info(_('Fake SDNVE controller: list')) - return (HTTP_OK, None) - - def sdnve_show(self, resource, specific, **_params): - LOG.info(_('Fake SDNVE controller: show')) - return (HTTP_OK, None) - - def sdnve_create(self, resource, body): - LOG.info(_('Fake SDNVE controller: create')) - return (HTTP_OK, None) - - def sdnve_update(self, resource, specific, body=None): - LOG.info(_('Fake SDNVE controller: update')) - return (HTTP_OK, None) - - def sdnve_delete(self, resource, specific): - LOG.info(_('Fake SDNVE controller: delete')) - return (HTTP_OK, None) - - def sdnve_get_tenant_byid(self, id): - LOG.info(_('Fake SDNVE controller: get tenant by id')) - return id, constants.TENANT_TYPE_OF - - def sdnve_check_and_create_tenant(self, id, network_type=None): - LOG.info(_('Fake SDNVE controller: check and create tenant')) - return id - - def sdnve_get_controller(self): - LOG.info(_('Fake SDNVE controller: get controller')) - return None diff --git a/neutron/plugins/ibm/sdnve_neutron_plugin.py b/neutron/plugins/ibm/sdnve_neutron_plugin.py deleted file mode 100644 index cf127f001..000000000 --- a/neutron/plugins/ibm/sdnve_neutron_plugin.py +++ /dev/null @@ -1,666 +0,0 @@ -# Copyright 2014 IBM Corp. -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Mohammad Banikazemi, IBM Corp. - - -import functools - -from oslo.config import cfg - -from neutron.common import constants as n_const -from neutron.common import exceptions as n_exc -from neutron.common import rpc_compat -from neutron.common import topics -from neutron.db import agents_db -from neutron.db import db_base_plugin_v2 -from neutron.db import external_net_db -from neutron.db import l3_gwmode_db -from neutron.db import portbindings_db -from neutron.db import quota_db # noqa -from neutron.extensions import portbindings -from neutron.openstack.common import excutils -from neutron.openstack.common import log as logging -from neutron.plugins.ibm.common import config # noqa -from neutron.plugins.ibm.common import constants -from neutron.plugins.ibm.common import exceptions as sdnve_exc -from neutron.plugins.ibm import sdnve_api as sdnve -from neutron.plugins.ibm import sdnve_api_fake as sdnve_fake - -LOG = logging.getLogger(__name__) - - -class SdnveRpcCallbacks(): - - def __init__(self, notifier): - self.notifier = notifier # used to notify the agent - - def sdnve_info(self, rpc_context, **kwargs): - '''Update new information.''' - info = kwargs.get('info') - # Notify all other listening agents - self.notifier.info_update(rpc_context, info) - return info - - -class AgentNotifierApi(rpc_compat.RpcProxy): - '''Agent side of the SDN-VE rpc API.''' - - BASE_RPC_API_VERSION = '1.0' - - def __init__(self, topic): - super(AgentNotifierApi, self).__init__( - topic=topic, default_version=self.BASE_RPC_API_VERSION) - - self.topic_info_update = 
topics.get_topic_name(topic, - constants.INFO, - topics.UPDATE) - - def info_update(self, context, info): - self.fanout_cast(context, - self.make_msg('info_update', - info=info), - topic=self.topic_info_update) - - -def _ha(func): - '''Supports the high availability feature of the controller.''' - - @functools.wraps(func) - def hawrapper(self, *args, **kwargs): - '''This wrapper sets the new controller if necessary - - When a controller is detected to be not responding, and a - new controller is chosen to be used in its place, this decorator - makes sure the existing integration bridges are set to point - to the new controller by calling the set_controller method. - ''' - ret_func = func(self, *args, **kwargs) - self.set_controller(args[0]) - return ret_func - return hawrapper - - -class SdnvePluginV2(db_base_plugin_v2.NeutronDbPluginV2, - external_net_db.External_net_db_mixin, - portbindings_db.PortBindingMixin, - l3_gwmode_db.L3_NAT_db_mixin, - agents_db.AgentDbMixin, - ): - - ''' - Implement the Neutron abstractions using SDN-VE SDN Controller. 
- ''' - - __native_bulk_support = False - __native_pagination_support = False - __native_sorting_support = False - - supported_extension_aliases = ["binding", "router", "external-net", - "agent", "quotas"] - - def __init__(self, configfile=None): - self.base_binding_dict = { - portbindings.VIF_TYPE: portbindings.VIF_TYPE_OVS, - portbindings.VIF_DETAILS: {portbindings.CAP_PORT_FILTER: False}} - - super(SdnvePluginV2, self).__init__() - self.setup_rpc() - self.sdnve_controller_select() - if self.fake_controller: - self.sdnve_client = sdnve_fake.FakeClient() - else: - self.sdnve_client = sdnve.Client() - - def sdnve_controller_select(self): - self.fake_controller = cfg.CONF.SDNVE.use_fake_controller - - def setup_rpc(self): - # RPC support - self.topic = topics.PLUGIN - self.conn = rpc_compat.create_connection(new=True) - self.notifier = AgentNotifierApi(topics.AGENT) - self.endpoints = [SdnveRpcCallbacks(self.notifier), - agents_db.AgentExtRpcCallback()] - self.conn.create_consumer(self.topic, self.endpoints, - fanout=False) - # Consume from all consumers in threads - self.conn.consume_in_threads() - - def _update_base_binding_dict(self, tenant_type): - if tenant_type == constants.TENANT_TYPE_OVERLAY: - self.base_binding_dict[ - portbindings.VIF_TYPE] = portbindings.VIF_TYPE_BRIDGE - if tenant_type == constants.TENANT_TYPE_OF: - self.base_binding_dict[ - portbindings.VIF_TYPE] = portbindings.VIF_TYPE_OVS - - def set_controller(self, context): - LOG.info(_("Set a new controller if needed.")) - new_controller = self.sdnve_client.sdnve_get_controller() - if new_controller: - self.notifier.info_update( - context, - {'new_controller': new_controller}) - LOG.info(_("Set the controller to a new controller: %s"), - new_controller) - - def _process_request(self, request, current): - new_request = dict( - (k, v) for k, v in request.items() - if v != current.get(k)) - - msg = _("Original SDN-VE HTTP request: %(orig)s; New request: %(new)s") - LOG.debug(msg, {'orig': request, 
'new': new_request}) - return new_request - - # - # Network - # - - @_ha - def create_network(self, context, network): - LOG.debug(_("Create network in progress: %r"), network) - session = context.session - - tenant_id = self._get_tenant_id_for_create(context, network['network']) - # Create a new SDN-VE tenant if need be - sdnve_tenant = self.sdnve_client.sdnve_check_and_create_tenant( - tenant_id) - if sdnve_tenant is None: - raise sdnve_exc.SdnveException( - msg=_('Create net failed: no SDN-VE tenant.')) - - with session.begin(subtransactions=True): - net = super(SdnvePluginV2, self).create_network(context, network) - self._process_l3_create(context, net, network['network']) - - # Create SDN-VE network - (res, data) = self.sdnve_client.sdnve_create('network', net) - if res not in constants.HTTP_ACCEPTABLE: - super(SdnvePluginV2, self).delete_network(context, net['id']) - raise sdnve_exc.SdnveException( - msg=(_('Create net failed in SDN-VE: %s') % res)) - - LOG.debug(_("Created network: %s"), net['id']) - return net - - @_ha - def update_network(self, context, id, network): - LOG.debug(_("Update network in progress: %r"), network) - session = context.session - - processed_request = {} - with session.begin(subtransactions=True): - original_network = super(SdnvePluginV2, self).get_network( - context, id) - processed_request['network'] = self._process_request( - network['network'], original_network) - net = super(SdnvePluginV2, self).update_network( - context, id, network) - self._process_l3_update(context, net, network['network']) - - if processed_request['network']: - (res, data) = self.sdnve_client.sdnve_update( - 'network', id, processed_request['network']) - if res not in constants.HTTP_ACCEPTABLE: - net = super(SdnvePluginV2, self).update_network( - context, id, {'network': original_network}) - raise sdnve_exc.SdnveException( - msg=(_('Update net failed in SDN-VE: %s') % res)) - - return net - - @_ha - def delete_network(self, context, id): - 
LOG.debug(_("Delete network in progress: %s"), id) - session = context.session - - with session.begin(subtransactions=True): - self._process_l3_delete(context, id) - super(SdnvePluginV2, self).delete_network(context, id) - - (res, data) = self.sdnve_client.sdnve_delete('network', id) - if res not in constants.HTTP_ACCEPTABLE: - LOG.error( - _("Delete net failed after deleting the network in DB: %s"), - res) - - @_ha - def get_network(self, context, id, fields=None): - LOG.debug(_("Get network in progress: %s"), id) - return super(SdnvePluginV2, self).get_network(context, id, fields) - - @_ha - def get_networks(self, context, filters=None, fields=None, sorts=None, - limit=None, marker=None, page_reverse=False): - LOG.debug(_("Get networks in progress")) - return super(SdnvePluginV2, self).get_networks( - context, filters, fields, sorts, limit, marker, page_reverse) - - # - # Port - # - - @_ha - def create_port(self, context, port): - LOG.debug(_("Create port in progress: %r"), port) - session = context.session - - # Set port status as 'ACTIVE' to avoid needing the agent - port['port']['status'] = n_const.PORT_STATUS_ACTIVE - port_data = port['port'] - - with session.begin(subtransactions=True): - port = super(SdnvePluginV2, self).create_port(context, port) - if 'id' not in port: - return port - # If the tenant_id is set to '' by create_port, add the id to - # the request being sent to the controller as the controller - # requires a tenant id - tenant_id = port.get('tenant_id') - if not tenant_id: - LOG.debug(_("Create port does not have tenant id info")) - original_network = super(SdnvePluginV2, self).get_network( - context, port['network_id']) - original_tenant_id = original_network['tenant_id'] - port['tenant_id'] = original_tenant_id - LOG.debug( - _("Create port does not have tenant id info; " - "obtained is: %s"), - port['tenant_id']) - - os_tenant_id = tenant_id - id_na, tenant_type = self.sdnve_client.sdnve_get_tenant_byid( - os_tenant_id) - 
self._update_base_binding_dict(tenant_type) - self._process_portbindings_create_and_update(context, - port_data, port) - - # NOTE(mb): Remove this block when controller is updated - # Remove the information that the controller does not accept - sdnve_port = port.copy() - sdnve_port.pop('device_id', None) - sdnve_port.pop('device_owner', None) - - (res, data) = self.sdnve_client.sdnve_create('port', sdnve_port) - if res not in constants.HTTP_ACCEPTABLE: - super(SdnvePluginV2, self).delete_port(context, port['id']) - raise sdnve_exc.SdnveException( - msg=(_('Create port failed in SDN-VE: %s') % res)) - - LOG.debug(_("Created port: %s"), port.get('id', 'id not found')) - return port - - @_ha - def update_port(self, context, id, port): - LOG.debug(_("Update port in progress: %r"), port) - session = context.session - - processed_request = {} - with session.begin(subtransactions=True): - original_port = super(SdnvePluginV2, self).get_port( - context, id) - processed_request['port'] = self._process_request( - port['port'], original_port) - updated_port = super(SdnvePluginV2, self).update_port( - context, id, port) - - os_tenant_id = updated_port['tenant_id'] - id_na, tenant_type = self.sdnve_client.sdnve_get_tenant_byid( - os_tenant_id) - self._update_base_binding_dict(tenant_type) - self._process_portbindings_create_and_update(context, - port['port'], - updated_port) - - if processed_request['port']: - (res, data) = self.sdnve_client.sdnve_update( - 'port', id, processed_request['port']) - if res not in constants.HTTP_ACCEPTABLE: - updated_port = super(SdnvePluginV2, self).update_port( - context, id, {'port': original_port}) - raise sdnve_exc.SdnveException( - msg=(_('Update port failed in SDN-VE: %s') % res)) - - return updated_port - - @_ha - def delete_port(self, context, id, l3_port_check=True): - LOG.debug(_("Delete port in progress: %s"), id) - - # if needed, check to see if this is a port owned by - # an l3-router. If so, we should prevent deletion. 
- if l3_port_check: - self.prevent_l3_port_deletion(context, id) - self.disassociate_floatingips(context, id) - - super(SdnvePluginV2, self).delete_port(context, id) - - (res, data) = self.sdnve_client.sdnve_delete('port', id) - if res not in constants.HTTP_ACCEPTABLE: - LOG.error( - _("Delete port operation failed in SDN-VE " - "after deleting the port from DB: %s"), res) - - # - # Subnet - # - - @_ha - def create_subnet(self, context, subnet): - LOG.debug(_("Create subnet in progress: %r"), subnet) - new_subnet = super(SdnvePluginV2, self).create_subnet(context, subnet) - - # Note(mb): Use of null string currently required by controller - sdnve_subnet = new_subnet.copy() - if subnet.get('gateway_ip') is None: - sdnve_subnet['gateway_ip'] = 'null' - (res, data) = self.sdnve_client.sdnve_create('subnet', sdnve_subnet) - if res not in constants.HTTP_ACCEPTABLE: - super(SdnvePluginV2, self).delete_subnet(context, - new_subnet['id']) - raise sdnve_exc.SdnveException( - msg=(_('Create subnet failed in SDN-VE: %s') % res)) - - LOG.debug(_("Subnet created: %s"), new_subnet['id']) - - return new_subnet - - @_ha - def update_subnet(self, context, id, subnet): - LOG.debug(_("Update subnet in progress: %r"), subnet) - session = context.session - - processed_request = {} - with session.begin(subtransactions=True): - original_subnet = super(SdnvePluginV2, self).get_subnet( - context, id) - processed_request['subnet'] = self._process_request( - subnet['subnet'], original_subnet) - updated_subnet = super(SdnvePluginV2, self).update_subnet( - context, id, subnet) - - if processed_request['subnet']: - # Note(mb): Use of string containing null required by controller - if 'gateway_ip' in processed_request['subnet']: - if processed_request['subnet'].get('gateway_ip') is None: - processed_request['subnet']['gateway_ip'] = 'null' - (res, data) = self.sdnve_client.sdnve_update( - 'subnet', id, processed_request['subnet']) - if res not in constants.HTTP_ACCEPTABLE: - for key in 
subnet['subnet'].keys(): - subnet['subnet'][key] = original_subnet[key] - super(SdnvePluginV2, self).update_subnet( - context, id, subnet) - raise sdnve_exc.SdnveException( - msg=(_('Update subnet failed in SDN-VE: %s') % res)) - - return updated_subnet - - @_ha - def delete_subnet(self, context, id): - LOG.debug(_("Delete subnet in progress: %s"), id) - super(SdnvePluginV2, self).delete_subnet(context, id) - - (res, data) = self.sdnve_client.sdnve_delete('subnet', id) - if res not in constants.HTTP_ACCEPTABLE: - LOG.error(_("Delete subnet operation failed in SDN-VE after " - "deleting the subnet from DB: %s"), res) - - # - # Router - # - - @_ha - def create_router(self, context, router): - LOG.debug(_("Create router in progress: %r"), router) - - if router['router']['admin_state_up'] is False: - LOG.warning(_('Ignoring admin_state_up=False for router=%r. ' - 'Overriding with True'), router) - router['router']['admin_state_up'] = True - - tenant_id = self._get_tenant_id_for_create(context, router['router']) - # Create a new SDN-VE tenant if need be - sdnve_tenant = self.sdnve_client.sdnve_check_and_create_tenant( - tenant_id) - if sdnve_tenant is None: - raise sdnve_exc.SdnveException( - msg=_('Create router failed: no SDN-VE tenant.')) - - new_router = super(SdnvePluginV2, self).create_router(context, router) - # Create SDN-VE router - (res, data) = self.sdnve_client.sdnve_create('router', new_router) - if res not in constants.HTTP_ACCEPTABLE: - super(SdnvePluginV2, self).delete_router(context, new_router['id']) - raise sdnve_exc.SdnveException( - msg=(_('Create router failed in SDN-VE: %s') % res)) - - LOG.debug(_("Router created: %r"), new_router) - return new_router - - @_ha - def update_router(self, context, id, router): - LOG.debug(_("Update router in progress: id=%(id)s " - "router=%(router)r"), - {'id': id, 'router': router}) - session = context.session - - processed_request = {} - if not router['router'].get('admin_state_up', True): - raise 
n_exc.NotImplementedError(_('admin_state_up=False ' - 'routers are not ' - 'supported.')) - - with session.begin(subtransactions=True): - original_router = super(SdnvePluginV2, self).get_router( - context, id) - processed_request['router'] = self._process_request( - router['router'], original_router) - updated_router = super(SdnvePluginV2, self).update_router( - context, id, router) - - if processed_request['router']: - egw = processed_request['router'].get('external_gateway_info') - # Check for existing empty set (different from None) in request - if egw == {}: - processed_request['router'][ - 'external_gateway_info'] = {'network_id': 'null'} - (res, data) = self.sdnve_client.sdnve_update( - 'router', id, processed_request['router']) - if res not in constants.HTTP_ACCEPTABLE: - super(SdnvePluginV2, self).update_router( - context, id, {'router': original_router}) - raise sdnve_exc.SdnveException( - msg=(_('Update router failed in SDN-VE: %s') % res)) - - return updated_router - - @_ha - def delete_router(self, context, id): - LOG.debug(_("Delete router in progress: %s"), id) - - super(SdnvePluginV2, self).delete_router(context, id) - - (res, data) = self.sdnve_client.sdnve_delete('router', id) - if res not in constants.HTTP_ACCEPTABLE: - LOG.error( - _("Delete router operation failed in SDN-VE after " - "deleting the router in DB: %s"), res) - - @_ha - def add_router_interface(self, context, router_id, interface_info): - LOG.debug(_("Add router interface in progress: " - "router_id=%(router_id)s " - "interface_info=%(interface_info)r"), - {'router_id': router_id, 'interface_info': interface_info}) - - new_interface = super(SdnvePluginV2, self).add_router_interface( - context, router_id, interface_info) - LOG.debug( - _("SdnvePluginV2.add_router_interface called. 
Port info: %s"), - new_interface) - request_info = interface_info.copy() - request_info['port_id'] = new_interface['port_id'] - # Add the subnet_id to the request sent to the controller - if 'subnet_id' not in interface_info: - request_info['subnet_id'] = new_interface['subnet_id'] - - (res, data) = self.sdnve_client.sdnve_update( - 'router', router_id + '/add_router_interface', request_info) - if res not in constants.HTTP_ACCEPTABLE: - super(SdnvePluginV2, self).remove_router_interface( - context, router_id, interface_info) - raise sdnve_exc.SdnveException( - msg=(_('Update router-add-interface failed in SDN-VE: %s') % - res)) - - LOG.debug(_("Added router interface: %r"), new_interface) - return new_interface - - def _add_router_interface_only(self, context, router_id, interface_info): - LOG.debug(_("Add router interface only called: " - "router_id=%(router_id)s " - "interface_info=%(interface_info)r"), - {'router_id': router_id, 'interface_info': interface_info}) - - port_id = interface_info.get('port_id') - if port_id: - (res, data) = self.sdnve_client.sdnve_update( - 'router', router_id + '/add_router_interface', interface_info) - if res not in constants.HTTP_ACCEPTABLE: - LOG.error(_("SdnvePluginV2._add_router_interface_only: " - "failed to add the interface in the roll back." 
- " of a remove_router_interface operation")) - - @_ha - def remove_router_interface(self, context, router_id, interface_info): - LOG.debug(_("Remove router interface in progress: " - "router_id=%(router_id)s " - "interface_info=%(interface_info)r"), - {'router_id': router_id, 'interface_info': interface_info}) - - subnet_id = interface_info.get('subnet_id') - port_id = interface_info.get('port_id') - if not subnet_id: - if not port_id: - raise sdnve_exc.BadInputException(msg=_('No port ID')) - myport = super(SdnvePluginV2, self).get_port(context, port_id) - LOG.debug(_("SdnvePluginV2.remove_router_interface port: %s"), - myport) - myfixed_ips = myport.get('fixed_ips') - if not myfixed_ips: - raise sdnve_exc.BadInputException(msg=_('No fixed IP')) - subnet_id = myfixed_ips[0].get('subnet_id') - if subnet_id: - interface_info['subnet_id'] = subnet_id - LOG.debug( - _("SdnvePluginV2.remove_router_interface subnet_id: %s"), - subnet_id) - else: - if not port_id: - # The backend requires port id info in the request - subnet = super(SdnvePluginV2, self).get_subnet(context, - subnet_id) - df = {'device_id': [router_id], - 'device_owner': [n_const.DEVICE_OWNER_ROUTER_INTF], - 'network_id': [subnet['network_id']]} - ports = self.get_ports(context, filters=df) - if ports: - pid = ports[0]['id'] - interface_info['port_id'] = pid - msg = ("SdnvePluginV2.remove_router_interface " - "subnet_id: %(sid)s port_id: %(pid)s") - LOG.debug(msg, {'sid': subnet_id, 'pid': pid}) - - (res, data) = self.sdnve_client.sdnve_update( - 'router', router_id + '/remove_router_interface', interface_info) - - if res not in constants.HTTP_ACCEPTABLE: - raise sdnve_exc.SdnveException( - msg=(_('Update router-remove-interface failed SDN-VE: %s') % - res)) - - session = context.session - with session.begin(subtransactions=True): - try: - info = super(SdnvePluginV2, self).remove_router_interface( - context, router_id, interface_info) - except Exception: - with excutils.save_and_reraise_exception(): - 
self._add_router_interface_only(context, - router_id, interface_info) - - return info - - # - # Floating Ip - # - - @_ha - def create_floatingip(self, context, floatingip): - LOG.debug(_("Create floatingip in progress: %r"), - floatingip) - new_floatingip = super(SdnvePluginV2, self).create_floatingip( - context, floatingip) - - (res, data) = self.sdnve_client.sdnve_create( - 'floatingip', {'floatingip': new_floatingip}) - if res not in constants.HTTP_ACCEPTABLE: - super(SdnvePluginV2, self).delete_floatingip( - context, new_floatingip['id']) - raise sdnve_exc.SdnveException( - msg=(_('Creating floating ip operation failed ' - 'in SDN-VE controller: %s') % res)) - - LOG.debug(_("Created floatingip : %r"), new_floatingip) - return new_floatingip - - @_ha - def update_floatingip(self, context, id, floatingip): - LOG.debug(_("Update floatingip in progress: %r"), floatingip) - session = context.session - - processed_request = {} - with session.begin(subtransactions=True): - original_floatingip = super( - SdnvePluginV2, self).get_floatingip(context, id) - processed_request['floatingip'] = self._process_request( - floatingip['floatingip'], original_floatingip) - updated_floatingip = super( - SdnvePluginV2, self).update_floatingip(context, id, floatingip) - - if processed_request['floatingip']: - (res, data) = self.sdnve_client.sdnve_update( - 'floatingip', id, - {'floatingip': processed_request['floatingip']}) - if res not in constants.HTTP_ACCEPTABLE: - super(SdnvePluginV2, self).update_floatingip( - context, id, {'floatingip': original_floatingip}) - raise sdnve_exc.SdnveException( - msg=(_('Update floating ip failed in SDN-VE: %s') % res)) - - return updated_floatingip - - @_ha - def delete_floatingip(self, context, id): - LOG.debug(_("Delete floatingip in progress: %s"), id) - super(SdnvePluginV2, self).delete_floatingip(context, id) - - (res, data) = self.sdnve_client.sdnve_delete('floatingip', id) - if res not in constants.HTTP_ACCEPTABLE: - LOG.error(_("Delete 
floatingip failed in SDN-VE: %s"), res) diff --git a/neutron/plugins/linuxbridge/README b/neutron/plugins/linuxbridge/README deleted file mode 100644 index b7601205f..000000000 --- a/neutron/plugins/linuxbridge/README +++ /dev/null @@ -1,169 +0,0 @@ -# -- Background - -The Neutron Linux Bridge plugin is a plugin that allows you to manage -connectivity between VMs on hosts that are capable of running a Linux Bridge. - -The Neutron Linux Bridge plugin consists of three components: - -1) The plugin itself: The plugin uses a database backend (mysql for - now) to store configuration and mappings that are used by the - agent. The mysql server runs on a central server (often the same - host as nova itself). - -2) The neutron service host which will be running neutron. This can - be run on the server running nova. - -3) An agent which runs on the host and communicates with the host operating - system. The agent gathers the configuration and mappings from - the mysql database running on the neutron host. - -The sections below describe how to configure and run the neutron -service with the Linux Bridge plugin. - -# -- Python library dependencies - - Make sure you have the following package(s) installedi on neutron server - host as well as any hosts which run the agent: - python-configobj - bridge-utils - python-mysqldb - sqlite3 - -# -- Nova configuration (controller node) - -1) Ensure that the neutron network manager is configured in the - nova.conf on the node that will be running nova-network. 
- -network_manager=nova.network.neutron.manager.NeutronManager - -# -- Nova configuration (compute node(s)) - -1) Configure the vif driver, and libvirt/vif type - -connection_type=libvirt -libvirt_type=qemu -libvirt_vif_type=ethernet -libvirt_vif_driver=nova.virt.libvirt.vif.NeutronLinuxBridgeVIFDriver -linuxnet_interface_driver=nova.network.linux_net.NeutronLinuxBridgeInterfaceDriver - -2) If you want a DHCP server to be run for the VMs to acquire IPs, - add the following flag to your nova.conf file: - -neutron_use_dhcp=true - -(Note: For more details on how to work with Neutron using Nova, i.e. how to create networks and such, - please refer to the top level Neutron README which points to the relevant documentation.) - -# -- Neutron configuration - -Make the Linux Bridge plugin the current neutron plugin - -- edit neutron.conf and change the core_plugin - -core_plugin = neutron.plugins.linuxbridge.lb_neutron_plugin.LinuxBridgePluginV2 - -# -- Database config. - -(Note: The plugin ships with a default SQLite in-memory database configuration, - and can be used to run tests without performing the suggested DB config below.) - -The Linux Bridge neutron plugin requires access to a mysql database in order -to store configuration and mappings that will be used by the agent. Here is -how to set up the database on the host that you will be running the neutron -service on. - -MySQL should be installed on the host, and all plugins and clients -must be configured with access to the database. - -To prep mysql, run: - -$ mysql -u root -p -e "create database neutron_linux_bridge" - -# log in to mysql service -$ mysql -u root -p -# The Linux Bridge Neutron agent running on each compute node must be able to -# make a mysql connection back to the main database server. 
-mysql> GRANT USAGE ON *.* to root@'yourremotehost' IDENTIFIED BY 'newpassword'; -# force update of authorization changes -mysql> FLUSH PRIVILEGES; - -(Note: If the remote connection fails to MySQL, you might need to add the IP address, - and/or fully-qualified hostname, and/or unqualified hostname in the above GRANT sql - command. Also, you might need to specify "ALL" instead of "USAGE".) - -# -- Plugin configuration - -- Edit the configuration file: - etc/neutron/plugins/linuxbridge/linuxbridge_conf.ini - Make sure it matches your mysql configuration. This file must be updated - with the addresses and credentials to access the database. - - Note: debug and logging information should be updated in etc/neutron.conf - - Note: When running the tests, set the connection type to sqlite, and when - actually running the server set it to mysql. At any given time, only one - of these should be active in the conf file (you can comment out the other). - -- On the neutron server, network_vlan_ranges must be configured in - linuxbridge_conf.ini to specify the names of the physical networks - managed by the linuxbridge plugin, along with the ranges of VLAN IDs - available on each physical network for allocation to virtual - networks. An entry of the form - "::" specifies a VLAN range on - the named physical network. An entry of the form - "" specifies a named network without making a - range of VLANs available for allocation. Networks specified using - either form are available for adminstrators to create provider flat - networks and provider VLANs. Multiple VLAN ranges can be specified - for the same physical network. 
- - The following example linuxbridge_conf.ini entry shows three - physical networks that can be used to create provider networks, with - ranges of VLANs available for allocation on two of them: - - [VLANS] - network_vlan_ranges = physnet1:1000:2999,physnet1:3000:3999,physnet2,physnet3:1:4094 - - -# -- Agent configuration - -- Edit the configuration file: - etc/neutron/plugins/linuxbridge/linuxbridge_conf.ini - -- Copy neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py - and etc/neutron/plugins/linuxbridge/linuxbridge_conf.ini - to the compute node. - -- Copy the neutron.conf file to the compute node - - Note: debug and logging information should be updated in etc/neutron.conf - -- On each compute node, the network_interface_mappings must be - configured in linuxbridge_conf.ini to map each physical network name - to the physical interface connecting the node to that physical - network. Entries are of the form - ":". For example, one compute - node may use the following physical_inteface_mappings entries: - - [LINUX_BRIDGE] - physical_interface_mappings = physnet1:eth1,physnet2:eth2,physnet3:eth3 - - while another might use: - - [LINUX_BRIDGE] - physical_interface_mappings = physnet1:em3,physnet2:em2,physnet3:em1 - - -$ Run the following: - python linuxbridge_neutron_agent.py --config-file neutron.conf - --config-file linuxbridge_conf.ini - - Note that the the user running the agent must have sudo priviliges - to run various networking commands. Also, the agent can be - configured to use neutron-rootwrap, limiting what commands it can - run via sudo. See http://wiki.openstack.org/Packager/Rootwrap for - details on rootwrap. - - As an alternative to coping the agent python file, if neutron is - installed on the compute node, the agent can be run as - bin/neutron-linuxbridge-agent. 
diff --git a/neutron/plugins/linuxbridge/__init__.py b/neutron/plugins/linuxbridge/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/neutron/plugins/linuxbridge/agent/__init__.py b/neutron/plugins/linuxbridge/agent/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py b/neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py deleted file mode 100755 index 5db728655..000000000 --- a/neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py +++ /dev/null @@ -1,1026 +0,0 @@ -#!/usr/bin/env python -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2012 Cisco Systems, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# -# Performs per host Linux Bridge configuration for Neutron. -# Based on the structure of the OpenVSwitch agent in the -# Neutron OpenVSwitch Plugin. -# @author: Sumit Naiksatam, Cisco Systems, Inc. 
- -import os -import sys -import time - -import eventlet -eventlet.monkey_patch() - -from oslo.config import cfg - -from neutron.agent import l2population_rpc as l2pop_rpc -from neutron.agent.linux import ip_lib -from neutron.agent.linux import utils -from neutron.agent import rpc as agent_rpc -from neutron.agent import securitygroups_rpc as sg_rpc -from neutron.common import config as common_config -from neutron.common import constants -from neutron.common import exceptions -from neutron.common import rpc_compat -from neutron.common import topics -from neutron.common import utils as q_utils -from neutron import context -from neutron.openstack.common import log as logging -from neutron.openstack.common import loopingcall -from neutron.plugins.common import constants as p_const -from neutron.plugins.linuxbridge.common import config # noqa -from neutron.plugins.linuxbridge.common import constants as lconst - - -LOG = logging.getLogger(__name__) - -BRIDGE_NAME_PREFIX = "brq" -TAP_INTERFACE_PREFIX = "tap" -BRIDGE_FS = "/sys/devices/virtual/net/" -BRIDGE_NAME_PLACEHOLDER = "bridge_name" -BRIDGE_INTERFACES_FS = BRIDGE_FS + BRIDGE_NAME_PLACEHOLDER + "/brif/" -DEVICE_NAME_PLACEHOLDER = "device_name" -BRIDGE_PORT_FS_FOR_DEVICE = BRIDGE_FS + DEVICE_NAME_PLACEHOLDER + "/brport" -VXLAN_INTERFACE_PREFIX = "vxlan-" - - -class NetworkSegment: - def __init__(self, network_type, physical_network, segmentation_id): - self.network_type = network_type - self.physical_network = physical_network - self.segmentation_id = segmentation_id - - -class LinuxBridgeManager: - def __init__(self, interface_mappings, root_helper): - self.interface_mappings = interface_mappings - self.root_helper = root_helper - self.ip = ip_lib.IPWrapper(self.root_helper) - # VXLAN related parameters: - self.local_ip = cfg.CONF.VXLAN.local_ip - self.vxlan_mode = lconst.VXLAN_NONE - if cfg.CONF.VXLAN.enable_vxlan: - self.local_int = self.get_interface_by_ip(self.local_ip) - if self.local_int: - 
self.check_vxlan_support() - else: - LOG.warning(_('VXLAN is enabled, a valid local_ip ' - 'must be provided')) - # Store network mapping to segments - self.network_map = {} - - def interface_exists_on_bridge(self, bridge, interface): - directory = '/sys/class/net/%s/brif' % bridge - for filename in os.listdir(directory): - if filename == interface: - return True - return False - - def get_bridge_name(self, network_id): - if not network_id: - LOG.warning(_("Invalid Network ID, will lead to incorrect bridge" - "name")) - bridge_name = BRIDGE_NAME_PREFIX + network_id[0:11] - return bridge_name - - def get_subinterface_name(self, physical_interface, vlan_id): - if not vlan_id: - LOG.warning(_("Invalid VLAN ID, will lead to incorrect " - "subinterface name")) - subinterface_name = '%s.%s' % (physical_interface, vlan_id) - return subinterface_name - - def get_tap_device_name(self, interface_id): - if not interface_id: - LOG.warning(_("Invalid Interface ID, will lead to incorrect " - "tap device name")) - tap_device_name = TAP_INTERFACE_PREFIX + interface_id[0:11] - return tap_device_name - - def get_vxlan_device_name(self, segmentation_id): - if 0 <= int(segmentation_id) <= constants.MAX_VXLAN_VNI: - return VXLAN_INTERFACE_PREFIX + str(segmentation_id) - else: - LOG.warning(_("Invalid Segmentation ID: %s, will lead to " - "incorrect vxlan device name"), segmentation_id) - - def get_all_neutron_bridges(self): - neutron_bridge_list = [] - bridge_list = os.listdir(BRIDGE_FS) - for bridge in bridge_list: - if bridge.startswith(BRIDGE_NAME_PREFIX): - neutron_bridge_list.append(bridge) - return neutron_bridge_list - - def get_interfaces_on_bridge(self, bridge_name): - if ip_lib.device_exists(bridge_name, root_helper=self.root_helper): - bridge_interface_path = BRIDGE_INTERFACES_FS.replace( - BRIDGE_NAME_PLACEHOLDER, bridge_name) - return os.listdir(bridge_interface_path) - else: - return [] - - def get_tap_devices_count(self, bridge_name): - bridge_interface_path = 
BRIDGE_INTERFACES_FS.replace( - BRIDGE_NAME_PLACEHOLDER, bridge_name) - try: - if_list = os.listdir(bridge_interface_path) - return len([interface for interface in if_list if - interface.startswith(TAP_INTERFACE_PREFIX)]) - except OSError: - return 0 - - def get_interface_by_ip(self, ip): - for device in self.ip.get_devices(): - if device.addr.list(to=ip): - return device.name - - def get_bridge_for_tap_device(self, tap_device_name): - bridges = self.get_all_neutron_bridges() - for bridge in bridges: - interfaces = self.get_interfaces_on_bridge(bridge) - if tap_device_name in interfaces: - return bridge - - return None - - def is_device_on_bridge(self, device_name): - if not device_name: - return False - else: - bridge_port_path = BRIDGE_PORT_FS_FOR_DEVICE.replace( - DEVICE_NAME_PLACEHOLDER, device_name) - return os.path.exists(bridge_port_path) - - def ensure_vlan_bridge(self, network_id, physical_interface, vlan_id): - """Create a vlan and bridge unless they already exist.""" - interface = self.ensure_vlan(physical_interface, vlan_id) - bridge_name = self.get_bridge_name(network_id) - ips, gateway = self.get_interface_details(interface) - if self.ensure_bridge(bridge_name, interface, ips, gateway): - return interface - - def ensure_vxlan_bridge(self, network_id, segmentation_id): - """Create a vxlan and bridge unless they already exist.""" - interface = self.ensure_vxlan(segmentation_id) - if not interface: - LOG.error(_("Failed creating vxlan interface for " - "%(segmentation_id)s"), - {segmentation_id: segmentation_id}) - return - bridge_name = self.get_bridge_name(network_id) - self.ensure_bridge(bridge_name, interface) - return interface - - def get_interface_details(self, interface): - device = self.ip.device(interface) - ips = device.addr.list(scope='global') - - # Update default gateway if necessary - gateway = device.route.get_gateway(scope='global') - return ips, gateway - - def ensure_flat_bridge(self, network_id, physical_interface): - """Create a 
non-vlan bridge unless it already exists.""" - bridge_name = self.get_bridge_name(network_id) - ips, gateway = self.get_interface_details(physical_interface) - if self.ensure_bridge(bridge_name, physical_interface, ips, gateway): - return physical_interface - - def ensure_local_bridge(self, network_id): - """Create a local bridge unless it already exists.""" - bridge_name = self.get_bridge_name(network_id) - return self.ensure_bridge(bridge_name) - - def ensure_vlan(self, physical_interface, vlan_id): - """Create a vlan unless it already exists.""" - interface = self.get_subinterface_name(physical_interface, vlan_id) - if not ip_lib.device_exists(interface, root_helper=self.root_helper): - LOG.debug(_("Creating subinterface %(interface)s for " - "VLAN %(vlan_id)s on interface " - "%(physical_interface)s"), - {'interface': interface, 'vlan_id': vlan_id, - 'physical_interface': physical_interface}) - if utils.execute(['ip', 'link', 'add', 'link', - physical_interface, - 'name', interface, 'type', 'vlan', 'id', - vlan_id], root_helper=self.root_helper): - return - if utils.execute(['ip', 'link', 'set', - interface, 'up'], root_helper=self.root_helper): - return - LOG.debug(_("Done creating subinterface %s"), interface) - return interface - - def ensure_vxlan(self, segmentation_id): - """Create a vxlan unless it already exists.""" - interface = self.get_vxlan_device_name(segmentation_id) - if not ip_lib.device_exists(interface, root_helper=self.root_helper): - LOG.debug(_("Creating vxlan interface %(interface)s for " - "VNI %(segmentation_id)s"), - {'interface': interface, - 'segmentation_id': segmentation_id}) - args = {'dev': self.local_int} - if self.vxlan_mode == lconst.VXLAN_MCAST: - args['group'] = cfg.CONF.VXLAN.vxlan_group - if cfg.CONF.VXLAN.ttl: - args['ttl'] = cfg.CONF.VXLAN.ttl - if cfg.CONF.VXLAN.tos: - args['tos'] = cfg.CONF.VXLAN.tos - if cfg.CONF.VXLAN.l2_population: - args['proxy'] = True - int_vxlan = self.ip.add_vxlan(interface, segmentation_id, 
**args) - int_vxlan.link.set_up() - LOG.debug(_("Done creating vxlan interface %s"), interface) - return interface - - def update_interface_ip_details(self, destination, source, ips, - gateway): - if ips or gateway: - dst_device = self.ip.device(destination) - src_device = self.ip.device(source) - - # Append IP's to bridge if necessary - if ips: - for ip in ips: - dst_device.addr.add(ip_version=ip['ip_version'], - cidr=ip['cidr'], - broadcast=ip['broadcast']) - - if gateway: - # Ensure that the gateway can be updated by changing the metric - metric = 100 - if 'metric' in gateway: - metric = gateway['metric'] - 1 - dst_device.route.add_gateway(gateway=gateway['gateway'], - metric=metric) - src_device.route.delete_gateway(gateway=gateway['gateway']) - - # Remove IP's from interface - if ips: - for ip in ips: - src_device.addr.delete(ip_version=ip['ip_version'], - cidr=ip['cidr']) - - def _bridge_exists_and_ensure_up(self, bridge_name): - """Check if the bridge exists and make sure it is up.""" - br = ip_lib.IPDevice(bridge_name, self.root_helper) - try: - # If the device doesn't exist this will throw a RuntimeError - br.link.set_up() - except RuntimeError: - return False - return True - - def ensure_bridge(self, bridge_name, interface=None, ips=None, - gateway=None): - """Create a bridge unless it already exists.""" - # _bridge_exists_and_ensure_up instead of device_exists is used here - # because there are cases where the bridge exists but it's not UP, - # for example: - # 1) A greenthread was executing this function and had not yet executed - # "ip link set bridge_name up" before eventlet switched to this - # thread running the same function - # 2) The Nova VIF driver was running concurrently and had just created - # the bridge, but had not yet put it UP - if not self._bridge_exists_and_ensure_up(bridge_name): - LOG.debug(_("Starting bridge %(bridge_name)s for subinterface " - "%(interface)s"), - {'bridge_name': bridge_name, 'interface': interface}) - if 
utils.execute(['brctl', 'addbr', bridge_name], - root_helper=self.root_helper): - return - if utils.execute(['brctl', 'setfd', bridge_name, - str(0)], root_helper=self.root_helper): - return - if utils.execute(['brctl', 'stp', bridge_name, - 'off'], root_helper=self.root_helper): - return - if utils.execute(['ip', 'link', 'set', bridge_name, - 'up'], root_helper=self.root_helper): - return - LOG.debug(_("Done starting bridge %(bridge_name)s for " - "subinterface %(interface)s"), - {'bridge_name': bridge_name, 'interface': interface}) - - if not interface: - return bridge_name - - # Update IP info if necessary - self.update_interface_ip_details(bridge_name, interface, ips, gateway) - - # Check if the interface is part of the bridge - if not self.interface_exists_on_bridge(bridge_name, interface): - try: - # Check if the interface is not enslaved in another bridge - if self.is_device_on_bridge(interface): - bridge = self.get_bridge_for_tap_device(interface) - utils.execute(['brctl', 'delif', bridge, interface], - root_helper=self.root_helper) - - utils.execute(['brctl', 'addif', bridge_name, interface], - root_helper=self.root_helper) - except Exception as e: - LOG.error(_("Unable to add %(interface)s to %(bridge_name)s! 
" - "Exception: %(e)s"), - {'interface': interface, 'bridge_name': bridge_name, - 'e': e}) - return - return bridge_name - - def ensure_physical_in_bridge(self, network_id, - network_type, - physical_network, - segmentation_id): - if network_type == p_const.TYPE_VXLAN: - if self.vxlan_mode == lconst.VXLAN_NONE: - LOG.error(_("Unable to add vxlan interface for network %s"), - network_id) - return - return self.ensure_vxlan_bridge(network_id, segmentation_id) - - physical_interface = self.interface_mappings.get(physical_network) - if not physical_interface: - LOG.error(_("No mapping for physical network %s"), - physical_network) - return - if network_type == p_const.TYPE_FLAT: - return self.ensure_flat_bridge(network_id, physical_interface) - elif network_type == p_const.TYPE_VLAN: - return self.ensure_vlan_bridge(network_id, physical_interface, - segmentation_id) - else: - LOG.error(_("Unknown network_type %(network_type)s for network " - "%(network_id)s."), {network_type: network_type, - network_id: network_id}) - - def add_tap_interface(self, network_id, network_type, physical_network, - segmentation_id, tap_device_name): - """Add tap interface. - - If a VIF has been plugged into a network, this function will - add the corresponding tap device to the relevant bridge. 
- """ - if not ip_lib.device_exists(tap_device_name, - root_helper=self.root_helper): - LOG.debug(_("Tap device: %s does not exist on " - "this host, skipped"), tap_device_name) - return False - - bridge_name = self.get_bridge_name(network_id) - if network_type == p_const.TYPE_LOCAL: - self.ensure_local_bridge(network_id) - elif not self.ensure_physical_in_bridge(network_id, - network_type, - physical_network, - segmentation_id): - return False - - # Check if device needs to be added to bridge - tap_device_in_bridge = self.get_bridge_for_tap_device(tap_device_name) - if not tap_device_in_bridge: - data = {'tap_device_name': tap_device_name, - 'bridge_name': bridge_name} - msg = _("Adding device %(tap_device_name)s to bridge " - "%(bridge_name)s") % data - LOG.debug(msg) - if utils.execute(['brctl', 'addif', bridge_name, tap_device_name], - root_helper=self.root_helper): - return False - else: - data = {'tap_device_name': tap_device_name, - 'bridge_name': bridge_name} - msg = _("%(tap_device_name)s already exists on bridge " - "%(bridge_name)s") % data - LOG.debug(msg) - return True - - def add_interface(self, network_id, network_type, physical_network, - segmentation_id, port_id): - self.network_map[network_id] = NetworkSegment(network_type, - physical_network, - segmentation_id) - tap_device_name = self.get_tap_device_name(port_id) - return self.add_tap_interface(network_id, network_type, - physical_network, segmentation_id, - tap_device_name) - - def delete_vlan_bridge(self, bridge_name): - if ip_lib.device_exists(bridge_name, root_helper=self.root_helper): - interfaces_on_bridge = self.get_interfaces_on_bridge(bridge_name) - for interface in interfaces_on_bridge: - self.remove_interface(bridge_name, interface) - - if interface.startswith(VXLAN_INTERFACE_PREFIX): - self.delete_vxlan(interface) - continue - - for physical_interface in self.interface_mappings.itervalues(): - if (interface.startswith(physical_interface)): - ips, gateway = 
self.get_interface_details(bridge_name) - if ips: - # This is a flat network or a VLAN interface that - # was setup outside of neutron => return IP's from - # bridge to interface - self.update_interface_ip_details(interface, - bridge_name, - ips, gateway) - elif physical_interface != interface: - self.delete_vlan(interface) - - LOG.debug(_("Deleting bridge %s"), bridge_name) - if utils.execute(['ip', 'link', 'set', bridge_name, 'down'], - root_helper=self.root_helper): - return - if utils.execute(['brctl', 'delbr', bridge_name], - root_helper=self.root_helper): - return - LOG.debug(_("Done deleting bridge %s"), bridge_name) - - else: - LOG.error(_("Cannot delete bridge %s, does not exist"), - bridge_name) - - def remove_empty_bridges(self): - for network_id in self.network_map.keys(): - bridge_name = self.get_bridge_name(network_id) - if not self.get_tap_devices_count(bridge_name): - self.delete_vlan_bridge(bridge_name) - del self.network_map[network_id] - - def remove_interface(self, bridge_name, interface_name): - if ip_lib.device_exists(bridge_name, root_helper=self.root_helper): - if not self.is_device_on_bridge(interface_name): - return True - LOG.debug(_("Removing device %(interface_name)s from bridge " - "%(bridge_name)s"), - {'interface_name': interface_name, - 'bridge_name': bridge_name}) - if utils.execute(['brctl', 'delif', bridge_name, interface_name], - root_helper=self.root_helper): - return False - LOG.debug(_("Done removing device %(interface_name)s from bridge " - "%(bridge_name)s"), - {'interface_name': interface_name, - 'bridge_name': bridge_name}) - return True - else: - LOG.debug(_("Cannot remove device %(interface_name)s bridge " - "%(bridge_name)s does not exist"), - {'interface_name': interface_name, - 'bridge_name': bridge_name}) - return False - - def delete_vlan(self, interface): - if ip_lib.device_exists(interface, root_helper=self.root_helper): - LOG.debug(_("Deleting subinterface %s for vlan"), interface) - if utils.execute(['ip', 
'link', 'set', interface, 'down'], - root_helper=self.root_helper): - return - if utils.execute(['ip', 'link', 'delete', interface], - root_helper=self.root_helper): - return - LOG.debug(_("Done deleting subinterface %s"), interface) - - def delete_vxlan(self, interface): - if ip_lib.device_exists(interface, root_helper=self.root_helper): - LOG.debug(_("Deleting vxlan interface %s for vlan"), - interface) - int_vxlan = self.ip.device(interface) - int_vxlan.link.set_down() - int_vxlan.link.delete() - LOG.debug(_("Done deleting vxlan interface %s"), interface) - - def get_tap_devices(self): - devices = set() - for device in os.listdir(BRIDGE_FS): - if device.startswith(TAP_INTERFACE_PREFIX): - devices.add(device) - return devices - - def vxlan_ucast_supported(self): - if not cfg.CONF.VXLAN.l2_population: - return False - if not ip_lib.iproute_arg_supported( - ['bridge', 'fdb'], 'append', self.root_helper): - LOG.warning(_('Option "%(option)s" must be supported by command ' - '"%(command)s" to enable %(mode)s mode') % - {'option': 'append', - 'command': 'bridge fdb', - 'mode': 'VXLAN UCAST'}) - return False - for segmentation_id in range(1, constants.MAX_VXLAN_VNI + 1): - if not ip_lib.device_exists( - self.get_vxlan_device_name(segmentation_id), - root_helper=self.root_helper): - break - else: - LOG.error(_('No valid Segmentation ID to perform UCAST test.')) - return False - - test_iface = self.ensure_vxlan(segmentation_id) - try: - utils.execute( - cmd=['bridge', 'fdb', 'append', constants.FLOODING_ENTRY[0], - 'dev', test_iface, 'dst', '1.1.1.1'], - root_helper=self.root_helper) - return True - except RuntimeError: - return False - finally: - self.delete_vxlan(test_iface) - - def vxlan_mcast_supported(self): - if not cfg.CONF.VXLAN.vxlan_group: - LOG.warning(_('VXLAN muticast group must be provided in ' - 'vxlan_group option to enable VXLAN MCAST mode')) - return False - if not ip_lib.iproute_arg_supported( - ['ip', 'link', 'add', 'type', 'vxlan'], - 'proxy', 
self.root_helper): - LOG.warning(_('Option "%(option)s" must be supported by command ' - '"%(command)s" to enable %(mode)s mode') % - {'option': 'proxy', - 'command': 'ip link add type vxlan', - 'mode': 'VXLAN MCAST'}) - - return False - return True - - def vxlan_module_supported(self): - try: - utils.execute(cmd=['modinfo', 'vxlan']) - return True - except RuntimeError: - return False - - def check_vxlan_support(self): - self.vxlan_mode = lconst.VXLAN_NONE - if not self.vxlan_module_supported(): - LOG.error(_('Linux kernel vxlan module and iproute2 3.8 or above ' - 'are required to enable VXLAN.')) - raise exceptions.VxlanNetworkUnsupported() - - if self.vxlan_ucast_supported(): - self.vxlan_mode = lconst.VXLAN_UCAST - elif self.vxlan_mcast_supported(): - self.vxlan_mode = lconst.VXLAN_MCAST - else: - raise exceptions.VxlanNetworkUnsupported() - LOG.debug(_('Using %s VXLAN mode'), self.vxlan_mode) - - def fdb_ip_entry_exists(self, mac, ip, interface): - entries = utils.execute(['ip', 'neigh', 'show', 'to', ip, - 'dev', interface], - root_helper=self.root_helper) - return mac in entries - - def fdb_bridge_entry_exists(self, mac, interface, agent_ip=None): - entries = utils.execute(['bridge', 'fdb', 'show', 'dev', interface], - root_helper=self.root_helper) - if not agent_ip: - return mac in entries - - return (agent_ip in entries and mac in entries) - - def add_fdb_ip_entry(self, mac, ip, interface): - utils.execute(['ip', 'neigh', 'replace', ip, 'lladdr', mac, - 'dev', interface, 'nud', 'permanent'], - root_helper=self.root_helper, - check_exit_code=False) - - def remove_fdb_ip_entry(self, mac, ip, interface): - utils.execute(['ip', 'neigh', 'del', ip, 'lladdr', mac, - 'dev', interface], - root_helper=self.root_helper, - check_exit_code=False) - - def add_fdb_bridge_entry(self, mac, agent_ip, interface, operation="add"): - utils.execute(['bridge', 'fdb', operation, mac, 'dev', interface, - 'dst', agent_ip], - root_helper=self.root_helper, - check_exit_code=False) 
- - def remove_fdb_bridge_entry(self, mac, agent_ip, interface): - utils.execute(['bridge', 'fdb', 'del', mac, 'dev', interface, - 'dst', agent_ip], - root_helper=self.root_helper, - check_exit_code=False) - - def add_fdb_entries(self, agent_ip, ports, interface): - for mac, ip in ports: - if mac != constants.FLOODING_ENTRY[0]: - self.add_fdb_ip_entry(mac, ip, interface) - self.add_fdb_bridge_entry(mac, agent_ip, interface) - elif self.vxlan_mode == lconst.VXLAN_UCAST: - if self.fdb_bridge_entry_exists(mac, interface): - self.add_fdb_bridge_entry(mac, agent_ip, interface, - "append") - else: - self.add_fdb_bridge_entry(mac, agent_ip, interface) - - def remove_fdb_entries(self, agent_ip, ports, interface): - for mac, ip in ports: - if mac != constants.FLOODING_ENTRY[0]: - self.remove_fdb_ip_entry(mac, ip, interface) - self.remove_fdb_bridge_entry(mac, agent_ip, interface) - elif self.vxlan_mode == lconst.VXLAN_UCAST: - self.remove_fdb_bridge_entry(mac, agent_ip, interface) - - -class LinuxBridgeRpcCallbacks(rpc_compat.RpcCallback, - sg_rpc.SecurityGroupAgentRpcCallbackMixin, - l2pop_rpc.L2populationRpcCallBackMixin): - - # Set RPC API version to 1.0 by default. - # history - # 1.1 Support Security Group RPC - RPC_API_VERSION = '1.1' - - def __init__(self, context, agent): - super(LinuxBridgeRpcCallbacks, self).__init__() - self.context = context - self.agent = agent - self.sg_agent = agent - - def network_delete(self, context, **kwargs): - LOG.debug(_("network_delete received")) - network_id = kwargs.get('network_id') - bridge_name = self.agent.br_mgr.get_bridge_name(network_id) - LOG.debug(_("Delete %s"), bridge_name) - self.agent.br_mgr.delete_vlan_bridge(bridge_name) - - def port_update(self, context, **kwargs): - port_id = kwargs['port']['id'] - tap_name = self.agent.br_mgr.get_tap_device_name(port_id) - # Put the tap name in the updated_devices set. 
- # Do not store port details, as if they're used for processing - # notifications there is no guarantee the notifications are - # processed in the same order as the relevant API requests. - self.agent.updated_devices.add(tap_name) - LOG.debug(_("port_update RPC received for port: %s"), port_id) - - def fdb_add(self, context, fdb_entries): - LOG.debug(_("fdb_add received")) - for network_id, values in fdb_entries.items(): - segment = self.agent.br_mgr.network_map.get(network_id) - if not segment: - return - - if segment.network_type != p_const.TYPE_VXLAN: - return - - interface = self.agent.br_mgr.get_vxlan_device_name( - segment.segmentation_id) - - agent_ports = values.get('ports') - for agent_ip, ports in agent_ports.items(): - if agent_ip == self.agent.br_mgr.local_ip: - continue - - self.agent.br_mgr.add_fdb_entries(agent_ip, - ports, - interface) - - def fdb_remove(self, context, fdb_entries): - LOG.debug(_("fdb_remove received")) - for network_id, values in fdb_entries.items(): - segment = self.agent.br_mgr.network_map.get(network_id) - if not segment: - return - - if segment.network_type != p_const.TYPE_VXLAN: - return - - interface = self.agent.br_mgr.get_vxlan_device_name( - segment.segmentation_id) - - agent_ports = values.get('ports') - for agent_ip, ports in agent_ports.items(): - if agent_ip == self.agent.br_mgr.local_ip: - continue - - self.agent.br_mgr.remove_fdb_entries(agent_ip, - ports, - interface) - - def _fdb_chg_ip(self, context, fdb_entries): - LOG.debug(_("update chg_ip received")) - for network_id, agent_ports in fdb_entries.items(): - segment = self.agent.br_mgr.network_map.get(network_id) - if not segment: - return - - if segment.network_type != p_const.TYPE_VXLAN: - return - - interface = self.agent.br_mgr.get_vxlan_device_name( - segment.segmentation_id) - - for agent_ip, state in agent_ports.items(): - if agent_ip == self.agent.br_mgr.local_ip: - continue - - after = state.get('after') - for mac, ip in after: - 
self.agent.br_mgr.add_fdb_ip_entry(mac, ip, interface) - - before = state.get('before') - for mac, ip in before: - self.agent.br_mgr.remove_fdb_ip_entry(mac, ip, interface) - - def fdb_update(self, context, fdb_entries): - LOG.debug(_("fdb_update received")) - for action, values in fdb_entries.items(): - method = '_fdb_' + action - if not hasattr(self, method): - raise NotImplementedError() - - getattr(self, method)(context, values) - - -class LinuxBridgePluginApi(agent_rpc.PluginApi, - sg_rpc.SecurityGroupServerRpcApiMixin): - pass - - -class LinuxBridgeNeutronAgentRPC(sg_rpc.SecurityGroupAgentRpcMixin): - - def __init__(self, interface_mappings, polling_interval, - root_helper): - self.polling_interval = polling_interval - self.root_helper = root_helper - self.setup_linux_bridge(interface_mappings) - configurations = {'interface_mappings': interface_mappings} - if self.br_mgr.vxlan_mode != lconst.VXLAN_NONE: - configurations['tunneling_ip'] = self.br_mgr.local_ip - configurations['tunnel_types'] = [p_const.TYPE_VXLAN] - configurations['l2_population'] = cfg.CONF.VXLAN.l2_population - self.agent_state = { - 'binary': 'neutron-linuxbridge-agent', - 'host': cfg.CONF.host, - 'topic': constants.L2_AGENT_TOPIC, - 'configurations': configurations, - 'agent_type': constants.AGENT_TYPE_LINUXBRIDGE, - 'start_flag': True} - - # stores received port_updates for processing by the main loop - self.updated_devices = set() - self.setup_rpc(interface_mappings.values()) - self.init_firewall() - - def _report_state(self): - try: - devices = len(self.br_mgr.get_tap_devices()) - self.agent_state.get('configurations')['devices'] = devices - self.state_rpc.report_state(self.context, - self.agent_state) - self.agent_state.pop('start_flag', None) - except Exception: - LOG.exception(_("Failed reporting state!")) - - def setup_rpc(self, physical_interfaces): - if physical_interfaces: - mac = utils.get_interface_mac(physical_interfaces[0]) - else: - devices = 
ip_lib.IPWrapper(self.root_helper).get_devices(True) - if devices: - mac = utils.get_interface_mac(devices[0].name) - else: - LOG.error(_("Unable to obtain MAC address for unique ID. " - "Agent terminated!")) - exit(1) - self.agent_id = '%s%s' % ('lb', (mac.replace(":", ""))) - LOG.info(_("RPC agent_id: %s"), self.agent_id) - - self.topic = topics.AGENT - self.plugin_rpc = LinuxBridgePluginApi(topics.PLUGIN) - self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN) - # RPC network init - self.context = context.get_admin_context_without_session() - # Handle updates from service - self.endpoints = [LinuxBridgeRpcCallbacks(self.context, self)] - # Define the listening consumers for the agent - consumers = [[topics.PORT, topics.UPDATE], - [topics.NETWORK, topics.DELETE], - [topics.SECURITY_GROUP, topics.UPDATE]] - if cfg.CONF.VXLAN.l2_population: - consumers.append([topics.L2POPULATION, - topics.UPDATE, cfg.CONF.host]) - self.connection = agent_rpc.create_consumers(self.endpoints, - self.topic, - consumers) - report_interval = cfg.CONF.AGENT.report_interval - if report_interval: - heartbeat = loopingcall.FixedIntervalLoopingCall( - self._report_state) - heartbeat.start(interval=report_interval) - - def setup_linux_bridge(self, interface_mappings): - self.br_mgr = LinuxBridgeManager(interface_mappings, self.root_helper) - - def remove_port_binding(self, network_id, interface_id): - bridge_name = self.br_mgr.get_bridge_name(network_id) - tap_device_name = self.br_mgr.get_tap_device_name(interface_id) - return self.br_mgr.remove_interface(bridge_name, tap_device_name) - - def process_network_devices(self, device_info): - resync_a = False - resync_b = False - - self.prepare_devices_filter(device_info.get('added')) - - if device_info.get('updated'): - self.refresh_firewall() - - # Updated devices are processed the same as new ones, as their - # admin_state_up may have changed. 
The set union prevents duplicating - # work when a device is new and updated in the same polling iteration. - devices_added_updated = (set(device_info.get('added')) - | set(device_info.get('updated'))) - if devices_added_updated: - resync_a = self.treat_devices_added_updated(devices_added_updated) - - if device_info.get('removed'): - resync_b = self.treat_devices_removed(device_info['removed']) - # If one of the above operations fails => resync with plugin - return (resync_a | resync_b) - - def treat_devices_added_updated(self, devices): - resync = False - - for device in devices: - LOG.debug(_("Treating added or updated device: %s"), device) - try: - details = self.plugin_rpc.get_device_details(self.context, - device, - self.agent_id) - except Exception as e: - LOG.debug(_("Unable to get port details for " - "%(device)s: %(e)s"), - {'device': device, 'e': e}) - resync = True - continue - if 'port_id' in details: - LOG.info(_("Port %(device)s updated. Details: %(details)s"), - {'device': device, 'details': details}) - if details['admin_state_up']: - # create the networking for the port - network_type = details.get('network_type') - if network_type: - segmentation_id = details.get('segmentation_id') - else: - # compatibility with pre-Havana RPC vlan_id encoding - vlan_id = details.get('vlan_id') - (network_type, - segmentation_id) = lconst.interpret_vlan_id(vlan_id) - if self.br_mgr.add_interface(details['network_id'], - network_type, - details['physical_network'], - segmentation_id, - details['port_id']): - - # update plugin about port status - self.plugin_rpc.update_device_up(self.context, - device, - self.agent_id, - cfg.CONF.host) - else: - self.plugin_rpc.update_device_down(self.context, - device, - self.agent_id, - cfg.CONF.host) - else: - self.remove_port_binding(details['network_id'], - details['port_id']) - else: - LOG.info(_("Device %s not defined on plugin"), device) - return resync - - def treat_devices_removed(self, devices): - resync = False - 
self.remove_devices_filter(devices) - for device in devices: - LOG.info(_("Attachment %s removed"), device) - details = None - try: - details = self.plugin_rpc.update_device_down(self.context, - device, - self.agent_id, - cfg.CONF.host) - except Exception as e: - LOG.debug(_("port_removed failed for %(device)s: %(e)s"), - {'device': device, 'e': e}) - resync = True - if details and details['exists']: - LOG.info(_("Port %s updated."), device) - else: - LOG.debug(_("Device %s not defined on plugin"), device) - self.br_mgr.remove_empty_bridges() - return resync - - def scan_devices(self, registered_devices, updated_devices): - curr_devices = self.br_mgr.get_tap_devices() - device_info = {} - device_info['current'] = curr_devices - device_info['added'] = curr_devices - registered_devices - # we don't want to process updates for devices that don't exist - device_info['updated'] = updated_devices & curr_devices - # we need to clean up after devices are removed - device_info['removed'] = registered_devices - curr_devices - return device_info - - def _device_info_has_changes(self, device_info): - return (device_info.get('added') - or device_info.get('updated') - or device_info.get('removed')) - - def daemon_loop(self): - sync = True - devices = set() - - LOG.info(_("LinuxBridge Agent RPC Daemon Started!")) - - while True: - start = time.time() - if sync: - LOG.info(_("Agent out of sync with plugin!")) - devices.clear() - sync = False - device_info = {} - # Save updated devices dict to perform rollback in case - # resync would be needed, and then clear self.updated_devices. - # As the greenthread should not yield between these - # two statements, this will should be thread-safe. - updated_devices_copy = self.updated_devices - self.updated_devices = set() - try: - device_info = self.scan_devices(devices, updated_devices_copy) - if self._device_info_has_changes(device_info): - LOG.debug(_("Agent loop found changes! 
%s"), device_info) - # If treat devices fails - indicates must resync with - # plugin - sync = self.process_network_devices(device_info) - devices = device_info['current'] - except Exception: - LOG.exception(_("Error in agent loop. Devices info: %s"), - device_info) - sync = True - # Restore devices that were removed from this set earlier - # without overwriting ones that may have arrived since. - self.updated_devices |= updated_devices_copy - - # sleep till end of polling interval - elapsed = (time.time() - start) - if (elapsed < self.polling_interval): - time.sleep(self.polling_interval - elapsed) - else: - LOG.debug(_("Loop iteration exceeded interval " - "(%(polling_interval)s vs. %(elapsed)s)!"), - {'polling_interval': self.polling_interval, - 'elapsed': elapsed}) - - -def main(): - common_config.init(sys.argv[1:]) - - common_config.setup_logging(cfg.CONF) - try: - interface_mappings = q_utils.parse_mappings( - cfg.CONF.LINUX_BRIDGE.physical_interface_mappings) - except ValueError as e: - LOG.error(_("Parsing physical_interface_mappings failed: %s." - " Agent terminated!"), e) - sys.exit(1) - LOG.info(_("Interface mappings: %s"), interface_mappings) - - polling_interval = cfg.CONF.AGENT.polling_interval - root_helper = cfg.CONF.AGENT.root_helper - agent = LinuxBridgeNeutronAgentRPC(interface_mappings, - polling_interval, - root_helper) - LOG.info(_("Agent initialized successfully, now running... ")) - agent.daemon_loop() - sys.exit(0) - - -if __name__ == "__main__": - main() diff --git a/neutron/plugins/linuxbridge/common/__init__.py b/neutron/plugins/linuxbridge/common/__init__.py deleted file mode 100644 index 5bb15232d..000000000 --- a/neutron/plugins/linuxbridge/common/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2012 Cisco Systems, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Sumit Naiksatam, Cisco Systems, Inc. diff --git a/neutron/plugins/linuxbridge/common/config.py b/neutron/plugins/linuxbridge/common/config.py deleted file mode 100644 index 8736d63a6..000000000 --- a/neutron/plugins/linuxbridge/common/config.py +++ /dev/null @@ -1,78 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2012 Cisco Systems, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Sumit Naiksatam, Cisco Systems, Inc. -# @author: Rohit Agarwalla, Cisco Systems, Inc. - -from oslo.config import cfg - -from neutron.agent.common import config - -DEFAULT_VLAN_RANGES = [] -DEFAULT_INTERFACE_MAPPINGS = [] -DEFAULT_VXLAN_GROUP = '224.0.0.1' - - -vlan_opts = [ - cfg.StrOpt('tenant_network_type', default='local', - help=_("Network type for tenant networks " - "(local, vlan, or none)")), - cfg.ListOpt('network_vlan_ranges', - default=DEFAULT_VLAN_RANGES, - help=_("List of :: " - "or ")), -] - -vxlan_opts = [ - cfg.BoolOpt('enable_vxlan', default=False, - help=_("Enable VXLAN on the agent. 
Can be enabled when " - "agent is managed by ml2 plugin using linuxbridge " - "mechanism driver")), - cfg.IntOpt('ttl', - help=_("TTL for vxlan interface protocol packets.")), - cfg.IntOpt('tos', - help=_("TOS for vxlan interface protocol packets.")), - cfg.StrOpt('vxlan_group', default=DEFAULT_VXLAN_GROUP, - help=_("Multicast group for vxlan interface.")), - cfg.StrOpt('local_ip', default='', - help=_("Local IP address of the VXLAN endpoints.")), - cfg.BoolOpt('l2_population', default=False, - help=_("Extension to use alongside ml2 plugin's l2population " - "mechanism driver. It enables the plugin to populate " - "VXLAN forwarding table.")), -] - -bridge_opts = [ - cfg.ListOpt('physical_interface_mappings', - default=DEFAULT_INTERFACE_MAPPINGS, - help=_("List of :")), -] - -agent_opts = [ - cfg.IntOpt('polling_interval', default=2, - help=_("The number of seconds the agent will wait between " - "polling for local device changes.")), - cfg.BoolOpt('rpc_support_old_agents', default=False, - help=_("Enable server RPC compatibility with old agents")), -] - - -cfg.CONF.register_opts(vlan_opts, "VLANS") -cfg.CONF.register_opts(vxlan_opts, "VXLAN") -cfg.CONF.register_opts(bridge_opts, "LINUX_BRIDGE") -cfg.CONF.register_opts(agent_opts, "AGENT") -config.register_agent_state_opts_helper(cfg.CONF) -config.register_root_helper(cfg.CONF) diff --git a/neutron/plugins/linuxbridge/common/constants.py b/neutron/plugins/linuxbridge/common/constants.py deleted file mode 100644 index 6dee88f40..000000000 --- a/neutron/plugins/linuxbridge/common/constants.py +++ /dev/null @@ -1,42 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2012 Cisco Systems, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Sumit Naiksatam, Cisco Systems, Inc. - - -from neutron.plugins.common import constants as p_const - - -FLAT_VLAN_ID = -1 -LOCAL_VLAN_ID = -2 - -# Supported VXLAN features -VXLAN_NONE = 'not_supported' -VXLAN_MCAST = 'multicast_flooding' -VXLAN_UCAST = 'unicast_flooding' - - -# TODO(rkukura): Eventually remove this function, which provides -# temporary backward compatibility with pre-Havana RPC and DB vlan_id -# encoding. -def interpret_vlan_id(vlan_id): - """Return (network_type, segmentation_id) tuple for encoded vlan_id.""" - if vlan_id == LOCAL_VLAN_ID: - return (p_const.TYPE_LOCAL, None) - elif vlan_id == FLAT_VLAN_ID: - return (p_const.TYPE_FLAT, None) - else: - return (p_const.TYPE_VLAN, vlan_id) diff --git a/neutron/plugins/linuxbridge/db/__init__.py b/neutron/plugins/linuxbridge/db/__init__.py deleted file mode 100644 index 33daf1f33..000000000 --- a/neutron/plugins/linuxbridge/db/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2012 Cisco Systems, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Sumit Naiksatam, Cisco Systems, Inc. -# diff --git a/neutron/plugins/linuxbridge/db/l2network_db_v2.py b/neutron/plugins/linuxbridge/db/l2network_db_v2.py deleted file mode 100644 index 416bd2f59..000000000 --- a/neutron/plugins/linuxbridge/db/l2network_db_v2.py +++ /dev/null @@ -1,238 +0,0 @@ -# Copyright (c) 2012 OpenStack Foundation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from six import moves -from sqlalchemy.orm import exc - -from neutron.common import exceptions as n_exc -import neutron.db.api as db -from neutron.db import models_v2 -from neutron.db import securitygroups_db as sg_db -from neutron import manager -from neutron.openstack.common import log as logging -from neutron.plugins.linuxbridge.common import config # noqa -from neutron.plugins.linuxbridge.common import constants -from neutron.plugins.linuxbridge.db import l2network_models_v2 - -LOG = logging.getLogger(__name__) - - -def sync_network_states(network_vlan_ranges): - """Synchronize network_states table with current configured VLAN ranges.""" - - session = db.get_session() - with session.begin(): - # get existing allocations for all physical networks - allocations = dict() - states = (session.query(l2network_models_v2.NetworkState). 
- all()) - for state in states: - if state.physical_network not in allocations: - allocations[state.physical_network] = set() - allocations[state.physical_network].add(state) - - # process vlan ranges for each configured physical network - for physical_network, vlan_ranges in network_vlan_ranges.iteritems(): - # determine current configured allocatable vlans for this - # physical network - vlan_ids = set() - for vlan_range in vlan_ranges: - vlan_ids |= set(moves.xrange(vlan_range[0], vlan_range[1] + 1)) - - # remove from table unallocated vlans not currently allocatable - if physical_network in allocations: - for state in allocations[physical_network]: - try: - # see if vlan is allocatable - vlan_ids.remove(state.vlan_id) - except KeyError: - # it's not allocatable, so check if its allocated - if not state.allocated: - # it's not, so remove it from table - LOG.debug(_("Removing vlan %(vlan_id)s on " - "physical network %(physical_network)s" - " from pool"), - {'vlan_id': state.vlan_id, - 'physical_network': physical_network}) - session.delete(state) - del allocations[physical_network] - - # add missing allocatable vlans to table - for vlan_id in sorted(vlan_ids): - state = l2network_models_v2.NetworkState(physical_network, - vlan_id) - session.add(state) - - # remove from table unallocated vlans for any unconfigured physical - # networks - for states in allocations.itervalues(): - for state in states: - if not state.allocated: - LOG.debug(_("Removing vlan %(vlan_id)s on physical " - "network %(physical_network)s" - " from pool"), - {'vlan_id': state.vlan_id, - 'physical_network': state.physical_network}) - session.delete(state) - - -def get_network_state(physical_network, vlan_id): - """Get state of specified network.""" - - session = db.get_session() - try: - state = (session.query(l2network_models_v2.NetworkState). - filter_by(physical_network=physical_network, - vlan_id=vlan_id). 
- one()) - return state - except exc.NoResultFound: - return None - - -def reserve_network(session): - with session.begin(subtransactions=True): - state = (session.query(l2network_models_v2.NetworkState). - filter_by(allocated=False). - with_lockmode('update'). - first()) - if not state: - raise n_exc.NoNetworkAvailable() - LOG.debug(_("Reserving vlan %(vlan_id)s on physical network " - "%(physical_network)s from pool"), - {'vlan_id': state.vlan_id, - 'physical_network': state.physical_network}) - state.allocated = True - return (state.physical_network, state.vlan_id) - - -def reserve_specific_network(session, physical_network, vlan_id): - with session.begin(subtransactions=True): - try: - state = (session.query(l2network_models_v2.NetworkState). - filter_by(physical_network=physical_network, - vlan_id=vlan_id). - with_lockmode('update'). - one()) - if state.allocated: - if vlan_id == constants.FLAT_VLAN_ID: - raise n_exc.FlatNetworkInUse( - physical_network=physical_network) - else: - raise n_exc.VlanIdInUse(vlan_id=vlan_id, - physical_network=physical_network) - LOG.debug(_("Reserving specific vlan %(vlan_id)s on physical " - "network %(physical_network)s from pool"), - {'vlan_id': vlan_id, - 'physical_network': physical_network}) - state.allocated = True - except exc.NoResultFound: - LOG.debug(_("Reserving specific vlan %(vlan_id)s on physical " - "network %(physical_network)s outside pool"), - {'vlan_id': vlan_id, - 'physical_network': physical_network}) - state = l2network_models_v2.NetworkState(physical_network, vlan_id) - state.allocated = True - session.add(state) - - -def release_network(session, physical_network, vlan_id, network_vlan_ranges): - with session.begin(subtransactions=True): - try: - state = (session.query(l2network_models_v2.NetworkState). - filter_by(physical_network=physical_network, - vlan_id=vlan_id). - with_lockmode('update'). 
- one()) - state.allocated = False - inside = False - for vlan_range in network_vlan_ranges.get(physical_network, []): - if vlan_id >= vlan_range[0] and vlan_id <= vlan_range[1]: - inside = True - break - if inside: - LOG.debug(_("Releasing vlan %(vlan_id)s on physical network " - "%(physical_network)s to pool"), - {'vlan_id': vlan_id, - 'physical_network': physical_network}) - else: - LOG.debug(_("Releasing vlan %(vlan_id)s on physical network " - "%(physical_network)s outside pool"), - {'vlan_id': vlan_id, - 'physical_network': physical_network}) - session.delete(state) - except exc.NoResultFound: - LOG.warning(_("vlan_id %(vlan_id)s on physical network " - "%(physical_network)s not found"), - {'vlan_id': vlan_id, - 'physical_network': physical_network}) - - -def add_network_binding(session, network_id, physical_network, vlan_id): - with session.begin(subtransactions=True): - binding = l2network_models_v2.NetworkBinding(network_id, - physical_network, vlan_id) - session.add(binding) - - -def get_network_binding(session, network_id): - try: - binding = (session.query(l2network_models_v2.NetworkBinding). - filter_by(network_id=network_id). 
- one()) - return binding - except exc.NoResultFound: - return - - -def get_port_from_device(device): - """Get port from database.""" - LOG.debug(_("get_port_from_device() called")) - session = db.get_session() - sg_binding_port = sg_db.SecurityGroupPortBinding.port_id - - query = session.query(models_v2.Port, - sg_db.SecurityGroupPortBinding.security_group_id) - query = query.outerjoin(sg_db.SecurityGroupPortBinding, - models_v2.Port.id == sg_binding_port) - query = query.filter(models_v2.Port.id.startswith(device)) - port_and_sgs = query.all() - if not port_and_sgs: - return - port = port_and_sgs[0][0] - plugin = manager.NeutronManager.get_plugin() - port_dict = plugin._make_port_dict(port) - port_dict['security_groups'] = [] - for port_in_db, sg_id in port_and_sgs: - if sg_id: - port_dict['security_groups'].append(sg_id) - port_dict['security_group_rules'] = [] - port_dict['security_group_source_groups'] = [] - port_dict['fixed_ips'] = [ip['ip_address'] - for ip in port['fixed_ips']] - return port_dict - - -def set_port_status(port_id, status): - """Set the port status.""" - LOG.debug(_("set_port_status as %s called"), status) - session = db.get_session() - try: - port = session.query(models_v2.Port).filter_by(id=port_id).one() - port['status'] = status - session.merge(port) - session.flush() - except exc.NoResultFound: - raise n_exc.PortNotFound(port_id=port_id) diff --git a/neutron/plugins/linuxbridge/db/l2network_models_v2.py b/neutron/plugins/linuxbridge/db/l2network_models_v2.py deleted file mode 100644 index 0c08e29c5..000000000 --- a/neutron/plugins/linuxbridge/db/l2network_models_v2.py +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright (c) 2012 OpenStack Foundation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import sqlalchemy as sa - -from neutron.db import model_base - - -class NetworkState(model_base.BASEV2): - """Represents state of vlan_id on physical network.""" - __tablename__ = 'network_states' - - physical_network = sa.Column(sa.String(64), nullable=False, - primary_key=True) - vlan_id = sa.Column(sa.Integer, nullable=False, primary_key=True, - autoincrement=False) - allocated = sa.Column(sa.Boolean, nullable=False) - - def __init__(self, physical_network, vlan_id): - self.physical_network = physical_network - self.vlan_id = vlan_id - self.allocated = False - - def __repr__(self): - return "" % (self.physical_network, - self.vlan_id, self.allocated) - - -class NetworkBinding(model_base.BASEV2): - """Represents binding of virtual network to physical network and vlan.""" - __tablename__ = 'network_bindings' - - network_id = sa.Column(sa.String(36), - sa.ForeignKey('networks.id', ondelete="CASCADE"), - primary_key=True) - physical_network = sa.Column(sa.String(64)) - vlan_id = sa.Column(sa.Integer, nullable=False) - - def __init__(self, network_id, physical_network, vlan_id): - self.network_id = network_id - self.physical_network = physical_network - self.vlan_id = vlan_id - - def __repr__(self): - return "" % (self.network_id, - self.physical_network, - self.vlan_id) diff --git a/neutron/plugins/linuxbridge/lb_neutron_plugin.py b/neutron/plugins/linuxbridge/lb_neutron_plugin.py deleted file mode 100644 index 412275d24..000000000 --- a/neutron/plugins/linuxbridge/lb_neutron_plugin.py +++ /dev/null @@ -1,530 +0,0 @@ -# Copyright (c) 2012 OpenStack 
Foundation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import sys - -from oslo.config import cfg - -from neutron.agent import securitygroups_rpc as sg_rpc -from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api -from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api -from neutron.api.v2 import attributes -from neutron.common import constants as q_const -from neutron.common import exceptions as n_exc -from neutron.common import rpc_compat -from neutron.common import topics -from neutron.common import utils -from neutron.db import agents_db -from neutron.db import agentschedulers_db -from neutron.db import api as db_api -from neutron.db import db_base_plugin_v2 -from neutron.db import dhcp_rpc_base -from neutron.db import external_net_db -from neutron.db import extraroute_db -from neutron.db import l3_agentschedulers_db -from neutron.db import l3_gwmode_db -from neutron.db import l3_rpc_base -from neutron.db import portbindings_db -from neutron.db import quota_db # noqa -from neutron.db import securitygroups_rpc_base as sg_db_rpc -from neutron.extensions import portbindings -from neutron.extensions import providernet as provider -from neutron import manager -from neutron.openstack.common import importutils -from neutron.openstack.common import log as logging -from neutron.plugins.common import constants as svc_constants -from neutron.plugins.common import utils as plugin_utils -from neutron.plugins.linuxbridge.common import constants -from 
neutron.plugins.linuxbridge.db import l2network_db_v2 as db - - -LOG = logging.getLogger(__name__) - - -class LinuxBridgeRpcCallbacks(rpc_compat.RpcCallback, - dhcp_rpc_base.DhcpRpcCallbackMixin, - l3_rpc_base.L3RpcCallbackMixin, - sg_db_rpc.SecurityGroupServerRpcCallbackMixin - ): - - # history - # 1.1 Support Security Group RPC - RPC_API_VERSION = '1.1' - # Device names start with "tap" - TAP_PREFIX_LEN = 3 - - @classmethod - def get_port_from_device(cls, device): - port = db.get_port_from_device(device[cls.TAP_PREFIX_LEN:]) - if port: - port['device'] = device - return port - - def get_device_details(self, rpc_context, **kwargs): - """Agent requests device details.""" - agent_id = kwargs.get('agent_id') - device = kwargs.get('device') - LOG.debug(_("Device %(device)s details requested from %(agent_id)s"), - {'device': device, 'agent_id': agent_id}) - port = self.get_port_from_device(device) - if port: - binding = db.get_network_binding(db_api.get_session(), - port['network_id']) - (network_type, - segmentation_id) = constants.interpret_vlan_id(binding.vlan_id) - entry = {'device': device, - 'network_type': network_type, - 'physical_network': binding.physical_network, - 'segmentation_id': segmentation_id, - 'network_id': port['network_id'], - 'port_id': port['id'], - 'admin_state_up': port['admin_state_up']} - if cfg.CONF.AGENT.rpc_support_old_agents: - entry['vlan_id'] = binding.vlan_id - new_status = (q_const.PORT_STATUS_ACTIVE if port['admin_state_up'] - else q_const.PORT_STATUS_DOWN) - if port['status'] != new_status: - db.set_port_status(port['id'], new_status) - else: - entry = {'device': device} - LOG.debug(_("%s can not be found in database"), device) - return entry - - def update_device_down(self, rpc_context, **kwargs): - """Device no longer exists on agent.""" - # TODO(garyk) - live migration and port status - agent_id = kwargs.get('agent_id') - device = kwargs.get('device') - host = kwargs.get('host') - port = self.get_port_from_device(device) - 
LOG.debug(_("Device %(device)s no longer exists on %(agent_id)s"), - {'device': device, 'agent_id': agent_id}) - plugin = manager.NeutronManager.get_plugin() - if port: - entry = {'device': device, - 'exists': True} - if (host and not - plugin.get_port_host(rpc_context, port['id']) == host): - LOG.debug(_("Device %(device)s not bound to the" - " agent host %(host)s"), - {'device': device, 'host': host}) - elif port['status'] != q_const.PORT_STATUS_DOWN: - # Set port status to DOWN - db.set_port_status(port['id'], q_const.PORT_STATUS_DOWN) - else: - entry = {'device': device, - 'exists': False} - LOG.debug(_("%s can not be found in database"), device) - return entry - - def update_device_up(self, rpc_context, **kwargs): - """Device is up on agent.""" - agent_id = kwargs.get('agent_id') - device = kwargs.get('device') - host = kwargs.get('host') - port = self.get_port_from_device(device) - LOG.debug(_("Device %(device)s up on %(agent_id)s"), - {'device': device, 'agent_id': agent_id}) - plugin = manager.NeutronManager.get_plugin() - if port: - if (host and - not plugin.get_port_host(rpc_context, port['id']) == host): - LOG.debug(_("Device %(device)s not bound to the" - " agent host %(host)s"), - {'device': device, 'host': host}) - return - elif port['status'] != q_const.PORT_STATUS_ACTIVE: - db.set_port_status(port['id'], - q_const.PORT_STATUS_ACTIVE) - else: - LOG.debug(_("%s can not be found in database"), device) - - -class AgentNotifierApi(rpc_compat.RpcProxy, - sg_rpc.SecurityGroupAgentRpcApiMixin): - '''Agent side of the linux bridge rpc API. - - API version history: - 1.0 - Initial version. - 1.1 - Added get_active_networks_info, create_dhcp_port, - and update_dhcp_port methods. 
- - - ''' - - BASE_RPC_API_VERSION = '1.1' - - def __init__(self, topic): - super(AgentNotifierApi, self).__init__( - topic=topic, default_version=self.BASE_RPC_API_VERSION) - self.topic = topic - self.topic_network_delete = topics.get_topic_name(topic, - topics.NETWORK, - topics.DELETE) - self.topic_port_update = topics.get_topic_name(topic, - topics.PORT, - topics.UPDATE) - - def network_delete(self, context, network_id): - self.fanout_cast(context, - self.make_msg('network_delete', - network_id=network_id), - topic=self.topic_network_delete) - - def port_update(self, context, port, physical_network, vlan_id): - network_type, segmentation_id = constants.interpret_vlan_id(vlan_id) - kwargs = {'port': port, - 'network_type': network_type, - 'physical_network': physical_network, - 'segmentation_id': segmentation_id} - if cfg.CONF.AGENT.rpc_support_old_agents: - kwargs['vlan_id'] = vlan_id - msg = self.make_msg('port_update', **kwargs) - self.fanout_cast(context, msg, - topic=self.topic_port_update) - - -class LinuxBridgePluginV2(db_base_plugin_v2.NeutronDbPluginV2, - external_net_db.External_net_db_mixin, - extraroute_db.ExtraRoute_db_mixin, - l3_gwmode_db.L3_NAT_db_mixin, - sg_db_rpc.SecurityGroupServerRpcMixin, - l3_agentschedulers_db.L3AgentSchedulerDbMixin, - agentschedulers_db.DhcpAgentSchedulerDbMixin, - portbindings_db.PortBindingMixin): - """Implement the Neutron abstractions using Linux bridging. - - A new VLAN is created for each network. An agent is relied upon - to perform the actual Linux bridge configuration on each host. - - The provider extension is also supported. As discussed in - https://bugs.launchpad.net/neutron/+bug/1023156, this class could - be simplified, and filtering on extended attributes could be - handled, by adding support for extended attributes to the - NeutronDbPluginV2 base class. When that occurs, this class should - be updated to take advantage of it. 
- - The port binding extension enables an external application relay - information to and from the plugin. - """ - - # This attribute specifies whether the plugin supports or not - # bulk/pagination/sorting operations. Name mangling is used in - # order to ensure it is qualified by class - __native_bulk_support = True - __native_pagination_support = True - __native_sorting_support = True - - _supported_extension_aliases = ["provider", "external-net", "router", - "ext-gw-mode", "binding", "quotas", - "security-group", "agent", "extraroute", - "l3_agent_scheduler", - "dhcp_agent_scheduler"] - - @property - def supported_extension_aliases(self): - if not hasattr(self, '_aliases'): - aliases = self._supported_extension_aliases[:] - sg_rpc.disable_security_group_extension_by_config(aliases) - self._aliases = aliases - return self._aliases - - def __init__(self): - super(LinuxBridgePluginV2, self).__init__() - self.base_binding_dict = { - portbindings.VIF_TYPE: portbindings.VIF_TYPE_BRIDGE, - portbindings.VIF_DETAILS: { - # TODO(rkukura): Replace with new VIF security details - portbindings.CAP_PORT_FILTER: - 'security-group' in self.supported_extension_aliases}} - self._parse_network_vlan_ranges() - db.sync_network_states(self.network_vlan_ranges) - self.tenant_network_type = cfg.CONF.VLANS.tenant_network_type - if self.tenant_network_type not in [svc_constants.TYPE_LOCAL, - svc_constants.TYPE_VLAN, - svc_constants.TYPE_NONE]: - LOG.error(_("Invalid tenant_network_type: %s. 
" - "Service terminated!"), - self.tenant_network_type) - sys.exit(1) - self._setup_rpc() - self.network_scheduler = importutils.import_object( - cfg.CONF.network_scheduler_driver - ) - self.router_scheduler = importutils.import_object( - cfg.CONF.router_scheduler_driver - ) - LOG.debug(_("Linux Bridge Plugin initialization complete")) - - def _setup_rpc(self): - # RPC support - self.service_topics = {svc_constants.CORE: topics.PLUGIN, - svc_constants.L3_ROUTER_NAT: topics.L3PLUGIN} - self.conn = rpc_compat.create_connection(new=True) - self.endpoints = [LinuxBridgeRpcCallbacks(), - agents_db.AgentExtRpcCallback()] - for svc_topic in self.service_topics.values(): - self.conn.create_consumer(svc_topic, self.endpoints, fanout=False) - # Consume from all consumers in threads - self.conn.consume_in_threads() - self.notifier = AgentNotifierApi(topics.AGENT) - self.agent_notifiers[q_const.AGENT_TYPE_DHCP] = ( - dhcp_rpc_agent_api.DhcpAgentNotifyAPI() - ) - self.agent_notifiers[q_const.AGENT_TYPE_L3] = ( - l3_rpc_agent_api.L3AgentNotifyAPI() - ) - - def _parse_network_vlan_ranges(self): - try: - self.network_vlan_ranges = plugin_utils.parse_network_vlan_ranges( - cfg.CONF.VLANS.network_vlan_ranges) - except Exception as ex: - LOG.error(_("%s. 
Agent terminated!"), ex) - sys.exit(1) - LOG.info(_("Network VLAN ranges: %s"), self.network_vlan_ranges) - - def _add_network_vlan_range(self, physical_network, vlan_min, vlan_max): - self._add_network(physical_network) - self.network_vlan_ranges[physical_network].append((vlan_min, vlan_max)) - - def _add_network(self, physical_network): - if physical_network not in self.network_vlan_ranges: - self.network_vlan_ranges[physical_network] = [] - - def _extend_network_dict_provider(self, context, network): - binding = db.get_network_binding(context.session, network['id']) - if binding.vlan_id == constants.FLAT_VLAN_ID: - network[provider.NETWORK_TYPE] = svc_constants.TYPE_FLAT - network[provider.PHYSICAL_NETWORK] = binding.physical_network - network[provider.SEGMENTATION_ID] = None - elif binding.vlan_id == constants.LOCAL_VLAN_ID: - network[provider.NETWORK_TYPE] = svc_constants.TYPE_LOCAL - network[provider.PHYSICAL_NETWORK] = None - network[provider.SEGMENTATION_ID] = None - else: - network[provider.NETWORK_TYPE] = svc_constants.TYPE_VLAN - network[provider.PHYSICAL_NETWORK] = binding.physical_network - network[provider.SEGMENTATION_ID] = binding.vlan_id - - def _process_provider_create(self, context, attrs): - network_type = attrs.get(provider.NETWORK_TYPE) - physical_network = attrs.get(provider.PHYSICAL_NETWORK) - segmentation_id = attrs.get(provider.SEGMENTATION_ID) - - network_type_set = attributes.is_attr_set(network_type) - physical_network_set = attributes.is_attr_set(physical_network) - segmentation_id_set = attributes.is_attr_set(segmentation_id) - - if not (network_type_set or physical_network_set or - segmentation_id_set): - return (None, None, None) - - if not network_type_set: - msg = _("provider:network_type required") - raise n_exc.InvalidInput(error_message=msg) - elif network_type == svc_constants.TYPE_FLAT: - if segmentation_id_set: - msg = _("provider:segmentation_id specified for flat network") - raise n_exc.InvalidInput(error_message=msg) - 
else: - segmentation_id = constants.FLAT_VLAN_ID - elif network_type == svc_constants.TYPE_VLAN: - if not segmentation_id_set: - msg = _("provider:segmentation_id required") - raise n_exc.InvalidInput(error_message=msg) - if not utils.is_valid_vlan_tag(segmentation_id): - msg = (_("provider:segmentation_id out of range " - "(%(min_id)s through %(max_id)s)") % - {'min_id': q_const.MIN_VLAN_TAG, - 'max_id': q_const.MAX_VLAN_TAG}) - raise n_exc.InvalidInput(error_message=msg) - elif network_type == svc_constants.TYPE_LOCAL: - if physical_network_set: - msg = _("provider:physical_network specified for local " - "network") - raise n_exc.InvalidInput(error_message=msg) - else: - physical_network = None - if segmentation_id_set: - msg = _("provider:segmentation_id specified for local " - "network") - raise n_exc.InvalidInput(error_message=msg) - else: - segmentation_id = constants.LOCAL_VLAN_ID - else: - msg = _("provider:network_type %s not supported") % network_type - raise n_exc.InvalidInput(error_message=msg) - - if network_type in [svc_constants.TYPE_VLAN, svc_constants.TYPE_FLAT]: - if physical_network_set: - if physical_network not in self.network_vlan_ranges: - msg = (_("Unknown provider:physical_network %s") % - physical_network) - raise n_exc.InvalidInput(error_message=msg) - elif 'default' in self.network_vlan_ranges: - physical_network = 'default' - else: - msg = _("provider:physical_network required") - raise n_exc.InvalidInput(error_message=msg) - - return (network_type, physical_network, segmentation_id) - - def create_network(self, context, network): - (network_type, physical_network, - vlan_id) = self._process_provider_create(context, - network['network']) - - session = context.session - with session.begin(subtransactions=True): - #set up default security groups - tenant_id = self._get_tenant_id_for_create( - context, network['network']) - self._ensure_default_security_group(context, tenant_id) - - if not network_type: - # tenant network - network_type = 
self.tenant_network_type - if network_type == svc_constants.TYPE_NONE: - raise n_exc.TenantNetworksDisabled() - elif network_type == svc_constants.TYPE_VLAN: - physical_network, vlan_id = db.reserve_network(session) - else: # TYPE_LOCAL - vlan_id = constants.LOCAL_VLAN_ID - else: - # provider network - if network_type in [svc_constants.TYPE_VLAN, - svc_constants.TYPE_FLAT]: - db.reserve_specific_network(session, physical_network, - vlan_id) - # no reservation needed for TYPE_LOCAL - net = super(LinuxBridgePluginV2, self).create_network(context, - network) - db.add_network_binding(session, net['id'], - physical_network, vlan_id) - self._process_l3_create(context, net, network['network']) - self._extend_network_dict_provider(context, net) - # note - exception will rollback entire transaction - return net - - def update_network(self, context, id, network): - provider._raise_if_updates_provider_attributes(network['network']) - - session = context.session - with session.begin(subtransactions=True): - net = super(LinuxBridgePluginV2, self).update_network(context, id, - network) - self._process_l3_update(context, net, network['network']) - self._extend_network_dict_provider(context, net) - return net - - def delete_network(self, context, id): - session = context.session - with session.begin(subtransactions=True): - binding = db.get_network_binding(session, id) - self._process_l3_delete(context, id) - super(LinuxBridgePluginV2, self).delete_network(context, id) - if binding.vlan_id != constants.LOCAL_VLAN_ID: - db.release_network(session, binding.physical_network, - binding.vlan_id, self.network_vlan_ranges) - # the network_binding record is deleted via cascade from - # the network record, so explicit removal is not necessary - self.notifier.network_delete(context, id) - - def get_network(self, context, id, fields=None): - session = context.session - with session.begin(subtransactions=True): - net = super(LinuxBridgePluginV2, self).get_network(context, - id, None) - 
self._extend_network_dict_provider(context, net) - return self._fields(net, fields) - - def get_networks(self, context, filters=None, fields=None, - sorts=None, limit=None, marker=None, page_reverse=False): - session = context.session - with session.begin(subtransactions=True): - nets = super(LinuxBridgePluginV2, - self).get_networks(context, filters, None, sorts, - limit, marker, page_reverse) - for net in nets: - self._extend_network_dict_provider(context, net) - - return [self._fields(net, fields) for net in nets] - - def create_port(self, context, port): - session = context.session - port_data = port['port'] - with session.begin(subtransactions=True): - self._ensure_default_security_group_on_port(context, port) - sgids = self._get_security_groups_on_port(context, port) - # Set port status as 'DOWN'. This will be updated by agent - port['port']['status'] = q_const.PORT_STATUS_DOWN - - port = super(LinuxBridgePluginV2, - self).create_port(context, port) - self._process_portbindings_create_and_update(context, - port_data, - port) - self._process_port_create_security_group( - context, port, sgids) - self.notify_security_groups_member_updated(context, port) - return port - - def update_port(self, context, id, port): - original_port = self.get_port(context, id) - session = context.session - need_port_update_notify = False - - with session.begin(subtransactions=True): - updated_port = super(LinuxBridgePluginV2, self).update_port( - context, id, port) - self._process_portbindings_create_and_update(context, - port['port'], - updated_port) - need_port_update_notify = self.update_security_group_on_port( - context, id, port, original_port, updated_port) - - need_port_update_notify |= self.is_security_group_member_updated( - context, original_port, updated_port) - - if original_port['admin_state_up'] != updated_port['admin_state_up']: - need_port_update_notify = True - - if need_port_update_notify: - self._notify_port_updated(context, updated_port) - return updated_port - - 
def delete_port(self, context, id, l3_port_check=True): - - # if needed, check to see if this is a port owned by - # and l3-router. If so, we should prevent deletion. - if l3_port_check: - self.prevent_l3_port_deletion(context, id) - - session = context.session - with session.begin(subtransactions=True): - self.disassociate_floatingips(context, id) - port = self.get_port(context, id) - self._delete_port_security_group_bindings(context, id) - super(LinuxBridgePluginV2, self).delete_port(context, id) - - self.notify_security_groups_member_updated(context, port) - - def _notify_port_updated(self, context, port): - binding = db.get_network_binding(context.session, - port['network_id']) - self.notifier.port_update(context, port, - binding.physical_network, - binding.vlan_id) diff --git a/neutron/plugins/metaplugin/README b/neutron/plugins/metaplugin/README deleted file mode 100644 index 8dbc47756..000000000 --- a/neutron/plugins/metaplugin/README +++ /dev/null @@ -1,92 +0,0 @@ -# -- Background - -This plugin supports multiple plugin at same time. This plugin is for L3 connectivility -between networks which are realized by different plugins.This plugin adds new attributes 'flavor:network' and 'flavor:router". -flavor:network corresponds to specific l2 plugin ( flavor-plugin mapping could be configurable by plugin_list config. -flavor:router corresponds to specific l3 plugin ( flavor-plugin mapping could be configurable by l3_plugin_list config. Note that Metaplugin can provide l3 functionaliteis for l2 plugin which didn't support l3 extension yet. -This plugin also support extensions. We can map extension to plugin by using extension_map config. - -[database] -# This line MUST be changed to actually run the plugin. -# Example: -# connection = mysql://root:nova@127.0.0.1:3306/ovs_neutron -# Replace 127.0.0.1 above with the IP address of the database used by the -# main neutron server. (Leave it as is if the database runs on this host.) 
-connection = mysql://root:password@localhost/neutron_metaplugin?charset=utf8 - -# Database reconnection retry times - in event connectivity is lost -# set to -1 implgies an infinite retry count -# max_retries = 10 -# Database reconnection interval in seconds - in event connectivity is lost -retry_interval = 2 - -[meta] -## This is list of flavor:neutron_plugins -# extension method is used in the order of this list -plugin_list= 'openvswitch:neutron.plugins.openvswitch.ovs_neutron_plugin.OVSneutronPluginV2,linuxbridge:neutron.plugins.linuxbridge.lb_neutron_plugin.LinuxBridgePluginV2' -# plugin for l3 -l3_plugin_list= 'openvswitch:neutron.plugins.openvswitch.ovs_neutron_plugin.OVSneutronPluginV2,linuxbridge:neutron.plugins.linuxbridge.lb_neutron_plugin.LinuxBridgePluginV2' - -# Default value of flavor -default_flavor = 'openvswitch' -# Default value for l3 -default_l3_flavor = 'openvswitch' - -# supported extensions -supported_extension_aliases = 'providernet' -# specific method map for each flavor to extensions -extension_map = 'get_port_stats:nvp' - -# -- BridgeDriver Configration -# In order to use metaplugin, you should use MetaDriver. Following configation is needed. - -[DEFAULT] -# Meta Plugin -# Mapping between flavor and driver -meta_flavor_driver_mappings = openvswitch:neutron.agent.linux.interface.OVSInterfaceDriver, linuxbridge:neutron.agent.linux.interface.BridgeInterfaceDriver -# interface driver for MetaPlugin -interface_driver = neutron.agent.linux.interface.MetaInterfaceDriver - -[proxy] -auth_url = http://10.0.0.1:35357/v2.0 -auth_region = RegionOne -admin_tenant_name = service -admin_user = neutron -admin_password = password - - -# -- Agent -Agents for Metaplugin are in neutron/plugins/metaplugin/agent -linuxbridge_neutron_agent and ovs_neutron_agent is available. - -# -- Extensions - -- flavor -MetaPlugin supports flavor and provider net extension. -Metaplugin select plugin_list using flavor. -One plugin may use multiple flavor value. 
If the plugin support flavor, it may provide -multiple flavor of network. - -- Attribute extension -Each plugin can use attribute extension such as provider_net, if you specify that in supported_extension_aliases. - -- providernet -Vlan ID range of each plugin should be different, since Metaplugin dose not manage that. - -#- limitations - -Basically, All plugin should inherit NeutronDbPluginV2. -Metaplugin assumes all plugin share same Database especially for IPAM part in NeutronV2 API. -You can use another plugin if you use ProxyPluginV2, which proxies request to the another neutron server. - -Example flavor configration for ProxyPluginV2 - -meta_flavor_driver_mappings = "openvswitch:neutron.agent.linux.interface.OVSInterfaceDriver,proxy:neutron.plugins.metaplugin.proxy_neutron_plugin.ProxyPluginV2" - -- Limited L3 support -In folsom version, l3 is an extension. There is no way to extend exntension attributes. -so you can set flavor:router value but you can't get flavor:router value in API output. -L3 agent dont's support flavor:router. - - - diff --git a/neutron/plugins/metaplugin/__init__.py b/neutron/plugins/metaplugin/__init__.py deleted file mode 100644 index d8bce7745..000000000 --- a/neutron/plugins/metaplugin/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2012, Nachi Ueno, NTT MCL, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
diff --git a/neutron/plugins/metaplugin/common/__init__.py b/neutron/plugins/metaplugin/common/__init__.py deleted file mode 100644 index d8bce7745..000000000 --- a/neutron/plugins/metaplugin/common/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2012, Nachi Ueno, NTT MCL, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. diff --git a/neutron/plugins/metaplugin/common/config.py b/neutron/plugins/metaplugin/common/config.py deleted file mode 100644 index 26978d71b..000000000 --- a/neutron/plugins/metaplugin/common/config.py +++ /dev/null @@ -1,80 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2012, Nachi Ueno, NTT MCL, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo.config import cfg - - -meta_plugin_opts = [ - cfg.StrOpt( - 'plugin_list', - default='', - help=_("Comma separated list of flavor:neutron_plugin for " - "plugins to load. Extension method is searched in the " - "list order and the first one is used.")), - cfg.StrOpt( - 'l3_plugin_list', - default='', - help=_("Comma separated list of flavor:neutron_plugin for L3 " - "service plugins to load. This is intended for specifying " - "L2 plugins which support L3 functions. If you use a router " - "service plugin, set this blank.")), - cfg.StrOpt( - 'default_flavor', - default='', - help=_("Default flavor to use, when flavor:network is not " - "specified at network creation.")), - cfg.StrOpt( - 'default_l3_flavor', - default='', - help=_("Default L3 flavor to use, when flavor:router is not " - "specified at router creation. Ignored if 'l3_plugin_list' " - "is blank.")), - cfg.StrOpt( - 'supported_extension_aliases', - default='', - help=_("Comma separated list of supported extension aliases.")), - cfg.StrOpt( - 'extension_map', - default='', - help=_("Comma separated list of method:flavor to select specific " - "plugin for a method. 
This has priority over method search " - "order based on 'plugin_list'.")), - cfg.StrOpt( - 'rpc_flavor', - default='', - help=_("Specifies flavor for plugin to handle 'q-plugin' RPC " - "requests.")), -] - -proxy_plugin_opts = [ - cfg.StrOpt('admin_user', - help=_("Admin user")), - cfg.StrOpt('admin_password', - help=_("Admin password"), - secret=True), - cfg.StrOpt('admin_tenant_name', - help=_("Admin tenant name")), - cfg.StrOpt('auth_url', - help=_("Authentication URL")), - cfg.StrOpt('auth_strategy', default='keystone', - help=_("The type of authentication to use")), - cfg.StrOpt('auth_region', - help=_("Authentication region")), -] - -cfg.CONF.register_opts(meta_plugin_opts, "META") -cfg.CONF.register_opts(proxy_plugin_opts, "PROXY") diff --git a/neutron/plugins/metaplugin/meta_db_v2.py b/neutron/plugins/metaplugin/meta_db_v2.py deleted file mode 100644 index 68c9055ff..000000000 --- a/neutron/plugins/metaplugin/meta_db_v2.py +++ /dev/null @@ -1,52 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2012, Nachi Ueno, NTT MCL, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from sqlalchemy.orm import exc - -from neutron.plugins.metaplugin import meta_models_v2 - - -def get_flavor_by_network(session, net_id): - try: - binding = (session.query(meta_models_v2.NetworkFlavor). - filter_by(network_id=net_id). 
- one()) - except exc.NoResultFound: - return None - return binding.flavor - - -def add_network_flavor_binding(session, flavor, net_id): - binding = meta_models_v2.NetworkFlavor(flavor=flavor, network_id=net_id) - session.add(binding) - return binding - - -def get_flavor_by_router(session, router_id): - try: - binding = (session.query(meta_models_v2.RouterFlavor). - filter_by(router_id=router_id). - one()) - except exc.NoResultFound: - return None - return binding.flavor - - -def add_router_flavor_binding(session, flavor, router_id): - binding = meta_models_v2.RouterFlavor(flavor=flavor, router_id=router_id) - session.add(binding) - return binding diff --git a/neutron/plugins/metaplugin/meta_models_v2.py b/neutron/plugins/metaplugin/meta_models_v2.py deleted file mode 100644 index 566d1d8d8..000000000 --- a/neutron/plugins/metaplugin/meta_models_v2.py +++ /dev/null @@ -1,43 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2012, Nachi Ueno, NTT MCL, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import sqlalchemy as sa -from sqlalchemy import Column, String - -from neutron.db import models_v2 - - -class NetworkFlavor(models_v2.model_base.BASEV2): - """Represents a binding of network_id to flavor.""" - flavor = Column(String(255)) - network_id = sa.Column(sa.String(36), sa.ForeignKey('networks.id', - ondelete="CASCADE"), - primary_key=True) - - def __repr__(self): - return "" % (self.flavor, self.network_id) - - -class RouterFlavor(models_v2.model_base.BASEV2): - """Represents a binding of router_id to flavor.""" - flavor = Column(String(255)) - router_id = sa.Column(sa.String(36), sa.ForeignKey('routers.id', - ondelete="CASCADE"), - primary_key=True) - - def __repr__(self): - return "" % (self.flavor, self.router_id) diff --git a/neutron/plugins/metaplugin/meta_neutron_plugin.py b/neutron/plugins/metaplugin/meta_neutron_plugin.py deleted file mode 100644 index 92a962846..000000000 --- a/neutron/plugins/metaplugin/meta_neutron_plugin.py +++ /dev/null @@ -1,419 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2012, Nachi Ueno, NTT MCL, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo.config import cfg - -from neutron.common import exceptions as exc -from neutron.common import topics -from neutron import context as neutron_context -from neutron.db import api as db -from neutron.db import db_base_plugin_v2 -from neutron.db import external_net_db -from neutron.db import extraroute_db -from neutron.db import l3_db -from neutron.db import models_v2 -from neutron.extensions import flavor as ext_flavor -from neutron.openstack.common import importutils -from neutron.openstack.common import log as logging -from neutron.plugins.metaplugin.common import config # noqa -from neutron.plugins.metaplugin import meta_db_v2 -from neutron.plugins.metaplugin import meta_models_v2 - - -LOG = logging.getLogger(__name__) - - -# Hooks used to select records which belong a target plugin. -def _meta_network_model_hook(context, original_model, query): - return query.outerjoin(meta_models_v2.NetworkFlavor, - meta_models_v2.NetworkFlavor.network_id == - models_v2.Network.id) - - -def _meta_port_model_hook(context, original_model, query): - return query.join(meta_models_v2.NetworkFlavor, - meta_models_v2.NetworkFlavor.network_id == - models_v2.Port.network_id) - - -def _meta_flavor_filter_hook(query, filters): - if ext_flavor.FLAVOR_NETWORK in filters: - return query.filter(meta_models_v2.NetworkFlavor.flavor == - filters[ext_flavor.FLAVOR_NETWORK][0]) - return query - - -# Metaplugin Exceptions -class FlavorNotFound(exc.NotFound): - message = _("Flavor %(flavor)s could not be found") - - -class FaildToAddFlavorBinding(exc.NeutronException): - message = _("Failed to add flavor binding") - - -class MetaPluginV2(db_base_plugin_v2.NeutronDbPluginV2, - external_net_db.External_net_db_mixin, - extraroute_db.ExtraRoute_db_mixin): - - def __init__(self, configfile=None): - super(MetaPluginV2, self).__init__() - LOG.debug(_("Start initializing metaplugin")) - self.supported_extension_aliases = ['flavor', 'external-net'] - if cfg.CONF.META.supported_extension_aliases: - 
cfg_aliases = cfg.CONF.META.supported_extension_aliases.split(',') - self.supported_extension_aliases += cfg_aliases - - # Ignore config option overapping - def _is_opt_registered(opts, opt): - if opt.dest in opts: - return True - else: - return False - - cfg._is_opt_registered = _is_opt_registered - - # Keep existing tables if multiple plugin use same table name. - db.model_base.NeutronBase.__table_args__ = {'keep_existing': True} - - self.plugins = {} - - plugin_list = [plugin_set.split(':') - for plugin_set - in cfg.CONF.META.plugin_list.split(',')] - self.rpc_flavor = cfg.CONF.META.rpc_flavor - topic_save = topics.PLUGIN - topic_fake = topic_save + '-metaplugin' - for flavor, plugin_provider in plugin_list: - # Rename topic used by a plugin other than rpc_flavor during - # loading the plugin instance if rpc_flavor is specified. - # This enforces the plugin specified by rpc_flavor is only - # consumer of 'q-plugin'. It is a bit tricky but there is no - # bad effect. - if self.rpc_flavor and self.rpc_flavor != flavor: - topics.PLUGIN = topic_fake - self.plugins[flavor] = self._load_plugin(plugin_provider) - topics.PLUGIN = topic_save - - self.l3_plugins = {} - if cfg.CONF.META.l3_plugin_list: - l3_plugin_list = [plugin_set.split(':') - for plugin_set - in cfg.CONF.META.l3_plugin_list.split(',')] - for flavor, plugin_provider in l3_plugin_list: - if flavor in self.plugins: - self.l3_plugins[flavor] = self.plugins[flavor] - else: - # For l3 only plugin - self.l3_plugins[flavor] = self._load_plugin( - plugin_provider) - - self.default_flavor = cfg.CONF.META.default_flavor - if self.default_flavor not in self.plugins: - raise exc.Invalid(_('default_flavor %s is not plugin list') % - self.default_flavor) - - if self.l3_plugins: - self.default_l3_flavor = cfg.CONF.META.default_l3_flavor - if self.default_l3_flavor not in self.l3_plugins: - raise exc.Invalid(_('default_l3_flavor %s is not plugin list') - % self.default_l3_flavor) - self.supported_extension_aliases += 
['router', 'ext-gw-mode', - 'extraroute'] - - if self.rpc_flavor and self.rpc_flavor not in self.plugins: - raise exc.Invalid(_('rpc_flavor %s is not plugin list') % - self.rpc_flavor) - - self.extension_map = {} - if not cfg.CONF.META.extension_map == '': - extension_list = [method_set.split(':') - for method_set - in cfg.CONF.META.extension_map.split(',')] - for method_name, flavor in extension_list: - self.extension_map[method_name] = flavor - - # Register hooks. - # The hooks are applied for each target plugin instance when - # calling the base class to get networks/ports so that only records - # which belong to the plugin are selected. - #NOTE: Doing registration here (within __init__()) is to avoid - # registration when merely importing this file. This is only - # for running whole unit tests. - db_base_plugin_v2.NeutronDbPluginV2.register_model_query_hook( - models_v2.Network, - 'metaplugin_net', - _meta_network_model_hook, - None, - _meta_flavor_filter_hook) - db_base_plugin_v2.NeutronDbPluginV2.register_model_query_hook( - models_v2.Port, - 'metaplugin_port', - _meta_port_model_hook, - None, - _meta_flavor_filter_hook) - - def _load_plugin(self, plugin_provider): - LOG.debug(_("Plugin location: %s"), plugin_provider) - plugin_klass = importutils.import_class(plugin_provider) - return plugin_klass() - - def _get_plugin(self, flavor): - if flavor not in self.plugins: - raise FlavorNotFound(flavor=flavor) - return self.plugins[flavor] - - def _get_l3_plugin(self, flavor): - if flavor not in self.l3_plugins: - raise FlavorNotFound(flavor=flavor) - return self.l3_plugins[flavor] - - def __getattr__(self, key): - # At first, try to pickup extension command from extension_map - - if key in self.extension_map: - flavor = self.extension_map[key] - plugin = self._get_plugin(flavor) - if plugin and hasattr(plugin, key): - return getattr(plugin, key) - - # Second, try to match extension method in order of plugin list - for flavor, plugin in self.plugins.items(): - if 
hasattr(plugin, key): - return getattr(plugin, key) - - # if no plugin support the method, then raise - raise AttributeError - - def _extend_network_dict(self, context, network): - flavor = self._get_flavor_by_network_id(context, network['id']) - network[ext_flavor.FLAVOR_NETWORK] = flavor - - def start_rpc_listeners(self): - return self.plugins[self.rpc_flavor].start_rpc_listeners() - - def rpc_workers_supported(self): - #NOTE: If a plugin which supports multiple RPC workers is desired - # to handle RPC, rpc_flavor must be specified. - return (self.rpc_flavor and - self.plugins[self.rpc_flavor].rpc_workers_supported()) - - def create_network(self, context, network): - n = network['network'] - flavor = n.get(ext_flavor.FLAVOR_NETWORK) - if str(flavor) not in self.plugins: - flavor = self.default_flavor - plugin = self._get_plugin(flavor) - net = plugin.create_network(context, network) - LOG.debug(_("Created network: %(net_id)s with flavor " - "%(flavor)s"), {'net_id': net['id'], 'flavor': flavor}) - try: - meta_db_v2.add_network_flavor_binding(context.session, - flavor, str(net['id'])) - except Exception: - LOG.exception(_('Failed to add flavor bindings')) - plugin.delete_network(context, net['id']) - raise FaildToAddFlavorBinding() - - LOG.debug(_("Created network: %s"), net['id']) - self._extend_network_dict(context, net) - return net - - def update_network(self, context, id, network): - flavor = meta_db_v2.get_flavor_by_network(context.session, id) - plugin = self._get_plugin(flavor) - return plugin.update_network(context, id, network) - - def delete_network(self, context, id): - flavor = meta_db_v2.get_flavor_by_network(context.session, id) - plugin = self._get_plugin(flavor) - return plugin.delete_network(context, id) - - def get_network(self, context, id, fields=None): - flavor = meta_db_v2.get_flavor_by_network(context.session, id) - plugin = self._get_plugin(flavor) - net = plugin.get_network(context, id, fields) - net['id'] = id - if not fields or 
ext_flavor.FLAVOR_NETWORK in fields: - self._extend_network_dict(context, net) - if fields and 'id' not in fields: - del net['id'] - return net - - def get_networks(self, context, filters=None, fields=None): - nets = [] - for flavor, plugin in self.plugins.items(): - if (filters and ext_flavor.FLAVOR_NETWORK in filters and - not flavor in filters[ext_flavor.FLAVOR_NETWORK]): - continue - if filters: - #NOTE: copy each time since a target plugin may modify - # plugin_filters. - plugin_filters = filters.copy() - else: - plugin_filters = {} - plugin_filters[ext_flavor.FLAVOR_NETWORK] = [flavor] - plugin_nets = plugin.get_networks(context, plugin_filters, fields) - for net in plugin_nets: - if not fields or ext_flavor.FLAVOR_NETWORK in fields: - net[ext_flavor.FLAVOR_NETWORK] = flavor - nets.append(net) - return nets - - def _get_flavor_by_network_id(self, context, network_id): - return meta_db_v2.get_flavor_by_network(context.session, network_id) - - def _get_flavor_by_router_id(self, context, router_id): - return meta_db_v2.get_flavor_by_router(context.session, router_id) - - def _get_plugin_by_network_id(self, context, network_id): - flavor = self._get_flavor_by_network_id(context, network_id) - return self._get_plugin(flavor) - - def create_port(self, context, port): - p = port['port'] - if 'network_id' not in p: - raise exc.NotFound - plugin = self._get_plugin_by_network_id(context, p['network_id']) - return plugin.create_port(context, port) - - def update_port(self, context, id, port): - port_in_db = self._get_port(context, id) - plugin = self._get_plugin_by_network_id(context, - port_in_db['network_id']) - return plugin.update_port(context, id, port) - - def delete_port(self, context, id, l3_port_check=True): - port_in_db = self._get_port(context, id) - plugin = self._get_plugin_by_network_id(context, - port_in_db['network_id']) - return plugin.delete_port(context, id, l3_port_check) - - # This is necessary since there is a case that - # 
NeutronManager.get_plugin()._make_port_dict is called. - def _make_port_dict(self, port): - context = neutron_context.get_admin_context() - plugin = self._get_plugin_by_network_id(context, - port['network_id']) - return plugin._make_port_dict(port) - - def get_port(self, context, id, fields=None): - port_in_db = self._get_port(context, id) - plugin = self._get_plugin_by_network_id(context, - port_in_db['network_id']) - return plugin.get_port(context, id, fields) - - def get_ports(self, context, filters=None, fields=None): - all_ports = [] - for flavor, plugin in self.plugins.items(): - if filters: - #NOTE: copy each time since a target plugin may modify - # plugin_filters. - plugin_filters = filters.copy() - else: - plugin_filters = {} - plugin_filters[ext_flavor.FLAVOR_NETWORK] = [flavor] - ports = plugin.get_ports(context, plugin_filters, fields) - all_ports += ports - return all_ports - - def create_subnet(self, context, subnet): - s = subnet['subnet'] - if 'network_id' not in s: - raise exc.NotFound - plugin = self._get_plugin_by_network_id(context, - s['network_id']) - return plugin.create_subnet(context, subnet) - - def update_subnet(self, context, id, subnet): - s = self.get_subnet(context, id) - plugin = self._get_plugin_by_network_id(context, - s['network_id']) - return plugin.update_subnet(context, id, subnet) - - def delete_subnet(self, context, id): - s = self.get_subnet(context, id) - plugin = self._get_plugin_by_network_id(context, - s['network_id']) - return plugin.delete_subnet(context, id) - - def _extend_router_dict(self, context, router): - flavor = self._get_flavor_by_router_id(context, router['id']) - router[ext_flavor.FLAVOR_ROUTER] = flavor - - def create_router(self, context, router): - r = router['router'] - flavor = r.get(ext_flavor.FLAVOR_ROUTER) - if str(flavor) not in self.l3_plugins: - flavor = self.default_l3_flavor - plugin = self._get_l3_plugin(flavor) - r_in_db = plugin.create_router(context, router) - LOG.debug(_("Created router: 
%(router_id)s with flavor " - "%(flavor)s"), - {'router_id': r_in_db['id'], 'flavor': flavor}) - try: - meta_db_v2.add_router_flavor_binding(context.session, - flavor, str(r_in_db['id'])) - except Exception: - LOG.exception(_('Failed to add flavor bindings')) - plugin.delete_router(context, r_in_db['id']) - raise FaildToAddFlavorBinding() - - LOG.debug(_("Created router: %s"), r_in_db['id']) - self._extend_router_dict(context, r_in_db) - return r_in_db - - def update_router(self, context, id, router): - flavor = meta_db_v2.get_flavor_by_router(context.session, id) - plugin = self._get_l3_plugin(flavor) - return plugin.update_router(context, id, router) - - def delete_router(self, context, id): - flavor = meta_db_v2.get_flavor_by_router(context.session, id) - plugin = self._get_l3_plugin(flavor) - return plugin.delete_router(context, id) - - def get_router(self, context, id, fields=None): - flavor = meta_db_v2.get_flavor_by_router(context.session, id) - plugin = self._get_l3_plugin(flavor) - router = plugin.get_router(context, id, fields) - if not fields or ext_flavor.FLAVOR_ROUTER in fields: - self._extend_router_dict(context, router) - return router - - def get_routers_with_flavor(self, context, filters=None, - fields=None): - collection = self._model_query(context, l3_db.Router) - r_model = meta_models_v2.RouterFlavor - collection = collection.join(r_model, - l3_db.Router.id == r_model.router_id) - if filters: - for key, value in filters.iteritems(): - if key == ext_flavor.FLAVOR_ROUTER: - column = meta_models_v2.RouterFlavor.flavor - else: - column = getattr(l3_db.Router, key, None) - if column: - collection = collection.filter(column.in_(value)) - return [self._make_router_dict(c, fields) for c in collection] - - def get_routers(self, context, filters=None, fields=None): - routers = self.get_routers_with_flavor(context, filters, - None) - return [self.get_router(context, router['id'], - fields) - for router in routers] diff --git 
a/neutron/plugins/metaplugin/proxy_neutron_plugin.py b/neutron/plugins/metaplugin/proxy_neutron_plugin.py deleted file mode 100644 index 61cc34026..000000000 --- a/neutron/plugins/metaplugin/proxy_neutron_plugin.py +++ /dev/null @@ -1,136 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2012, Nachi Ueno, NTT MCL, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo.config import cfg - -from neutron.db import db_base_plugin_v2 -from neutron.db import external_net_db -from neutron.db import l3_db -from neutron.openstack.common import log as logging -from neutronclient.common import exceptions -from neutronclient.v2_0 import client - - -LOG = logging.getLogger(__name__) - - -class ProxyPluginV2(db_base_plugin_v2.NeutronDbPluginV2, - external_net_db.External_net_db_mixin, - l3_db.L3_NAT_db_mixin): - supported_extension_aliases = ["external-net", "router"] - - def __init__(self, configfile=None): - super(ProxyPluginV2, self).__init__() - self.neutron = client.Client( - username=cfg.CONF.PROXY.admin_user, - password=cfg.CONF.PROXY.admin_password, - tenant_name=cfg.CONF.PROXY.admin_tenant_name, - auth_url=cfg.CONF.PROXY.auth_url, - auth_strategy=cfg.CONF.PROXY.auth_strategy, - region_name=cfg.CONF.PROXY.auth_region - ) - - def _get_client(self): - return self.neutron - - def create_subnet(self, context, subnet): - subnet_remote = self._get_client().create_subnet(subnet) - subnet['subnet']['id'] = subnet_remote['id'] - 
tenant_id = self._get_tenant_id_for_create(context, subnet['subnet']) - subnet['subnet']['tenant_id'] = tenant_id - try: - subnet_in_db = super(ProxyPluginV2, self).create_subnet( - context, subnet) - except Exception: - self._get_client().delete_subnet(subnet_remote['id']) - return subnet_in_db - - def update_subnet(self, context, id, subnet): - subnet_in_db = super(ProxyPluginV2, self).update_subnet( - context, id, subnet) - try: - self._get_client().update_subnet(id, subnet) - except Exception as e: - LOG.error(_("Update subnet failed: %s"), e) - return subnet_in_db - - def delete_subnet(self, context, id): - try: - self._get_client().delete_subnet(id) - except exceptions.NotFound: - LOG.warn(_("Subnet in remote have already deleted")) - return super(ProxyPluginV2, self).delete_subnet(context, id) - - def create_network(self, context, network): - network_remote = self._get_client().create_network(network) - network['network']['id'] = network_remote['id'] - tenant_id = self._get_tenant_id_for_create(context, network['network']) - network['network']['tenant_id'] = tenant_id - try: - network_in_db = super(ProxyPluginV2, self).create_network( - context, network) - except Exception: - self._get_client().delete_network(network_remote['id']) - return network_in_db - - def update_network(self, context, id, network): - network_in_db = super(ProxyPluginV2, self).update_network( - context, id, network) - try: - self._get_client().update_network(id, network) - except Exception as e: - LOG.error(_("Update network failed: %s"), e) - return network_in_db - - def delete_network(self, context, id): - try: - self._get_client().delete_network(id) - except exceptions.NetworkNotFoundClient: - LOG.warn(_("Network in remote have already deleted")) - return super(ProxyPluginV2, self).delete_network(context, id) - - def create_port(self, context, port): - port_remote = self._get_client().create_port(port) - port['port']['id'] = port_remote['id'] - tenant_id = 
self._get_tenant_id_for_create(context, port['port']) - port['port']['tenant_id'] = tenant_id - try: - port_in_db = super(ProxyPluginV2, self).create_port( - context, port) - except Exception: - self._get_client().delete_port(port_remote['id']) - return port_in_db - - def update_port(self, context, id, port): - port_in_db = super(ProxyPluginV2, self).update_port( - context, id, port) - try: - self._get_client().update_port(id, port) - except Exception as e: - LOG.error(_("Update port failed: %s"), e) - return port_in_db - - def delete_port(self, context, id, l3_port_check=True): - if l3_port_check: - self.prevent_l3_port_deletion(context, id) - self.disassociate_floatingips(context, id) - - try: - self._get_client().delete_port(id) - except exceptions.PortNotFoundClient: - LOG.warn(_("Port in remote have already deleted")) - return super(ProxyPluginV2, self).delete_port(context, id) diff --git a/neutron/plugins/midonet/__init__.py b/neutron/plugins/midonet/__init__.py deleted file mode 100644 index 439ff6594..000000000 --- a/neutron/plugins/midonet/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright (C) 2012 Midokura Japan K.K. -# Copyright (C) 2013 Midokura PTE LTD -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
diff --git a/neutron/plugins/midonet/agent/__init__.py b/neutron/plugins/midonet/agent/__init__.py deleted file mode 100644 index 9fddc1976..000000000 --- a/neutron/plugins/midonet/agent/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright (C) 2013 Midokura PTE LTD -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. diff --git a/neutron/plugins/midonet/agent/midonet_driver.py b/neutron/plugins/midonet/agent/midonet_driver.py deleted file mode 100644 index ada98a3d1..000000000 --- a/neutron/plugins/midonet/agent/midonet_driver.py +++ /dev/null @@ -1,52 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright (C) 2013 Midokura PTE LTD -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# -# @author: Rossella Sblendido, Midokura Japan KK -# @author: Tomoe Sugihara, Midokura Japan KK -# @author: Ryu Ishimoto, Midokura Japan KK - -from neutron.agent.linux import dhcp -from neutron.openstack.common import log as logging -from neutron.plugins.midonet.common import config # noqa - -LOG = logging.getLogger(__name__) - - -class DhcpNoOpDriver(dhcp.DhcpLocalProcess): - - @classmethod - def existing_dhcp_networks(cls, conf, root_helper): - """Return a list of existing networks ids that we have configs for.""" - return [] - - @classmethod - def check_version(cls): - """Execute version checks on DHCP server.""" - return float(1.0) - - def disable(self, retain_port=False): - """Disable DHCP for this network.""" - if not retain_port: - self.device_manager.destroy(self.network, self.interface_name) - self._remove_config_files() - - def reload_allocations(self): - """Force the DHCP server to reload the assignment database.""" - pass - - def spawn_process(self): - pass diff --git a/neutron/plugins/midonet/common/__init__.py b/neutron/plugins/midonet/common/__init__.py deleted file mode 100644 index 9fddc1976..000000000 --- a/neutron/plugins/midonet/common/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright (C) 2013 Midokura PTE LTD -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
diff --git a/neutron/plugins/midonet/common/config.py b/neutron/plugins/midonet/common/config.py deleted file mode 100644 index 924474f5b..000000000 --- a/neutron/plugins/midonet/common/config.py +++ /dev/null @@ -1,46 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright (C) 2012 Midokura Japan K.K. -# Copyright (C) 2013 Midokura PTE LTD -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Tomoe Sugihara, Midokura Japan KK - -from oslo.config import cfg - -midonet_opts = [ - cfg.StrOpt('midonet_uri', default='http://localhost:8080/midonet-api', - help=_('MidoNet API server URI.')), - cfg.StrOpt('username', default='admin', - help=_('MidoNet admin username.')), - cfg.StrOpt('password', default='passw0rd', - secret=True, - help=_('MidoNet admin password.')), - cfg.StrOpt('project_id', - default='77777777-7777-7777-7777-777777777777', - help=_('ID of the project that MidoNet admin user' - 'belongs to.')), - cfg.StrOpt('provider_router_id', - help=_('Virtual provider router ID.')), - cfg.StrOpt('mode', - default='dev', - help=_('Operational mode. 
Internal dev use only.')), - cfg.StrOpt('midonet_host_uuid_path', - default='/etc/midolman/host_uuid.properties', - help=_('Path to midonet host uuid file')) -] - - -cfg.CONF.register_opts(midonet_opts, "MIDONET") diff --git a/neutron/plugins/midonet/common/net_util.py b/neutron/plugins/midonet/common/net_util.py deleted file mode 100644 index 884048675..000000000 --- a/neutron/plugins/midonet/common/net_util.py +++ /dev/null @@ -1,68 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright (C) 2013 Midokura PTE LTD -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# -# @author: Ryu Ishimoto, Midokura Japan KK - - -from neutron.common import constants - - -def subnet_str(cidr): - """Convert the cidr string to x.x.x.x_y format - - :param cidr: CIDR in x.x.x.x/y format - """ - if cidr is None: - return None - return cidr.replace("/", "_") - - -def net_addr(addr): - """Get network address prefix and length from a given address.""" - if addr is None: - return (None, None) - nw_addr, nw_len = addr.split('/') - nw_len = int(nw_len) - return nw_addr, nw_len - - -def get_ethertype_value(ethertype): - """Convert string representation of ethertype to the numerical.""" - if ethertype is None: - return None - mapping = { - 'ipv4': 0x0800, - 'ipv6': 0x86DD, - 'arp': 0x806 - } - return mapping.get(ethertype.lower()) - - -def get_protocol_value(protocol): - """Convert string representation of protocol to the numerical.""" - if protocol is None: - return None - - if isinstance(protocol, int): - return protocol - - mapping = { - constants.PROTO_NAME_TCP: constants.PROTO_NUM_TCP, - constants.PROTO_NAME_UDP: constants.PROTO_NUM_UDP, - constants.PROTO_NAME_ICMP: constants.PROTO_NUM_ICMP - } - return mapping.get(protocol.lower()) diff --git a/neutron/plugins/midonet/midonet_lib.py b/neutron/plugins/midonet/midonet_lib.py deleted file mode 100644 index 74d2bae6a..000000000 --- a/neutron/plugins/midonet/midonet_lib.py +++ /dev/null @@ -1,696 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright (C) 2012 Midokura Japan K.K. -# Copyright (C) 2013 Midokura PTE LTD -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Tomoe Sugihara, Midokura Japan KK -# @author: Ryu Ishimoto, Midokura Japan KK -# @author: Rossella Sblendido, Midokura Japan KK -# @author: Duarte Nunes, Midokura Japan KK - -from midonetclient import exc -from webob import exc as w_exc - -from neutron.common import exceptions as n_exc -from neutron.openstack.common import log as logging -from neutron.plugins.midonet.common import net_util - -LOG = logging.getLogger(__name__) - - -def handle_api_error(fn): - """Wrapper for methods that throws custom exceptions.""" - def wrapped(*args, **kwargs): - try: - return fn(*args, **kwargs) - except (w_exc.HTTPException, - exc.MidoApiConnectionError) as ex: - raise MidonetApiException(msg=ex) - return wrapped - - -class MidonetResourceNotFound(n_exc.NotFound): - message = _('MidoNet %(resource_type)s %(id)s could not be found') - - -class MidonetApiException(n_exc.NeutronException): - message = _("MidoNet API error: %(msg)s") - - -class MidoClient: - - def __init__(self, mido_api): - self.mido_api = mido_api - - @classmethod - def _fill_dto(cls, dto, fields): - for field_name, field_value in fields.iteritems(): - # We assume the setters are named the - # same way as the attributes themselves. 
- try: - getattr(dto, field_name)(field_value) - except AttributeError: - pass - return dto - - @classmethod - def _create_dto(cls, dto, fields): - return cls._fill_dto(dto, fields).create() - - @classmethod - def _update_dto(cls, dto, fields): - return cls._fill_dto(dto, fields).update() - - @handle_api_error - def create_bridge(self, **kwargs): - """Create a new bridge - - :param \**kwargs: configuration of the new bridge - :returns: newly created bridge - """ - LOG.debug(_("MidoClient.create_bridge called: " - "kwargs=%(kwargs)s"), {'kwargs': kwargs}) - return self._create_dto(self.mido_api.add_bridge(), kwargs) - - @handle_api_error - def delete_bridge(self, id): - """Delete a bridge - - :param id: id of the bridge - """ - LOG.debug(_("MidoClient.delete_bridge called: id=%(id)s"), {'id': id}) - return self.mido_api.delete_bridge(id) - - @handle_api_error - def get_bridge(self, id): - """Get a bridge - - :param id: id of the bridge - :returns: requested bridge. None if bridge does not exist. 
- """ - LOG.debug(_("MidoClient.get_bridge called: id=%s"), id) - try: - return self.mido_api.get_bridge(id) - except w_exc.HTTPNotFound: - raise MidonetResourceNotFound(resource_type='Bridge', id=id) - - @handle_api_error - def update_bridge(self, id, **kwargs): - """Update a bridge of the given id with the new fields - - :param id: id of the bridge - :param \**kwargs: the fields to update and their values - :returns: bridge object - """ - LOG.debug(_("MidoClient.update_bridge called: " - "id=%(id)s, kwargs=%(kwargs)s"), - {'id': id, 'kwargs': kwargs}) - try: - return self._update_dto(self.mido_api.get_bridge(id), kwargs) - except w_exc.HTTPNotFound: - raise MidonetResourceNotFound(resource_type='Bridge', id=id) - - @handle_api_error - def create_dhcp(self, bridge, gateway_ip, cidr, host_rts=None, - dns_servers=None): - """Create a new DHCP entry - - :param bridge: bridge object to add dhcp to - :param gateway_ip: IP address of gateway - :param cidr: subnet represented as x.x.x.x/y - :param host_rts: list of routes set in the host - :param dns_servers: list of dns servers - :returns: newly created dhcp - """ - LOG.debug(_("MidoClient.create_dhcp called: bridge=%(bridge)s, " - "cidr=%(cidr)s, gateway_ip=%(gateway_ip)s, " - "host_rts=%(host_rts)s, dns_servers=%(dns_servers)s"), - {'bridge': bridge, 'cidr': cidr, 'gateway_ip': gateway_ip, - 'host_rts': host_rts, 'dns_servers': dns_servers}) - self.mido_api.add_bridge_dhcp(bridge, gateway_ip, cidr, - host_rts=host_rts, - dns_nservers=dns_servers) - - @handle_api_error - def add_dhcp_host(self, bridge, cidr, ip, mac): - """Add DHCP host entry - - :param bridge: bridge the DHCP is configured for - :param cidr: subnet represented as x.x.x.x/y - :param ip: IP address - :param mac: MAC address - """ - LOG.debug(_("MidoClient.add_dhcp_host called: bridge=%(bridge)s, " - "cidr=%(cidr)s, ip=%(ip)s, mac=%(mac)s"), - {'bridge': bridge, 'cidr': cidr, 'ip': ip, 'mac': mac}) - subnet = 
bridge.get_dhcp_subnet(net_util.subnet_str(cidr)) - if subnet is None: - raise MidonetApiException(msg=_("Tried to add to" - "non-existent DHCP")) - - subnet.add_dhcp_host().ip_addr(ip).mac_addr(mac).create() - - @handle_api_error - def remove_dhcp_host(self, bridge, cidr, ip, mac): - """Remove DHCP host entry - - :param bridge: bridge the DHCP is configured for - :param cidr: subnet represented as x.x.x.x/y - :param ip: IP address - :param mac: MAC address - """ - LOG.debug(_("MidoClient.remove_dhcp_host called: bridge=%(bridge)s, " - "cidr=%(cidr)s, ip=%(ip)s, mac=%(mac)s"), - {'bridge': bridge, 'cidr': cidr, 'ip': ip, 'mac': mac}) - subnet = bridge.get_dhcp_subnet(net_util.subnet_str(cidr)) - if subnet is None: - LOG.warn(_("Tried to delete mapping from non-existent subnet")) - return - - for dh in subnet.get_dhcp_hosts(): - if dh.get_mac_addr() == mac and dh.get_ip_addr() == ip: - LOG.debug(_("MidoClient.remove_dhcp_host: Deleting %(dh)r"), - {"dh": dh}) - dh.delete() - - @handle_api_error - def delete_dhcp_host(self, bridge_id, cidr, ip, mac): - """Delete DHCP host entry - - :param bridge_id: id of the bridge of the DHCP - :param cidr: subnet represented as x.x.x.x/y - :param ip: IP address - :param mac: MAC address - """ - LOG.debug(_("MidoClient.delete_dhcp_host called: " - "bridge_id=%(bridge_id)s, cidr=%(cidr)s, ip=%(ip)s, " - "mac=%(mac)s"), {'bridge_id': bridge_id, - 'cidr': cidr, - 'ip': ip, 'mac': mac}) - bridge = self.get_bridge(bridge_id) - self.remove_dhcp_host(bridge, net_util.subnet_str(cidr), ip, mac) - - @handle_api_error - def delete_dhcp(self, bridge, cidr): - """Delete a DHCP entry - - :param bridge: bridge to remove DHCP from - :param cidr: subnet represented as x.x.x.x/y - """ - LOG.debug(_("MidoClient.delete_dhcp called: bridge=%(bridge)s, " - "cidr=%(cidr)s"), - {'bridge': bridge, 'cidr': cidr}) - dhcp_subnets = bridge.get_dhcp_subnets() - net_addr, net_len = net_util.net_addr(cidr) - if not dhcp_subnets: - raise MidonetApiException( - 
msg=_("Tried to delete non-existent DHCP")) - for dhcp in dhcp_subnets: - if dhcp.get_subnet_prefix() == net_addr: - dhcp.delete() - break - - @handle_api_error - def delete_port(self, id, delete_chains=False): - """Delete a port - - :param id: id of the port - """ - LOG.debug(_("MidoClient.delete_port called: id=%(id)s, " - "delete_chains=%(delete_chains)s"), - {'id': id, 'delete_chains': delete_chains}) - if delete_chains: - self.delete_port_chains(id) - - self.mido_api.delete_port(id) - - @handle_api_error - def get_port(self, id): - """Get a port - - :param id: id of the port - :returns: requested port. None if it does not exist - """ - LOG.debug(_("MidoClient.get_port called: id=%(id)s"), {'id': id}) - try: - return self.mido_api.get_port(id) - except w_exc.HTTPNotFound: - raise MidonetResourceNotFound(resource_type='Port', id=id) - - @handle_api_error - def add_bridge_port(self, bridge, **kwargs): - """Add a port on a bridge - - :param bridge: bridge to add a new port to - :param \**kwargs: configuration of the new port - :returns: newly created port - """ - LOG.debug(_("MidoClient.add_bridge_port called: " - "bridge=%(bridge)s, kwargs=%(kwargs)s"), - {'bridge': bridge, 'kwargs': kwargs}) - return self._create_dto(self.mido_api.add_bridge_port(bridge), kwargs) - - @handle_api_error - def update_port(self, id, **kwargs): - """Update a port of the given id with the new fields - - :param id: id of the port - :param \**kwargs: the fields to update and their values - """ - LOG.debug(_("MidoClient.update_port called: " - "id=%(id)s, kwargs=%(kwargs)s"), - {'id': id, 'kwargs': kwargs}) - try: - return self._update_dto(self.mido_api.get_port(id), kwargs) - except w_exc.HTTPNotFound: - raise MidonetResourceNotFound(resource_type='Port', id=id) - - @handle_api_error - def add_router_port(self, router, **kwargs): - """Add a new port to an existing router. 
- - :param router: router to add a new port to - :param \**kwargs: configuration of the new port - :returns: newly created port - """ - return self._create_dto(self.mido_api.add_router_port(router), kwargs) - - @handle_api_error - def create_router(self, **kwargs): - """Create a new router - - :param \**kwargs: configuration of the new router - :returns: newly created router - """ - LOG.debug(_("MidoClient.create_router called: " - "kwargs=%(kwargs)s"), {'kwargs': kwargs}) - return self._create_dto(self.mido_api.add_router(), kwargs) - - @handle_api_error - def delete_router(self, id): - """Delete a router - - :param id: id of the router - """ - LOG.debug(_("MidoClient.delete_router called: id=%(id)s"), {'id': id}) - return self.mido_api.delete_router(id) - - @handle_api_error - def get_router(self, id): - """Get a router with the given id - - :param id: id of the router - :returns: requested router object. None if it does not exist. - """ - LOG.debug(_("MidoClient.get_router called: id=%(id)s"), {'id': id}) - try: - return self.mido_api.get_router(id) - except w_exc.HTTPNotFound: - raise MidonetResourceNotFound(resource_type='Router', id=id) - - @handle_api_error - def update_router(self, id, **kwargs): - """Update a router of the given id with the new name - - :param id: id of the router - :param \**kwargs: the fields to update and their values - :returns: router object - """ - LOG.debug(_("MidoClient.update_router called: " - "id=%(id)s, kwargs=%(kwargs)s"), - {'id': id, 'kwargs': kwargs}) - try: - return self._update_dto(self.mido_api.get_router(id), kwargs) - except w_exc.HTTPNotFound: - raise MidonetResourceNotFound(resource_type='Router', id=id) - - @handle_api_error - def delete_route(self, id): - return self.mido_api.delete_route(id) - - @handle_api_error - def add_dhcp_route_option(self, bridge, cidr, gw_ip, dst_ip): - """Add Option121 route to subnet - - :param bridge: Bridge to add the option route to - :param cidr: subnet represented as x.x.x.x/y - 
:param gw_ip: IP address of the next hop - :param dst_ip: IP address of the destination, in x.x.x.x/y format - """ - LOG.debug(_("MidoClient.add_dhcp_route_option called: " - "bridge=%(bridge)s, cidr=%(cidr)s, gw_ip=%(gw_ip)s" - "dst_ip=%(dst_ip)s"), - {"bridge": bridge, "cidr": cidr, "gw_ip": gw_ip, - "dst_ip": dst_ip}) - subnet = bridge.get_dhcp_subnet(net_util.subnet_str(cidr)) - if subnet is None: - raise MidonetApiException( - msg=_("Tried to access non-existent DHCP")) - prefix, length = dst_ip.split("/") - routes = [{'destinationPrefix': prefix, 'destinationLength': length, - 'gatewayAddr': gw_ip}] - cur_routes = subnet.get_opt121_routes() - if cur_routes: - routes = routes + cur_routes - subnet.opt121_routes(routes).update() - - @handle_api_error - def link(self, port, peer_id): - """Link a port to a given peerId.""" - self.mido_api.link(port, peer_id) - - @handle_api_error - def delete_port_routes(self, routes, port_id): - """Remove routes whose next hop port is the given port ID.""" - for route in routes: - if route.get_next_hop_port() == port_id: - self.mido_api.delete_route(route.get_id()) - - @handle_api_error - def get_router_routes(self, router_id): - """Get all routes for the given router.""" - return self.mido_api.get_router_routes(router_id) - - @handle_api_error - def unlink(self, port): - """Unlink a port - - :param port: port object - """ - LOG.debug(_("MidoClient.unlink called: port=%(port)s"), - {'port': port}) - if port.get_peer_id(): - self.mido_api.unlink(port) - else: - LOG.warn(_("Attempted to unlink a port that was not linked. 
%s"), - port.get_id()) - - @handle_api_error - def remove_rules_by_property(self, tenant_id, chain_name, key, value): - """Remove all the rules that match the provided key and value.""" - LOG.debug(_("MidoClient.remove_rules_by_property called: " - "tenant_id=%(tenant_id)s, chain_name=%(chain_name)s" - "key=%(key)s, value=%(value)s"), - {'tenant_id': tenant_id, 'chain_name': chain_name, - 'key': key, 'value': value}) - chain = self.get_chain_by_name(tenant_id, chain_name) - if chain is None: - raise MidonetResourceNotFound(resource_type='Chain', - id=chain_name) - - for r in chain.get_rules(): - if key in r.get_properties(): - if r.get_properties()[key] == value: - self.mido_api.delete_rule(r.get_id()) - - @handle_api_error - def add_router_chains(self, router, inbound_chain_name, - outbound_chain_name): - """Create chains for a new router. - - Creates inbound and outbound chains for the router with the given - names, and the new chains are set on the router. - - :param router: router to set chains for - :param inbound_chain_name: Name of the inbound chain - :param outbound_chain_name: Name of the outbound chain - """ - LOG.debug(_("MidoClient.create_router_chains called: " - "router=%(router)s, inbound_chain_name=%(in_chain)s, " - "outbound_chain_name=%(out_chain)s"), - {"router": router, "in_chain": inbound_chain_name, - "out_chain": outbound_chain_name}) - tenant_id = router.get_tenant_id() - - inbound_chain = self.mido_api.add_chain().tenant_id(tenant_id).name( - inbound_chain_name,).create() - outbound_chain = self.mido_api.add_chain().tenant_id(tenant_id).name( - outbound_chain_name).create() - - # set chains to in/out filters - router.inbound_filter_id(inbound_chain.get_id()).outbound_filter_id( - outbound_chain.get_id()).update() - return inbound_chain, outbound_chain - - @handle_api_error - def delete_router_chains(self, id): - """Deletes chains of a router. 
- - :param id: router ID to delete chains of - """ - LOG.debug(_("MidoClient.delete_router_chains called: " - "id=%(id)s"), {'id': id}) - router = self.get_router(id) - if (router.get_inbound_filter_id()): - self.mido_api.delete_chain(router.get_inbound_filter_id()) - - if (router.get_outbound_filter_id()): - self.mido_api.delete_chain(router.get_outbound_filter_id()) - - @handle_api_error - def delete_port_chains(self, id): - """Deletes chains of a port. - - :param id: port ID to delete chains of - """ - LOG.debug(_("MidoClient.delete_port_chains called: " - "id=%(id)s"), {'id': id}) - port = self.get_port(id) - if (port.get_inbound_filter_id()): - self.mido_api.delete_chain(port.get_inbound_filter_id()) - - if (port.get_outbound_filter_id()): - self.mido_api.delete_chain(port.get_outbound_filter_id()) - - @handle_api_error - def get_link_port(self, router, peer_router_id): - """Setup a route on the router to the next hop router.""" - LOG.debug(_("MidoClient.get_link_port called: " - "router=%(router)s, peer_router_id=%(peer_router_id)s"), - {'router': router, 'peer_router_id': peer_router_id}) - # Find the port linked between the two routers - link_port = None - for p in router.get_peer_ports(): - if p.get_device_id() == peer_router_id: - link_port = p - break - return link_port - - @handle_api_error - def add_router_route(self, router, type='Normal', - src_network_addr=None, src_network_length=None, - dst_network_addr=None, dst_network_length=None, - next_hop_port=None, next_hop_gateway=None, - weight=100): - """Setup a route on the router.""" - return self.mido_api.add_router_route( - router, type=type, src_network_addr=src_network_addr, - src_network_length=src_network_length, - dst_network_addr=dst_network_addr, - dst_network_length=dst_network_length, - next_hop_port=next_hop_port, next_hop_gateway=next_hop_gateway, - weight=weight) - - @handle_api_error - def add_static_nat(self, tenant_id, chain_name, from_ip, to_ip, port_id, - nat_type='dnat', **kwargs): 
- """Add a static NAT entry - - :param tenant_id: owner fo the chain to add a NAT to - :param chain_name: name of the chain to add a NAT to - :param from_ip: IP to translate from - :param from_ip: IP to translate from - :param to_ip: IP to translate to - :param port_id: port to match on - :param nat_type: 'dnat' or 'snat' - """ - LOG.debug(_("MidoClient.add_static_nat called: " - "tenant_id=%(tenant_id)s, chain_name=%(chain_name)s, " - "from_ip=%(from_ip)s, to_ip=%(to_ip)s, " - "port_id=%(port_id)s, nat_type=%(nat_type)s"), - {'tenant_id': tenant_id, 'chain_name': chain_name, - 'from_ip': from_ip, 'to_ip': to_ip, - 'portid': port_id, 'nat_type': nat_type}) - if nat_type not in ['dnat', 'snat']: - raise ValueError(_("Invalid NAT type passed in %s") % nat_type) - - chain = self.get_chain_by_name(tenant_id, chain_name) - nat_targets = [] - nat_targets.append( - {'addressFrom': to_ip, 'addressTo': to_ip, - 'portFrom': 0, 'portTo': 0}) - - rule = chain.add_rule().type(nat_type).flow_action('accept').position( - 1).nat_targets(nat_targets).properties(kwargs) - - if nat_type == 'dnat': - rule = rule.nw_dst_address(from_ip).nw_dst_length(32).in_ports( - [port_id]) - else: - rule = rule.nw_src_address(from_ip).nw_src_length(32).out_ports( - [port_id]) - - return rule.create() - - @handle_api_error - def add_dynamic_snat(self, tenant_id, pre_chain_name, post_chain_name, - snat_ip, port_id, **kwargs): - """Add SNAT masquerading rule - - MidoNet requires two rules on the router, one to do NAT to a range of - ports, and another to retrieve back the original IP in the return - flow. 
- """ - pre_chain = self.get_chain_by_name(tenant_id, pre_chain_name) - post_chain = self.get_chain_by_name(tenant_id, post_chain_name) - - pre_chain.add_rule().nw_dst_address(snat_ip).nw_dst_length( - 32).type('rev_snat').flow_action('accept').in_ports( - [port_id]).properties(kwargs).position(1).create() - - nat_targets = [] - nat_targets.append( - {'addressFrom': snat_ip, 'addressTo': snat_ip, - 'portFrom': 1, 'portTo': 65535}) - - post_chain.add_rule().type('snat').flow_action( - 'accept').nat_targets(nat_targets).out_ports( - [port_id]).properties(kwargs).position(1).create() - - @handle_api_error - def remove_static_route(self, router, ip): - """Remove static route for the IP - - :param router: next hop router to remove the routes to - :param ip: IP address of the route to remove - """ - LOG.debug(_("MidoClient.remote_static_route called: " - "router=%(router)s, ip=%(ip)s"), - {'router': router, 'ip': ip}) - for r in router.get_routes(): - if (r.get_dst_network_addr() == ip and - r.get_dst_network_length() == 32): - self.mido_api.delete_route(r.get_id()) - - @handle_api_error - def update_port_chains(self, port, inbound_chain_id, outbound_chain_id): - """Bind inbound and outbound chains to the port.""" - LOG.debug(_("MidoClient.update_port_chains called: port=%(port)s" - "inbound_chain_id=%(inbound_chain_id)s, " - "outbound_chain_id=%(outbound_chain_id)s"), - {"port": port, "inbound_chain_id": inbound_chain_id, - "outbound_chain_id": outbound_chain_id}) - port.inbound_filter_id(inbound_chain_id).outbound_filter_id( - outbound_chain_id).update() - - @handle_api_error - def create_chain(self, tenant_id, name): - """Create a new chain.""" - LOG.debug(_("MidoClient.create_chain called: tenant_id=%(tenant_id)s " - " name=%(name)s"), {"tenant_id": tenant_id, "name": name}) - return self.mido_api.add_chain().tenant_id(tenant_id).name( - name).create() - - @handle_api_error - def delete_chain(self, id): - """Delete chain matching the ID.""" - 
LOG.debug(_("MidoClient.delete_chain called: id=%(id)s"), {"id": id}) - self.mido_api.delete_chain(id) - - @handle_api_error - def delete_chains_by_names(self, tenant_id, names): - """Delete chains matching the names given for a tenant.""" - LOG.debug(_("MidoClient.delete_chains_by_names called: " - "tenant_id=%(tenant_id)s names=%(names)s "), - {"tenant_id": tenant_id, "names": names}) - chains = self.mido_api.get_chains({'tenant_id': tenant_id}) - for c in chains: - if c.get_name() in names: - self.mido_api.delete_chain(c.get_id()) - - @handle_api_error - def get_chain_by_name(self, tenant_id, name): - """Get the chain by its name.""" - LOG.debug(_("MidoClient.get_chain_by_name called: " - "tenant_id=%(tenant_id)s name=%(name)s "), - {"tenant_id": tenant_id, "name": name}) - for c in self.mido_api.get_chains({'tenant_id': tenant_id}): - if c.get_name() == name: - return c - return None - - @handle_api_error - def get_port_group_by_name(self, tenant_id, name): - """Get the port group by name.""" - LOG.debug(_("MidoClient.get_port_group_by_name called: " - "tenant_id=%(tenant_id)s name=%(name)s "), - {"tenant_id": tenant_id, "name": name}) - for p in self.mido_api.get_port_groups({'tenant_id': tenant_id}): - if p.get_name() == name: - return p - return None - - @handle_api_error - def create_port_group(self, tenant_id, name): - """Create a port group - - Create a new port group for a given name and ID. 
- """ - LOG.debug(_("MidoClient.create_port_group called: " - "tenant_id=%(tenant_id)s name=%(name)s"), - {"tenant_id": tenant_id, "name": name}) - return self.mido_api.add_port_group().tenant_id(tenant_id).name( - name).create() - - @handle_api_error - def delete_port_group_by_name(self, tenant_id, name): - """Delete port group matching the name given for a tenant.""" - LOG.debug(_("MidoClient.delete_port_group_by_name called: " - "tenant_id=%(tenant_id)s name=%(name)s "), - {"tenant_id": tenant_id, "name": name}) - pgs = self.mido_api.get_port_groups({'tenant_id': tenant_id}) - for pg in pgs: - if pg.get_name() == name: - LOG.debug(_("Deleting pg %(id)s"), {"id": pg.get_id()}) - self.mido_api.delete_port_group(pg.get_id()) - - @handle_api_error - def add_port_to_port_group_by_name(self, tenant_id, name, port_id): - """Add a port to a port group with the given name.""" - LOG.debug(_("MidoClient.add_port_to_port_group_by_name called: " - "tenant_id=%(tenant_id)s name=%(name)s " - "port_id=%(port_id)s"), - {"tenant_id": tenant_id, "name": name, "port_id": port_id}) - pg = self.get_port_group_by_name(tenant_id, name) - if pg is None: - raise MidonetResourceNotFound(resource_type='PortGroup', id=name) - - pg = pg.add_port_group_port().port_id(port_id).create() - return pg - - @handle_api_error - def remove_port_from_port_groups(self, port_id): - """Remove a port binding from all the port groups.""" - LOG.debug(_("MidoClient.remove_port_from_port_groups called: " - "port_id=%(port_id)s"), {"port_id": port_id}) - port = self.get_port(port_id) - for pg in port.get_port_groups(): - pg.delete() - - @handle_api_error - def add_chain_rule(self, chain, action='accept', **kwargs): - """Create a new accept chain rule.""" - self.mido_api.add_chain_rule(chain, action, **kwargs) diff --git a/neutron/plugins/midonet/plugin.py b/neutron/plugins/midonet/plugin.py deleted file mode 100644 index 9a706d4a5..000000000 --- a/neutron/plugins/midonet/plugin.py +++ /dev/null @@ -1,1258 +0,0 
@@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright (C) 2012 Midokura Japan K.K. -# Copyright (C) 2013 Midokura PTE LTD -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Takaaki Suzuki, Midokura Japan KK -# @author: Tomoe Sugihara, Midokura Japan KK -# @author: Ryu Ishimoto, Midokura Japan KK -# @author: Rossella Sblendido, Midokura Japan KK -# @author: Duarte Nunes, Midokura Japan KK - -from midonetclient import api -from oslo.config import cfg -from sqlalchemy.orm import exc as sa_exc - -from neutron.api.v2 import attributes -from neutron.common import constants -from neutron.common import exceptions as n_exc -from neutron.common import rpc_compat -from neutron.common import topics -from neutron.db import agents_db -from neutron.db import agentschedulers_db -from neutron.db import db_base_plugin_v2 -from neutron.db import dhcp_rpc_base -from neutron.db import external_net_db -from neutron.db import l3_db -from neutron.db import models_v2 -from neutron.db import portbindings_db -from neutron.db import securitygroups_db -from neutron.extensions import external_net as ext_net -from neutron.extensions import l3 -from neutron.extensions import portbindings -from neutron.extensions import securitygroup as ext_sg -from neutron.openstack.common import excutils -from neutron.openstack.common import log as logging -from neutron.plugins.midonet.common import config # noqa -from neutron.plugins.midonet.common import net_util -from 
neutron.plugins.midonet import midonet_lib - -LOG = logging.getLogger(__name__) - -EXTERNAL_GW_INFO = l3.EXTERNAL_GW_INFO - -METADATA_DEFAULT_IP = "169.254.169.254/32" -OS_FLOATING_IP_RULE_KEY = 'OS_FLOATING_IP' -OS_SG_RULE_KEY = 'OS_SG_RULE_ID' -OS_TENANT_ROUTER_RULE_KEY = 'OS_TENANT_ROUTER_RULE' -PRE_ROUTING_CHAIN_NAME = "OS_PRE_ROUTING_%s" -PORT_INBOUND_CHAIN_NAME = "OS_PORT_%s_INBOUND" -PORT_OUTBOUND_CHAIN_NAME = "OS_PORT_%s_OUTBOUND" -POST_ROUTING_CHAIN_NAME = "OS_POST_ROUTING_%s" -SG_INGRESS_CHAIN_NAME = "OS_SG_%s_INGRESS" -SG_EGRESS_CHAIN_NAME = "OS_SG_%s_EGRESS" -SG_PORT_GROUP_NAME = "OS_PG_%s" -SNAT_RULE = 'SNAT' - - -def _get_nat_ips(type, fip): - """Get NAT IP address information. - - From the route type given, determine the source and target IP addresses - from the provided floating IP DB object. - """ - if type == 'pre-routing': - return fip["floating_ip_address"], fip["fixed_ip_address"] - elif type == 'post-routing': - return fip["fixed_ip_address"], fip["floating_ip_address"] - else: - raise ValueError(_("Invalid nat_type %s") % type) - - -def _nat_chain_names(router_id): - """Get the chain names for NAT. - - These names are used to associate MidoNet chains to the NAT rules - applied to the router. For each of these, there are two NAT types, - 'dnat' and 'snat' that are returned as keys, and the corresponding - chain names as their values. - """ - pre_routing_name = PRE_ROUTING_CHAIN_NAME % router_id - post_routing_name = POST_ROUTING_CHAIN_NAME % router_id - return {'pre-routing': pre_routing_name, 'post-routing': post_routing_name} - - -def _sg_chain_names(sg_id): - """Get the chain names for security group. - - These names are used to associate a security group to MidoNet chains. - There are two names for ingress and egress security group directions. 
- """ - ingress = SG_INGRESS_CHAIN_NAME % sg_id - egress = SG_EGRESS_CHAIN_NAME % sg_id - return {'ingress': ingress, 'egress': egress} - - -def _port_chain_names(port_id): - """Get the chain names for a port. - - These are chains to hold security group chains. - """ - inbound = PORT_INBOUND_CHAIN_NAME % port_id - outbound = PORT_OUTBOUND_CHAIN_NAME % port_id - return {'inbound': inbound, 'outbound': outbound} - - -def _sg_port_group_name(sg_id): - """Get the port group name for security group.. - - This name is used to associate a security group to MidoNet port groups. - """ - return SG_PORT_GROUP_NAME % sg_id - - -def _rule_direction(sg_direction): - """Convert the SG direction to MidoNet direction - - MidoNet terms them 'inbound' and 'outbound' instead of 'ingress' and - 'egress'. Also, the direction is reversed since MidoNet sees it - from the network port's point of view, not the VM's. - """ - if sg_direction == 'ingress': - return 'outbound' - elif sg_direction == 'egress': - return 'inbound' - else: - raise ValueError(_("Unrecognized direction %s") % sg_direction) - - -def _is_router_interface_port(port): - """Check whether the given port is a router interface port.""" - device_owner = port['device_owner'] - return (device_owner in l3_db.DEVICE_OWNER_ROUTER_INTF) - - -def _is_router_gw_port(port): - """Check whether the given port is a router gateway port.""" - device_owner = port['device_owner'] - return (device_owner in l3_db.DEVICE_OWNER_ROUTER_GW) - - -def _is_vif_port(port): - """Check whether the given port is a standard VIF port.""" - device_owner = port['device_owner'] - return (not _is_dhcp_port(port) and - device_owner not in (l3_db.DEVICE_OWNER_ROUTER_GW, - l3_db.DEVICE_OWNER_ROUTER_INTF)) - - -def _is_dhcp_port(port): - """Check whether the given port is a DHCP port.""" - device_owner = port['device_owner'] - return device_owner.startswith(constants.DEVICE_OWNER_DHCP) - - -def _check_resource_exists(func, id, name, raise_exc=False): - """Check 
whether the given resource exists in MidoNet data store.""" - try: - func(id) - except midonet_lib.MidonetResourceNotFound as exc: - LOG.error(_("There is no %(name)s with ID %(id)s in MidoNet."), - {"name": name, "id": id}) - if raise_exc: - raise MidonetPluginException(msg=exc) - - -class MidoRpcCallbacks(rpc_compat.RpcCallback, - dhcp_rpc_base.DhcpRpcCallbackMixin): - RPC_API_VERSION = '1.1' - - -class MidonetPluginException(n_exc.NeutronException): - message = _("%(msg)s") - - -class MidonetPluginV2(db_base_plugin_v2.NeutronDbPluginV2, - portbindings_db.PortBindingMixin, - external_net_db.External_net_db_mixin, - l3_db.L3_NAT_db_mixin, - agentschedulers_db.DhcpAgentSchedulerDbMixin, - securitygroups_db.SecurityGroupDbMixin): - - supported_extension_aliases = ['external-net', 'router', 'security-group', - 'agent', 'dhcp_agent_scheduler', 'binding'] - __native_bulk_support = False - - def __init__(self): - super(MidonetPluginV2, self).__init__() - # Read config values - midonet_conf = cfg.CONF.MIDONET - midonet_uri = midonet_conf.midonet_uri - admin_user = midonet_conf.username - admin_pass = midonet_conf.password - admin_project_id = midonet_conf.project_id - self.provider_router_id = midonet_conf.provider_router_id - self.provider_router = None - - self.mido_api = api.MidonetApi(midonet_uri, admin_user, - admin_pass, - project_id=admin_project_id) - self.client = midonet_lib.MidoClient(self.mido_api) - - # self.provider_router_id should have been set. 
- if self.provider_router_id is None: - msg = _('provider_router_id should be configured in the plugin ' - 'config file') - LOG.exception(msg) - raise MidonetPluginException(msg=msg) - - self.setup_rpc() - - self.base_binding_dict = { - portbindings.VIF_TYPE: portbindings.VIF_TYPE_MIDONET, - portbindings.VIF_DETAILS: { - # TODO(rkukura): Replace with new VIF security details - portbindings.CAP_PORT_FILTER: - 'security-group' in self.supported_extension_aliases}} - - def _get_provider_router(self): - if self.provider_router is None: - self.provider_router = self.client.get_router( - self.provider_router_id) - return self.provider_router - - def _dhcp_mappings(self, context, fixed_ips, mac): - for fixed_ip in fixed_ips: - subnet = self._get_subnet(context, fixed_ip["subnet_id"]) - if subnet["ip_version"] == 6: - # TODO(ryu) handle IPv6 - continue - if not subnet["enable_dhcp"]: - # Skip if DHCP is disabled - continue - yield subnet['cidr'], fixed_ip["ip_address"], mac - - def _metadata_subnets(self, context, fixed_ips): - for fixed_ip in fixed_ips: - subnet = self._get_subnet(context, fixed_ip["subnet_id"]) - if subnet["ip_version"] == 6: - continue - yield subnet['cidr'], fixed_ip["ip_address"] - - def _initialize_port_chains(self, port, in_chain, out_chain, sg_ids): - - tenant_id = port["tenant_id"] - - position = 1 - # mac spoofing protection - self._add_chain_rule(in_chain, action='drop', - dl_src=port["mac_address"], inv_dl_src=True, - position=position) - - # ip spoofing protection - for fixed_ip in port["fixed_ips"]: - position += 1 - self._add_chain_rule(in_chain, action="drop", - src_addr=fixed_ip["ip_address"] + "/32", - inv_nw_src=True, dl_type=0x0800, # IPv4 - position=position) - - # conntrack - position += 1 - self._add_chain_rule(in_chain, action='accept', - match_forward_flow=True, - position=position) - - # Reset the position to process egress - position = 1 - - # Add rule for SGs - if sg_ids: - for sg_id in sg_ids: - chain_name = 
_sg_chain_names(sg_id)["ingress"] - chain = self.client.get_chain_by_name(tenant_id, chain_name) - self._add_chain_rule(out_chain, action='jump', - jump_chain_id=chain.get_id(), - jump_chain_name=chain_name, - position=position) - position += 1 - - # add reverse flow matching at the end - self._add_chain_rule(out_chain, action='accept', - match_return_flow=True, - position=position) - position += 1 - - # fall back DROP rule at the end except for ARP - self._add_chain_rule(out_chain, action='drop', - dl_type=0x0806, # ARP - inv_dl_type=True, position=position) - - def _bind_port_to_sgs(self, context, port, sg_ids): - self._process_port_create_security_group(context, port, sg_ids) - if sg_ids is not None: - for sg_id in sg_ids: - pg_name = _sg_port_group_name(sg_id) - self.client.add_port_to_port_group_by_name( - port["tenant_id"], pg_name, port["id"]) - - def _unbind_port_from_sgs(self, context, port_id): - self._delete_port_security_group_bindings(context, port_id) - self.client.remove_port_from_port_groups(port_id) - - def _create_accept_chain_rule(self, context, sg_rule, chain=None): - direction = sg_rule["direction"] - tenant_id = sg_rule["tenant_id"] - sg_id = sg_rule["security_group_id"] - chain_name = _sg_chain_names(sg_id)[direction] - - if chain is None: - chain = self.client.get_chain_by_name(tenant_id, chain_name) - - pg_id = None - if sg_rule["remote_group_id"] is not None: - pg_name = _sg_port_group_name(sg_id) - pg = self.client.get_port_group_by_name(tenant_id, pg_name) - pg_id = pg.get_id() - - props = {OS_SG_RULE_KEY: str(sg_rule["id"])} - - # Determine source or destination address by looking at direction - src_pg_id = dst_pg_id = None - src_addr = dst_addr = None - src_port_to = dst_port_to = None - src_port_from = dst_port_from = None - if direction == "egress": - dst_pg_id = pg_id - dst_addr = sg_rule["remote_ip_prefix"] - dst_port_from = sg_rule["port_range_min"] - dst_port_to = sg_rule["port_range_max"] - else: - src_pg_id = pg_id - src_addr = 
sg_rule["remote_ip_prefix"] - src_port_from = sg_rule["port_range_min"] - src_port_to = sg_rule["port_range_max"] - - return self._add_chain_rule( - chain, action='accept', port_group_src=src_pg_id, - port_group_dst=dst_pg_id, - src_addr=src_addr, src_port_from=src_port_from, - src_port_to=src_port_to, - dst_addr=dst_addr, dst_port_from=dst_port_from, - dst_port_to=dst_port_to, - nw_proto=net_util.get_protocol_value(sg_rule["protocol"]), - dl_type=net_util.get_ethertype_value(sg_rule["ethertype"]), - properties=props) - - def _remove_nat_rules(self, context, fip): - router = self.client.get_router(fip["router_id"]) - self.client.remove_static_route(self._get_provider_router(), - fip["floating_ip_address"]) - - chain_names = _nat_chain_names(router.get_id()) - for _type, name in chain_names.iteritems(): - self.client.remove_rules_by_property( - router.get_tenant_id(), name, - OS_FLOATING_IP_RULE_KEY, fip["id"]) - - def setup_rpc(self): - # RPC support - self.topic = topics.PLUGIN - self.conn = rpc_compat.create_connection(new=True) - self.endpoints = [MidoRpcCallbacks(), - agents_db.AgentExtRpcCallback()] - self.conn.create_consumer(self.topic, self.endpoints, - fanout=False) - # Consume from all consumers in threads - self.conn.consume_in_threads() - - def create_subnet(self, context, subnet): - """Create Neutron subnet. - - Creates a Neutron subnet and a DHCP entry in MidoNet bridge. 
- """ - LOG.debug(_("MidonetPluginV2.create_subnet called: subnet=%r"), subnet) - - s = subnet["subnet"] - net = super(MidonetPluginV2, self).get_network( - context, subnet['subnet']['network_id'], fields=None) - - session = context.session - with session.begin(subtransactions=True): - sn_entry = super(MidonetPluginV2, self).create_subnet(context, - subnet) - bridge = self.client.get_bridge(sn_entry['network_id']) - - gateway_ip = s['gateway_ip'] - cidr = s['cidr'] - if s['enable_dhcp']: - dns_nameservers = None - host_routes = None - if s['dns_nameservers'] is not attributes.ATTR_NOT_SPECIFIED: - dns_nameservers = s['dns_nameservers'] - - if s['host_routes'] is not attributes.ATTR_NOT_SPECIFIED: - host_routes = s['host_routes'] - - self.client.create_dhcp(bridge, gateway_ip, cidr, - host_rts=host_routes, - dns_servers=dns_nameservers) - - # For external network, link the bridge to the provider router. - if net['router:external']: - self._link_bridge_to_gw_router( - bridge, self._get_provider_router(), gateway_ip, cidr) - - LOG.debug(_("MidonetPluginV2.create_subnet exiting: sn_entry=%r"), - sn_entry) - return sn_entry - - def delete_subnet(self, context, id): - """Delete Neutron subnet. - - Delete neutron network and its corresponding MidoNet bridge. 
- """ - LOG.debug(_("MidonetPluginV2.delete_subnet called: id=%s"), id) - subnet = super(MidonetPluginV2, self).get_subnet(context, id, - fields=None) - net = super(MidonetPluginV2, self).get_network(context, - subnet['network_id'], - fields=None) - session = context.session - with session.begin(subtransactions=True): - - super(MidonetPluginV2, self).delete_subnet(context, id) - bridge = self.client.get_bridge(subnet['network_id']) - if subnet['enable_dhcp']: - self.client.delete_dhcp(bridge, subnet['cidr']) - - # If the network is external, clean up routes, links, ports - if net[ext_net.EXTERNAL]: - self._unlink_bridge_from_gw_router( - bridge, self._get_provider_router()) - - LOG.debug(_("MidonetPluginV2.delete_subnet exiting")) - - def create_network(self, context, network): - """Create Neutron network. - - Create a new Neutron network and its corresponding MidoNet bridge. - """ - LOG.debug(_('MidonetPluginV2.create_network called: network=%r'), - network) - net_data = network['network'] - tenant_id = self._get_tenant_id_for_create(context, net_data) - net_data['tenant_id'] = tenant_id - self._ensure_default_security_group(context, tenant_id) - - bridge = self.client.create_bridge(**net_data) - net_data['id'] = bridge.get_id() - - session = context.session - with session.begin(subtransactions=True): - net = super(MidonetPluginV2, self).create_network(context, network) - self._process_l3_create(context, net, net_data) - - LOG.debug(_("MidonetPluginV2.create_network exiting: net=%r"), net) - return net - - def update_network(self, context, id, network): - """Update Neutron network. - - Update an existing Neutron network and its corresponding MidoNet - bridge. 
- """ - LOG.debug(_("MidonetPluginV2.update_network called: id=%(id)r, " - "network=%(network)r"), {'id': id, 'network': network}) - session = context.session - with session.begin(subtransactions=True): - net = super(MidonetPluginV2, self).update_network( - context, id, network) - self._process_l3_update(context, net, network['network']) - self.client.update_bridge(id, **network['network']) - - LOG.debug(_("MidonetPluginV2.update_network exiting: net=%r"), net) - return net - - def get_network(self, context, id, fields=None): - """Get Neutron network. - - Retrieves a Neutron network and its corresponding MidoNet bridge. - """ - LOG.debug(_("MidonetPluginV2.get_network called: id=%(id)r, " - "fields=%(fields)r"), {'id': id, 'fields': fields}) - qnet = super(MidonetPluginV2, self).get_network(context, id, fields) - self.client.get_bridge(id) - - LOG.debug(_("MidonetPluginV2.get_network exiting: qnet=%r"), qnet) - return qnet - - def delete_network(self, context, id): - """Delete a network and its corresponding MidoNet bridge.""" - LOG.debug(_("MidonetPluginV2.delete_network called: id=%r"), id) - self.client.delete_bridge(id) - try: - with context.session.begin(subtransactions=True): - self._process_l3_delete(context, id) - super(MidonetPluginV2, self).delete_network(context, id) - except Exception: - with excutils.save_and_reraise_exception(): - LOG.error(_('Failed to delete neutron db, while Midonet ' - 'bridge=%r had been deleted'), id) - - def create_port(self, context, port): - """Create a L2 port in Neutron/MidoNet.""" - LOG.debug(_("MidonetPluginV2.create_port called: port=%r"), port) - port_data = port['port'] - - # Create a bridge port in MidoNet and set the bridge port ID as the - # port ID in Neutron. 
- bridge = self.client.get_bridge(port_data["network_id"]) - tenant_id = bridge.get_tenant_id() - asu = port_data.get("admin_state_up", True) - bridge_port = self.client.add_bridge_port(bridge, - admin_state_up=asu) - port_data["id"] = bridge_port.get_id() - - try: - session = context.session - with session.begin(subtransactions=True): - # Create a Neutron port - new_port = super(MidonetPluginV2, self).create_port(context, - port) - port_data.update(new_port) - self._ensure_default_security_group_on_port(context, - port) - if _is_vif_port(port_data): - # Bind security groups to the port - sg_ids = self._get_security_groups_on_port(context, port) - self._bind_port_to_sgs(context, new_port, sg_ids) - - # Create port chains - port_chains = {} - for d, name in _port_chain_names( - new_port["id"]).iteritems(): - port_chains[d] = self.client.create_chain(tenant_id, - name) - - self._initialize_port_chains(port_data, - port_chains['inbound'], - port_chains['outbound'], - sg_ids) - - # Update the port with the chain - self.client.update_port_chains( - bridge_port, port_chains["inbound"].get_id(), - port_chains["outbound"].get_id()) - - # DHCP mapping is only for VIF ports - for cidr, ip, mac in self._dhcp_mappings( - context, port_data["fixed_ips"], - port_data["mac_address"]): - self.client.add_dhcp_host(bridge, cidr, ip, mac) - - elif _is_dhcp_port(port_data): - # For DHCP port, add a metadata route - for cidr, ip in self._metadata_subnets( - context, port_data["fixed_ips"]): - self.client.add_dhcp_route_option(bridge, cidr, ip, - METADATA_DEFAULT_IP) - - self._process_portbindings_create_and_update(context, - port_data, new_port) - except Exception as ex: - # Try removing the MidoNet port before raising an exception. 
- with excutils.save_and_reraise_exception(): - LOG.error(_("Failed to create a port on network %(net_id)s: " - "%(err)s"), - {"net_id": port_data["network_id"], "err": ex}) - self.client.delete_port(bridge_port.get_id()) - - LOG.debug(_("MidonetPluginV2.create_port exiting: port=%r"), new_port) - return new_port - - def get_port(self, context, id, fields=None): - """Retrieve port.""" - LOG.debug(_("MidonetPluginV2.get_port called: id=%(id)s " - "fields=%(fields)r"), {'id': id, 'fields': fields}) - port = super(MidonetPluginV2, self).get_port(context, id, fields) - "Check if the port exists in MidoNet DB""" - try: - self.client.get_port(id) - except midonet_lib.MidonetResourceNotFound as exc: - LOG.error(_("There is no port with ID %(id)s in MidoNet."), - {"id": id}) - port['status'] = constants.PORT_STATUS_ERROR - raise exc - LOG.debug(_("MidonetPluginV2.get_port exiting: port=%r"), port) - return port - - def get_ports(self, context, filters=None, fields=None): - """List neutron ports and verify that they exist in MidoNet.""" - LOG.debug(_("MidonetPluginV2.get_ports called: filters=%(filters)s " - "fields=%(fields)r"), - {'filters': filters, 'fields': fields}) - ports = super(MidonetPluginV2, self).get_ports(context, filters, - fields) - return ports - - def delete_port(self, context, id, l3_port_check=True): - """Delete a neutron port and corresponding MidoNet bridge port.""" - LOG.debug(_("MidonetPluginV2.delete_port called: id=%(id)s " - "l3_port_check=%(l3_port_check)r"), - {'id': id, 'l3_port_check': l3_port_check}) - # if needed, check to see if this is a port owned by - # and l3-router. If so, we should prevent deletion. - if l3_port_check: - self.prevent_l3_port_deletion(context, id) - - self.disassociate_floatingips(context, id) - port = self.get_port(context, id) - device_id = port['device_id'] - # If this port is for router interface/gw, unlink and delete. 
- if _is_router_interface_port(port): - self._unlink_bridge_from_router(device_id, id) - elif _is_router_gw_port(port): - # Gateway removed - # Remove all the SNAT rules that are tagged. - router = self._get_router(context, device_id) - tenant_id = router["tenant_id"] - chain_names = _nat_chain_names(device_id) - for _type, name in chain_names.iteritems(): - self.client.remove_rules_by_property( - tenant_id, name, OS_TENANT_ROUTER_RULE_KEY, - SNAT_RULE) - # Remove the default routes and unlink - self._remove_router_gateway(port['device_id']) - - self.client.delete_port(id, delete_chains=True) - try: - for cidr, ip, mac in self._dhcp_mappings( - context, port["fixed_ips"], port["mac_address"]): - self.client.delete_dhcp_host(port["network_id"], cidr, ip, - mac) - except Exception: - LOG.error(_("Failed to delete DHCP mapping for port %(id)s"), - {"id": id}) - - super(MidonetPluginV2, self).delete_port(context, id) - - def update_port(self, context, id, port): - """Handle port update, including security groups and fixed IPs.""" - with context.session.begin(subtransactions=True): - - # Get the port and save the fixed IPs - old_port = self._get_port(context, id) - net_id = old_port["network_id"] - mac = old_port["mac_address"] - old_ips = old_port["fixed_ips"] - # update the port DB - p = super(MidonetPluginV2, self).update_port(context, id, port) - - if "admin_state_up" in port["port"]: - asu = port["port"]["admin_state_up"] - mido_port = self.client.update_port(id, admin_state_up=asu) - - # If we're changing the admin_state_up flag and the port is - # associated with a router, then we also need to update the - # peer port. 
- if _is_router_interface_port(p): - self.client.update_port(mido_port.get_peer_id(), - admin_state_up=asu) - - new_ips = p["fixed_ips"] - if new_ips: - bridge = self.client.get_bridge(net_id) - # If it's a DHCP port, add a route to reach the MD server - if _is_dhcp_port(p): - for cidr, ip in self._metadata_subnets( - context, new_ips): - self.client.add_dhcp_route_option( - bridge, cidr, ip, METADATA_DEFAULT_IP) - else: - # IPs have changed. Re-map the DHCP entries - for cidr, ip, mac in self._dhcp_mappings( - context, old_ips, mac): - self.client.remove_dhcp_host( - bridge, cidr, ip, mac) - - for cidr, ip, mac in self._dhcp_mappings( - context, new_ips, mac): - self.client.add_dhcp_host( - bridge, cidr, ip, mac) - - if (self._check_update_deletes_security_groups(port) or - self._check_update_has_security_groups(port)): - self._unbind_port_from_sgs(context, p["id"]) - sg_ids = self._get_security_groups_on_port(context, port) - self._bind_port_to_sgs(context, p, sg_ids) - - self._process_portbindings_create_and_update(context, - port['port'], - p) - return p - - def create_router(self, context, router): - """Handle router creation. - - When a new Neutron router is created, its corresponding MidoNet router - is also created. In MidoNet, this router is initialized with chains - for inbound and outbound traffic, which will be used to hold other - chains that include various rules, such as NAT. - - :param router: Router information provided to create a new router. 
- """ - - # NOTE(dcahill): Similar to the NSX plugin, we completely override - # this method in order to be able to use the MidoNet ID as Neutron ID - # TODO(dcahill): Propose upstream patch for allowing - # 3rd parties to specify IDs as we do with l2 plugin - LOG.debug(_("MidonetPluginV2.create_router called: router=%(router)s"), - {"router": router}) - r = router['router'] - tenant_id = self._get_tenant_id_for_create(context, r) - r['tenant_id'] = tenant_id - mido_router = self.client.create_router(**r) - mido_router_id = mido_router.get_id() - - try: - has_gw_info = False - if EXTERNAL_GW_INFO in r: - has_gw_info = True - gw_info = r.pop(EXTERNAL_GW_INFO) - with context.session.begin(subtransactions=True): - # pre-generate id so it will be available when - # configuring external gw port - router_db = l3_db.Router(id=mido_router_id, - tenant_id=tenant_id, - name=r['name'], - admin_state_up=r['admin_state_up'], - status="ACTIVE") - context.session.add(router_db) - if has_gw_info: - self._update_router_gw_info(context, router_db['id'], - gw_info) - - router_data = self._make_router_dict(router_db) - - except Exception: - # Try removing the midonet router - with excutils.save_and_reraise_exception(): - self.client.delete_router(mido_router_id) - - # Create router chains - chain_names = _nat_chain_names(mido_router_id) - try: - self.client.add_router_chains(mido_router, - chain_names["pre-routing"], - chain_names["post-routing"]) - except Exception: - # Set the router status to Error - with context.session.begin(subtransactions=True): - r = self._get_router(context, router_data["id"]) - router_data['status'] = constants.NET_STATUS_ERROR - r['status'] = router_data['status'] - context.session.add(r) - - LOG.debug(_("MidonetPluginV2.create_router exiting: " - "router_data=%(router_data)s."), - {"router_data": router_data}) - return router_data - - def _set_router_gateway(self, id, gw_router, gw_ip): - """Set router uplink gateway - - :param ID: ID of the router - 
:param gw_router: gateway router to link to - :param gw_ip: gateway IP address - """ - LOG.debug(_("MidonetPluginV2.set_router_gateway called: id=%(id)s, " - "gw_router=%(gw_router)s, gw_ip=%(gw_ip)s"), - {'id': id, 'gw_router': gw_router, 'gw_ip': gw_ip}), - - router = self.client.get_router(id) - - # Create a port in the gw router - gw_port = self.client.add_router_port(gw_router, - port_address='169.254.255.1', - network_address='169.254.255.0', - network_length=30) - - # Create a port in the router - port = self.client.add_router_port(router, - port_address='169.254.255.2', - network_address='169.254.255.0', - network_length=30) - - # Link them - self.client.link(gw_port, port.get_id()) - - # Add a route for gw_ip to bring it down to the router - self.client.add_router_route(gw_router, type='Normal', - src_network_addr='0.0.0.0', - src_network_length=0, - dst_network_addr=gw_ip, - dst_network_length=32, - next_hop_port=gw_port.get_id(), - weight=100) - - # Add default route to uplink in the router - self.client.add_router_route(router, type='Normal', - src_network_addr='0.0.0.0', - src_network_length=0, - dst_network_addr='0.0.0.0', - dst_network_length=0, - next_hop_port=port.get_id(), - weight=100) - - def _remove_router_gateway(self, id): - """Clear router gateway - - :param ID: ID of the router - """ - LOG.debug(_("MidonetPluginV2.remove_router_gateway called: " - "id=%(id)s"), {'id': id}) - router = self.client.get_router(id) - - # delete the port that is connected to the gateway router - for p in router.get_ports(): - if p.get_port_address() == '169.254.255.2': - peer_port_id = p.get_peer_id() - if peer_port_id is not None: - self.client.unlink(p) - self.client.delete_port(peer_port_id) - - # delete default route - for r in router.get_routes(): - if (r.get_dst_network_addr() == '0.0.0.0' and - r.get_dst_network_length() == 0): - self.client.delete_route(r.get_id()) - - def update_router(self, context, id, router): - """Handle router updates.""" - 
LOG.debug(_("MidonetPluginV2.update_router called: id=%(id)s " - "router=%(router)r"), {"id": id, "router": router}) - - router_data = router["router"] - - # Check if the update included changes to the gateway. - gw_updated = l3_db.EXTERNAL_GW_INFO in router_data - with context.session.begin(subtransactions=True): - - # Update the Neutron DB - r = super(MidonetPluginV2, self).update_router(context, id, - router) - tenant_id = r["tenant_id"] - if gw_updated: - if (l3_db.EXTERNAL_GW_INFO in r and - r[l3_db.EXTERNAL_GW_INFO] is not None): - # Gateway created - gw_port_neutron = self._get_port( - context.elevated(), r["gw_port_id"]) - gw_ip = gw_port_neutron['fixed_ips'][0]['ip_address'] - - # First link routers and set up the routes - self._set_router_gateway(r["id"], - self._get_provider_router(), - gw_ip) - gw_port_midonet = self.client.get_link_port( - self._get_provider_router(), r["id"]) - - # Get the NAT chains and add dynamic SNAT rules. - chain_names = _nat_chain_names(r["id"]) - props = {OS_TENANT_ROUTER_RULE_KEY: SNAT_RULE} - self.client.add_dynamic_snat(tenant_id, - chain_names['pre-routing'], - chain_names['post-routing'], - gw_ip, - gw_port_midonet.get_id(), - **props) - - self.client.update_router(id, **router_data) - - LOG.debug(_("MidonetPluginV2.update_router exiting: router=%r"), r) - return r - - def delete_router(self, context, id): - """Handler for router deletion. - - Deleting a router on Neutron simply means deleting its corresponding - router in MidoNet. 
- - :param id: router ID to remove - """ - LOG.debug(_("MidonetPluginV2.delete_router called: id=%s"), id) - - self.client.delete_router_chains(id) - self.client.delete_router(id) - - super(MidonetPluginV2, self).delete_router(context, id) - - def _link_bridge_to_gw_router(self, bridge, gw_router, gw_ip, cidr): - """Link a bridge to the gateway router - - :param bridge: bridge - :param gw_router: gateway router to link to - :param gw_ip: IP address of gateway - :param cidr: network CIDR - """ - net_addr, net_len = net_util.net_addr(cidr) - - # create a port on the gateway router - gw_port = self.client.add_router_port(gw_router, port_address=gw_ip, - network_address=net_addr, - network_length=net_len) - - # create a bridge port, then link it to the router. - port = self.client.add_bridge_port(bridge) - self.client.link(gw_port, port.get_id()) - - # add a route for the subnet in the gateway router - self.client.add_router_route(gw_router, type='Normal', - src_network_addr='0.0.0.0', - src_network_length=0, - dst_network_addr=net_addr, - dst_network_length=net_len, - next_hop_port=gw_port.get_id(), - weight=100) - - def _unlink_bridge_from_gw_router(self, bridge, gw_router): - """Unlink a bridge from the gateway router - - :param bridge: bridge to unlink - :param gw_router: gateway router to unlink from - """ - # Delete routes and unlink the router and the bridge. 
- routes = self.client.get_router_routes(gw_router.get_id()) - - bridge_ports_to_delete = [ - p for p in gw_router.get_peer_ports() - if p.get_device_id() == bridge.get_id()] - - for p in bridge.get_peer_ports(): - if p.get_device_id() == gw_router.get_id(): - # delete the routes going to the bridge - for r in routes: - if r.get_next_hop_port() == p.get_id(): - self.client.delete_route(r.get_id()) - self.client.unlink(p) - self.client.delete_port(p.get_id()) - - # delete bridge port - for port in bridge_ports_to_delete: - self.client.delete_port(port.get_id()) - - def _link_bridge_to_router(self, router, bridge_port, net_addr, net_len, - gw_ip, metadata_gw_ip): - router_port = self.client.add_router_port( - router, network_length=net_len, network_address=net_addr, - port_address=gw_ip, admin_state_up=bridge_port['admin_state_up']) - self.client.link(router_port, bridge_port['id']) - self.client.add_router_route(router, type='Normal', - src_network_addr='0.0.0.0', - src_network_length=0, - dst_network_addr=net_addr, - dst_network_length=net_len, - next_hop_port=router_port.get_id(), - weight=100) - - if metadata_gw_ip: - # Add a route for the metadata server. - # Not all VM images supports DHCP option 121. Add a route for the - # Metadata server in the router to forward the packet to the bridge - # that will send them to the Metadata Proxy. 
- md_net_addr, md_net_len = net_util.net_addr(METADATA_DEFAULT_IP) - self.client.add_router_route( - router, type='Normal', src_network_addr=net_addr, - src_network_length=net_len, - dst_network_addr=md_net_addr, - dst_network_length=md_net_len, - next_hop_port=router_port.get_id(), - next_hop_gateway=metadata_gw_ip) - - def _unlink_bridge_from_router(self, router_id, bridge_port_id): - """Unlink a bridge from a router.""" - - # Remove the routes to the port and unlink the port - bridge_port = self.client.get_port(bridge_port_id) - routes = self.client.get_router_routes(router_id) - self.client.delete_port_routes(routes, bridge_port.get_peer_id()) - self.client.unlink(bridge_port) - - def add_router_interface(self, context, router_id, interface_info): - """Handle router linking with network.""" - LOG.debug(_("MidonetPluginV2.add_router_interface called: " - "router_id=%(router_id)s " - "interface_info=%(interface_info)r"), - {'router_id': router_id, 'interface_info': interface_info}) - - with context.session.begin(subtransactions=True): - info = super(MidonetPluginV2, self).add_router_interface( - context, router_id, interface_info) - - try: - subnet = self._get_subnet(context, info["subnet_id"]) - cidr = subnet["cidr"] - net_addr, net_len = net_util.net_addr(cidr) - router = self.client.get_router(router_id) - - # Get the metadata GW IP - metadata_gw_ip = None - rport_qry = context.session.query(models_v2.Port) - dhcp_ports = rport_qry.filter_by( - network_id=subnet["network_id"], - device_owner=constants.DEVICE_OWNER_DHCP).all() - if dhcp_ports and dhcp_ports[0].fixed_ips: - metadata_gw_ip = dhcp_ports[0].fixed_ips[0].ip_address - else: - LOG.warn(_("DHCP agent is not working correctly. 
No port " - "to reach the Metadata server on this network")) - # Link the router and the bridge - port = super(MidonetPluginV2, self).get_port(context, - info["port_id"]) - self._link_bridge_to_router(router, port, net_addr, - net_len, subnet["gateway_ip"], - metadata_gw_ip) - except Exception: - LOG.error(_("Failed to create MidoNet resources to add router " - "interface. info=%(info)s, router_id=%(router_id)s"), - {"info": info, "router_id": router_id}) - with excutils.save_and_reraise_exception(): - with context.session.begin(subtransactions=True): - self.remove_router_interface(context, router_id, info) - - LOG.debug(_("MidonetPluginV2.add_router_interface exiting: " - "info=%r"), info) - return info - - def _assoc_fip(self, fip): - router = self.client.get_router(fip["router_id"]) - link_port = self.client.get_link_port( - self._get_provider_router(), router.get_id()) - self.client.add_router_route( - self._get_provider_router(), - src_network_addr='0.0.0.0', - src_network_length=0, - dst_network_addr=fip["floating_ip_address"], - dst_network_length=32, - next_hop_port=link_port.get_peer_id()) - props = {OS_FLOATING_IP_RULE_KEY: fip['id']} - tenant_id = router.get_tenant_id() - chain_names = _nat_chain_names(router.get_id()) - for chain_type, name in chain_names.items(): - src_ip, target_ip = _get_nat_ips(chain_type, fip) - if chain_type == 'pre-routing': - nat_type = 'dnat' - else: - nat_type = 'snat' - self.client.add_static_nat(tenant_id, name, src_ip, - target_ip, - link_port.get_id(), - nat_type, **props) - - def create_floatingip(self, context, floatingip): - session = context.session - with session.begin(subtransactions=True): - fip = super(MidonetPluginV2, self).create_floatingip( - context, floatingip) - if fip['port_id']: - self._assoc_fip(fip) - return fip - - def update_floatingip(self, context, id, floatingip): - """Handle floating IP association and disassociation.""" - LOG.debug(_("MidonetPluginV2.update_floatingip called: id=%(id)s " - 
"floatingip=%(floatingip)s "), - {'id': id, 'floatingip': floatingip}) - - session = context.session - with session.begin(subtransactions=True): - if floatingip['floatingip']['port_id']: - fip = super(MidonetPluginV2, self).update_floatingip( - context, id, floatingip) - - self._assoc_fip(fip) - - # disassociate floating IP - elif floatingip['floatingip']['port_id'] is None: - fip = super(MidonetPluginV2, self).get_floatingip(context, id) - self._remove_nat_rules(context, fip) - super(MidonetPluginV2, self).update_floatingip(context, id, - floatingip) - - LOG.debug(_("MidonetPluginV2.update_floating_ip exiting: fip=%s"), fip) - return fip - - def disassociate_floatingips(self, context, port_id): - """Disassociate floating IPs (if any) from this port.""" - try: - fip_qry = context.session.query(l3_db.FloatingIP) - fip_dbs = fip_qry.filter_by(fixed_port_id=port_id) - for fip_db in fip_dbs: - self._remove_nat_rules(context, fip_db) - except sa_exc.NoResultFound: - pass - - super(MidonetPluginV2, self).disassociate_floatingips(context, port_id) - - def create_security_group(self, context, security_group, default_sg=False): - """Create security group. - - Create a new security group, including the default security group. - In MidoNet, this means creating a pair of chains, inbound and outbound, - as well as a new port group. 
- """ - LOG.debug(_("MidonetPluginV2.create_security_group called: " - "security_group=%(security_group)s " - "default_sg=%(default_sg)s "), - {'security_group': security_group, 'default_sg': default_sg}) - - sg = security_group.get('security_group') - tenant_id = self._get_tenant_id_for_create(context, sg) - if not default_sg: - self._ensure_default_security_group(context, tenant_id) - - # Create the Neutron sg first - sg = super(MidonetPluginV2, self).create_security_group( - context, security_group, default_sg) - - try: - # Process the MidoNet side - self.client.create_port_group(tenant_id, - _sg_port_group_name(sg["id"])) - chain_names = _sg_chain_names(sg["id"]) - chains = {} - for direction, chain_name in chain_names.iteritems(): - c = self.client.create_chain(tenant_id, chain_name) - chains[direction] = c - - # Create all the rules for this SG. Only accept rules are created - for r in sg['security_group_rules']: - self._create_accept_chain_rule(context, r, - chain=chains[r['direction']]) - except Exception: - LOG.error(_("Failed to create MidoNet resources for sg %(sg)r"), - {"sg": sg}) - with excutils.save_and_reraise_exception(): - with context.session.begin(subtransactions=True): - sg = self._get_security_group(context, sg["id"]) - context.session.delete(sg) - - LOG.debug(_("MidonetPluginV2.create_security_group exiting: sg=%r"), - sg) - return sg - - def delete_security_group(self, context, id): - """Delete chains for Neutron security group.""" - LOG.debug(_("MidonetPluginV2.delete_security_group called: id=%s"), id) - - with context.session.begin(subtransactions=True): - sg = super(MidonetPluginV2, self).get_security_group(context, id) - if not sg: - raise ext_sg.SecurityGroupNotFound(id=id) - - if sg["name"] == 'default' and not context.is_admin: - raise ext_sg.SecurityGroupCannotRemoveDefault() - - sg_id = sg['id'] - filters = {'security_group_id': [sg_id]} - if super(MidonetPluginV2, self)._get_port_security_group_bindings( - context, filters): - 
raise ext_sg.SecurityGroupInUse(id=sg_id) - - # Delete MidoNet Chains and portgroup for the SG - tenant_id = sg['tenant_id'] - self.client.delete_chains_by_names( - tenant_id, _sg_chain_names(sg["id"]).values()) - - self.client.delete_port_group_by_name( - tenant_id, _sg_port_group_name(sg["id"])) - - super(MidonetPluginV2, self).delete_security_group(context, id) - - def create_security_group_rule(self, context, security_group_rule): - """Create a security group rule - - Create a security group rule in the Neutron DB and corresponding - MidoNet resources in its data store. - """ - LOG.debug(_("MidonetPluginV2.create_security_group_rule called: " - "security_group_rule=%(security_group_rule)r"), - {'security_group_rule': security_group_rule}) - - with context.session.begin(subtransactions=True): - rule = super(MidonetPluginV2, self).create_security_group_rule( - context, security_group_rule) - - self._create_accept_chain_rule(context, rule) - - LOG.debug(_("MidonetPluginV2.create_security_group_rule exiting: " - "rule=%r"), rule) - return rule - - def delete_security_group_rule(self, context, sg_rule_id): - """Delete a security group rule - - Delete a security group rule from the Neutron DB and corresponding - MidoNet resources from its data store. 
- """ - LOG.debug(_("MidonetPluginV2.delete_security_group_rule called: " - "sg_rule_id=%s"), sg_rule_id) - with context.session.begin(subtransactions=True): - rule = super(MidonetPluginV2, self).get_security_group_rule( - context, sg_rule_id) - - if not rule: - raise ext_sg.SecurityGroupRuleNotFound(id=sg_rule_id) - - sg = self._get_security_group(context, - rule["security_group_id"]) - chain_name = _sg_chain_names(sg["id"])[rule["direction"]] - self.client.remove_rules_by_property(rule["tenant_id"], chain_name, - OS_SG_RULE_KEY, - str(rule["id"])) - super(MidonetPluginV2, self).delete_security_group_rule( - context, sg_rule_id) - - def _add_chain_rule(self, chain, action, **kwargs): - - nw_proto = kwargs.get("nw_proto") - src_addr = kwargs.pop("src_addr", None) - dst_addr = kwargs.pop("dst_addr", None) - src_port_from = kwargs.pop("src_port_from", None) - src_port_to = kwargs.pop("src_port_to", None) - dst_port_from = kwargs.pop("dst_port_from", None) - dst_port_to = kwargs.pop("dst_port_to", None) - - # Convert to the keys and values that midonet client understands - if src_addr: - kwargs["nw_src_addr"], kwargs["nw_src_length"] = net_util.net_addr( - src_addr) - - if dst_addr: - kwargs["nw_dst_addr"], kwargs["nw_dst_length"] = net_util.net_addr( - dst_addr) - - kwargs["tp_src"] = {"start": src_port_from, "end": src_port_to} - - kwargs["tp_dst"] = {"start": dst_port_from, "end": dst_port_to} - - if nw_proto == 1: # ICMP - # Overwrite port fields regardless of the direction - kwargs["tp_src"] = {"start": src_port_from, "end": src_port_from} - kwargs["tp_dst"] = {"start": dst_port_to, "end": dst_port_to} - - return self.client.add_chain_rule(chain, action=action, **kwargs) diff --git a/neutron/plugins/ml2/README b/neutron/plugins/ml2/README deleted file mode 100644 index 4dce789cb..000000000 --- a/neutron/plugins/ml2/README +++ /dev/null @@ -1,53 +0,0 @@ -The Modular Layer 2 (ML2) plugin is a framework allowing OpenStack -Networking to simultaneously utilize the 
variety of layer 2 networking -technologies found in complex real-world data centers. It supports the -Open vSwitch, Linux bridge, and Hyper-V L2 agents, replacing and -deprecating the monolithic plugins previously associated with those -agents, and can also support hardware devices and SDN controllers. The -ML2 framework is intended to greatly simplify adding support for new -L2 networking technologies, requiring much less initial and ongoing -effort than would be required for an additional monolithic core -plugin. It is also intended to foster innovation through its -organization as optional driver modules. - -The ML2 plugin supports all the non-vendor-specific neutron API -extensions, and works with the standard neutron DHCP agent. It -utilizes the service plugin interface to implement the L3 router -abstraction, allowing use of either the standard neutron L3 agent or -alternative L3 solutions. Additional service plugins can also be used -with the ML2 core plugin. - -Drivers within ML2 implement separately extensible sets of network -types and of mechanisms for accessing networks of those types. Unlike -with the metaplugin, multiple mechanisms can be used simultaneously to -access different ports of the same virtual network. Mechanisms can -utilize L2 agents via RPC and/or interact with external devices or -controllers. By utilizing the multiprovidernet extension, virtual -networks can be composed of multiple segments of the same or different -types. Type and mechanism drivers are loaded as python entrypoints -using the stevedore library. - -Each available network type is managed by an ML2 type driver. Type -drivers maintain any needed type-specific network state, and perform -provider network validation and tenant network allocation. As of the -havana release, drivers for the local, flat, vlan, gre, and vxlan -network types are included. - -Each available networking mechanism is managed by an ML2 mechanism -driver. 
All registered mechanism drivers are called twice when -networks, subnets, and ports are created, updated, or deleted. They -are first called as part of the DB transaction, where they can -maintain any needed driver-specific state. Once the transaction has -been committed, they are called again, at which point they can -interact with external devices and controllers. Mechanism drivers are -also called as part of the port binding process, to determine whether -the associated mechanism can provide connectivity for the network, and -if so, the network segment and VIF driver to be used. The havana -release includes mechanism drivers for the Open vSwitch, Linux bridge, -and Hyper-V L2 agents, for Arista and Cisco switches, and for the -Tail-f NCS. It also includes an L2 Population mechanism driver that -can help optimize tunneled virtual network traffic. - -For additional information regarding the ML2 plugin and its collection -of type and mechanism drivers, see the OpenStack manuals and -http://wiki.openstack.org/wiki/Neutron/ML2. diff --git a/neutron/plugins/ml2/__init__.py b/neutron/plugins/ml2/__init__.py deleted file mode 100644 index 788cea1f7..000000000 --- a/neutron/plugins/ml2/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright (c) 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
diff --git a/neutron/plugins/ml2/common/__init__.py b/neutron/plugins/ml2/common/__init__.py deleted file mode 100644 index 788cea1f7..000000000 --- a/neutron/plugins/ml2/common/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright (c) 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. diff --git a/neutron/plugins/ml2/common/exceptions.py b/neutron/plugins/ml2/common/exceptions.py deleted file mode 100644 index ed94b1e1f..000000000 --- a/neutron/plugins/ml2/common/exceptions.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright (c) 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""Exceptions used by ML2.""" - -from neutron.common import exceptions - - -class MechanismDriverError(exceptions.NeutronException): - """Mechanism driver call failed.""" - message = _("%(method)s failed.") diff --git a/neutron/plugins/ml2/config.py b/neutron/plugins/ml2/config.py deleted file mode 100644 index afce63045..000000000 --- a/neutron/plugins/ml2/config.py +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright (c) 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo.config import cfg - - -ml2_opts = [ - cfg.ListOpt('type_drivers', - default=['local', 'flat', 'vlan', 'gre', 'vxlan'], - help=_("List of network type driver entrypoints to be loaded " - "from the neutron.ml2.type_drivers namespace.")), - cfg.ListOpt('tenant_network_types', - default=['local'], - help=_("Ordered list of network_types to allocate as tenant " - "networks.")), - cfg.ListOpt('mechanism_drivers', - default=[], - help=_("An ordered list of networking mechanism driver " - "entrypoints to be loaded from the " - "neutron.ml2.mechanism_drivers namespace.")), -] - - -cfg.CONF.register_opts(ml2_opts, "ml2") diff --git a/neutron/plugins/ml2/db.py b/neutron/plugins/ml2/db.py deleted file mode 100644 index 4cf8eed32..000000000 --- a/neutron/plugins/ml2/db.py +++ /dev/null @@ -1,136 +0,0 @@ -# Copyright (c) 2013 OpenStack Foundation -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from sqlalchemy.orm import exc - -from neutron.db import api as db_api -from neutron.db import models_v2 -from neutron.db import securitygroups_db as sg_db -from neutron.extensions import portbindings -from neutron import manager -from neutron.openstack.common import log -from neutron.openstack.common import uuidutils -from neutron.plugins.ml2 import driver_api as api -from neutron.plugins.ml2 import models - -LOG = log.getLogger(__name__) - - -def add_network_segment(session, network_id, segment): - with session.begin(subtransactions=True): - record = models.NetworkSegment( - id=uuidutils.generate_uuid(), - network_id=network_id, - network_type=segment.get(api.NETWORK_TYPE), - physical_network=segment.get(api.PHYSICAL_NETWORK), - segmentation_id=segment.get(api.SEGMENTATION_ID) - ) - session.add(record) - LOG.info(_("Added segment %(id)s of type %(network_type)s for network" - " %(network_id)s"), - {'id': record.id, - 'network_type': record.network_type, - 'network_id': record.network_id}) - - -def get_network_segments(session, network_id): - with session.begin(subtransactions=True): - records = (session.query(models.NetworkSegment). 
- filter_by(network_id=network_id)) - return [{api.ID: record.id, - api.NETWORK_TYPE: record.network_type, - api.PHYSICAL_NETWORK: record.physical_network, - api.SEGMENTATION_ID: record.segmentation_id} - for record in records] - - -def ensure_port_binding(session, port_id): - with session.begin(subtransactions=True): - try: - record = (session.query(models.PortBinding). - filter_by(port_id=port_id). - one()) - except exc.NoResultFound: - record = models.PortBinding( - port_id=port_id, - vif_type=portbindings.VIF_TYPE_UNBOUND) - session.add(record) - return record - - -def get_port(session, port_id): - """Get port record for update within transcation.""" - - with session.begin(subtransactions=True): - try: - record = (session.query(models_v2.Port). - filter(models_v2.Port.id.startswith(port_id)). - one()) - return record - except exc.NoResultFound: - return - except exc.MultipleResultsFound: - LOG.error(_("Multiple ports have port_id starting with %s"), - port_id) - return - - -def get_port_from_device_mac(device_mac): - LOG.debug(_("get_port_from_device_mac() called for mac %s"), device_mac) - session = db_api.get_session() - qry = session.query(models_v2.Port).filter_by(mac_address=device_mac) - return qry.first() - - -def get_port_and_sgs(port_id): - """Get port from database with security group info.""" - - LOG.debug(_("get_port_and_sgs() called for port_id %s"), port_id) - session = db_api.get_session() - sg_binding_port = sg_db.SecurityGroupPortBinding.port_id - - with session.begin(subtransactions=True): - query = session.query(models_v2.Port, - sg_db.SecurityGroupPortBinding.security_group_id) - query = query.outerjoin(sg_db.SecurityGroupPortBinding, - models_v2.Port.id == sg_binding_port) - query = query.filter(models_v2.Port.id.startswith(port_id)) - port_and_sgs = query.all() - if not port_and_sgs: - return - port = port_and_sgs[0][0] - plugin = manager.NeutronManager.get_plugin() - port_dict = plugin._make_port_dict(port) - port_dict['security_groups'] 
= [ - sg_id for port_, sg_id in port_and_sgs if sg_id] - port_dict['security_group_rules'] = [] - port_dict['security_group_source_groups'] = [] - port_dict['fixed_ips'] = [ip['ip_address'] - for ip in port['fixed_ips']] - return port_dict - - -def get_port_binding_host(port_id): - session = db_api.get_session() - with session.begin(subtransactions=True): - try: - query = (session.query(models.PortBinding). - filter(models.PortBinding.port_id.startswith(port_id)). - one()) - except exc.NoResultFound: - LOG.debug(_("No binding found for port %(port_id)s"), - {'port_id': port_id}) - return - return query.host diff --git a/neutron/plugins/ml2/driver_api.py b/neutron/plugins/ml2/driver_api.py deleted file mode 100644 index 2384b0cf9..000000000 --- a/neutron/plugins/ml2/driver_api.py +++ /dev/null @@ -1,597 +0,0 @@ -# Copyright (c) 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import abc -import six - -# The following keys are used in the segment dictionaries passed via -# the driver API. These are defined separately from similar keys in -# neutron.extensions.providernet so that drivers don't need to change -# if/when providernet moves to the core API. -# -ID = 'id' -NETWORK_TYPE = 'network_type' -PHYSICAL_NETWORK = 'physical_network' -SEGMENTATION_ID = 'segmentation_id' - - -@six.add_metaclass(abc.ABCMeta) -class TypeDriver(object): - """Define stable abstract interface for ML2 type drivers. 
- - ML2 type drivers each support a specific network_type for provider - and/or tenant network segments. Type drivers must implement this - abstract interface, which defines the API by which the plugin uses - the driver to manage the persistent type-specific resource - allocation state associated with network segments of that type. - - Network segments are represented by segment dictionaries using the - NETWORK_TYPE, PHYSICAL_NETWORK, and SEGMENTATION_ID keys defined - above, corresponding to the provider attributes. Future revisions - of the TypeDriver API may add additional segment dictionary - keys. Attributes not applicable for a particular network_type may - either be excluded or stored as None. - """ - - @abc.abstractmethod - def get_type(self): - """Get driver's network type. - - :returns network_type value handled by this driver - """ - pass - - @abc.abstractmethod - def initialize(self): - """Perform driver initialization. - - Called after all drivers have been loaded and the database has - been initialized. No abstract methods defined below will be - called prior to this method being called. - """ - pass - - @abc.abstractmethod - def validate_provider_segment(self, segment): - """Validate attributes of a provider network segment. - - :param segment: segment dictionary using keys defined above - :raises: neutron.common.exceptions.InvalidInput if invalid - - Called outside transaction context to validate the provider - attributes for a provider network segment. Raise InvalidInput - if: - - - any required attribute is missing - - any prohibited or unrecognized attribute is present - - any attribute value is not valid - - The network_type attribute is present in segment, but - need not be validated. - """ - pass - - @abc.abstractmethod - def reserve_provider_segment(self, session, segment): - """Reserve resource associated with a provider network segment. 
- - :param session: database session - :param segment: segment dictionary using keys defined above - - Called inside transaction context on session to reserve the - type-specific resource for a provider network segment. The - segment dictionary passed in was returned by a previous - validate_provider_segment() call. - """ - pass - - @abc.abstractmethod - def allocate_tenant_segment(self, session): - """Allocate resource for a new tenant network segment. - - :param session: database session - :returns: segment dictionary using keys defined above - - Called inside transaction context on session to allocate a new - tenant network, typically from a type-specific resource - pool. If successful, return a segment dictionary describing - the segment. If tenant network segment cannot be allocated - (i.e. tenant networks not supported or resource pool is - exhausted), return None. - """ - pass - - @abc.abstractmethod - def release_segment(self, session, segment): - """Release network segment. - - :param session: database session - :param segment: segment dictionary using keys defined above - - Called inside transaction context on session to release a - tenant or provider network's type-specific resource. Runtime - errors are not expected, but raising an exception will result - in rollback of the transaction. - """ - pass - - -@six.add_metaclass(abc.ABCMeta) -class NetworkContext(object): - """Context passed to MechanismDrivers for changes to network resources. - - A NetworkContext instance wraps a network resource. It provides - helper methods for accessing other relevant information. Results - from expensive operations are cached so that other - MechanismDrivers can freely access the same information. - """ - - @abc.abstractproperty - def current(self): - """Return the current state of the network. - - Return the current state of the network, as defined by - NeutronPluginBaseV2.create_network and all extensions in the - ml2 plugin. 
- """ - pass - - @abc.abstractproperty - def original(self): - """Return the original state of the network. - - Return the original state of the network, prior to a call to - update_network. Method is only valid within calls to - update_network_precommit and update_network_postcommit. - """ - pass - - @abc.abstractproperty - def network_segments(self): - """Return the segments associated with this network resource.""" - pass - - -@six.add_metaclass(abc.ABCMeta) -class SubnetContext(object): - """Context passed to MechanismDrivers for changes to subnet resources. - - A SubnetContext instance wraps a subnet resource. It provides - helper methods for accessing other relevant information. Results - from expensive operations are cached so that other - MechanismDrivers can freely access the same information. - """ - - @abc.abstractproperty - def current(self): - """Return the current state of the subnet. - - Return the current state of the subnet, as defined by - NeutronPluginBaseV2.create_subnet and all extensions in the - ml2 plugin. - """ - pass - - @abc.abstractproperty - def original(self): - """Return the original state of the subnet. - - Return the original state of the subnet, prior to a call to - update_subnet. Method is only valid within calls to - update_subnet_precommit and update_subnet_postcommit. - """ - pass - - -@six.add_metaclass(abc.ABCMeta) -class PortContext(object): - """Context passed to MechanismDrivers for changes to port resources. - - A PortContext instance wraps a port resource. It provides helper - methods for accessing other relevant information. Results from - expensive operations are cached so that other MechanismDrivers can - freely access the same information. - """ - - @abc.abstractproperty - def current(self): - """Return the current state of the port. - - Return the current state of the port, as defined by - NeutronPluginBaseV2.create_port and all extensions in the ml2 - plugin. 
- """ - pass - - @abc.abstractproperty - def original(self): - """Return the original state of the port. - - Return the original state of the port, prior to a call to - update_port. Method is only valid within calls to - update_port_precommit and update_port_postcommit. - """ - pass - - @abc.abstractproperty - def network(self): - """Return the NetworkContext associated with this port.""" - pass - - @abc.abstractproperty - def bound_segment(self): - """Return the currently bound segment dictionary.""" - pass - - @abc.abstractproperty - def original_bound_segment(self): - """Return the original bound segment dictionary. - - Return the original bound segment dictionary, prior to a call - to update_port. Method is only valid within calls to - update_port_precommit and update_port_postcommit. - """ - pass - - @abc.abstractproperty - def bound_driver(self): - """Return the currently bound mechanism driver name.""" - pass - - @abc.abstractproperty - def original_bound_driver(self): - """Return the original bound mechanism driver name. - - Return the original bound mechanism driver name, prior to a - call to update_port. Method is only valid within calls to - update_port_precommit and update_port_postcommit. - """ - pass - - @abc.abstractmethod - def host_agents(self, agent_type): - """Get agents of the specified type on port's host. - - :param agent_type: Agent type identifier - :returns: List of agents_db.Agent records - """ - pass - - @abc.abstractmethod - def set_binding(self, segment_id, vif_type, vif_details, - status=None): - """Set the binding for the port. - - :param segment_id: Network segment bound for the port. - :param vif_type: The VIF type for the bound port. - :param vif_details: Dictionary with details for VIF driver. - :param status: Port status to set if not None. - - Called by MechanismDriver.bind_port to indicate success and - specify binding details to use for port. The segment_id must - identify an item in network.network_segments. 
- """ - pass - - -@six.add_metaclass(abc.ABCMeta) -class MechanismDriver(object): - """Define stable abstract interface for ML2 mechanism drivers. - - A mechanism driver is called on the creation, update, and deletion - of networks and ports. For every event, there are two methods that - get called - one within the database transaction (method suffix of - _precommit), one right afterwards (method suffix of _postcommit). - - Exceptions raised by methods called inside the transaction can - rollback, but should not make any blocking calls (for example, - REST requests to an outside controller). Methods called after - transaction commits can make blocking external calls, though these - will block the entire process. Exceptions raised in calls after - the transaction commits may cause the associated resource to be - deleted. - - Because rollback outside of the transaction is not done in the - update network/port case, all data validation must be done within - methods that are part of the database transaction. - """ - - @abc.abstractmethod - def initialize(self): - """Perform driver initialization. - - Called after all drivers have been loaded and the database has - been initialized. No abstract methods defined below will be - called prior to this method being called. - """ - pass - - def create_network_precommit(self, context): - """Allocate resources for a new network. - - :param context: NetworkContext instance describing the new - network. - - Create a new network, allocating resources as necessary in the - database. Called inside transaction context on session. Call - cannot block. Raising an exception will result in a rollback - of the current transaction. - """ - pass - - def create_network_postcommit(self, context): - """Create a network. - - :param context: NetworkContext instance describing the new - network. - - Called after the transaction commits. 
Call can block, though - will block the entire process so care should be taken to not - drastically affect performance. Raising an exception will - cause the deletion of the resource. - """ - pass - - def update_network_precommit(self, context): - """Update resources of a network. - - :param context: NetworkContext instance describing the new - state of the network, as well as the original state prior - to the update_network call. - - Update values of a network, updating the associated resources - in the database. Called inside transaction context on session. - Raising an exception will result in rollback of the - transaction. - - update_network_precommit is called for all changes to the - network state. It is up to the mechanism driver to ignore - state or state changes that it does not know or care about. - """ - pass - - def update_network_postcommit(self, context): - """Update a network. - - :param context: NetworkContext instance describing the new - state of the network, as well as the original state prior - to the update_network call. - - Called after the transaction commits. Call can block, though - will block the entire process so care should be taken to not - drastically affect performance. Raising an exception will - cause the deletion of the resource. - - update_network_postcommit is called for all changes to the - network state. It is up to the mechanism driver to ignore - state or state changes that it does not know or care about. - """ - pass - - def delete_network_precommit(self, context): - """Delete resources for a network. - - :param context: NetworkContext instance describing the current - state of the network, prior to the call to delete it. - - Delete network resources previously allocated by this - mechanism driver for a network. Called inside transaction - context on session. Runtime errors are not expected, but - raising an exception will result in rollback of the - transaction. 
- """ - pass - - def delete_network_postcommit(self, context): - """Delete a network. - - :param context: NetworkContext instance describing the current - state of the network, prior to the call to delete it. - - Called after the transaction commits. Call can block, though - will block the entire process so care should be taken to not - drastically affect performance. Runtime errors are not - expected, and will not prevent the resource from being - deleted. - """ - pass - - def create_subnet_precommit(self, context): - """Allocate resources for a new subnet. - - :param context: SubnetContext instance describing the new - subnet. - - Create a new subnet, allocating resources as necessary in the - database. Called inside transaction context on session. Call - cannot block. Raising an exception will result in a rollback - of the current transaction. - """ - pass - - def create_subnet_postcommit(self, context): - """Create a subnet. - - :param context: SubnetContext instance describing the new - subnet. - - Called after the transaction commits. Call can block, though - will block the entire process so care should be taken to not - drastically affect performance. Raising an exception will - cause the deletion of the resource. - """ - pass - - def update_subnet_precommit(self, context): - """Update resources of a subnet. - - :param context: SubnetContext instance describing the new - state of the subnet, as well as the original state prior - to the update_subnet call. - - Update values of a subnet, updating the associated resources - in the database. Called inside transaction context on session. - Raising an exception will result in rollback of the - transaction. - - update_subnet_precommit is called for all changes to the - subnet state. It is up to the mechanism driver to ignore - state or state changes that it does not know or care about. - """ - pass - - def update_subnet_postcommit(self, context): - """Update a subnet. 
- - :param context: SubnetContext instance describing the new - state of the subnet, as well as the original state prior - to the update_subnet call. - - Called after the transaction commits. Call can block, though - will block the entire process so care should be taken to not - drastically affect performance. Raising an exception will - cause the deletion of the resource. - - update_subnet_postcommit is called for all changes to the - subnet state. It is up to the mechanism driver to ignore - state or state changes that it does not know or care about. - """ - pass - - def delete_subnet_precommit(self, context): - """Delete resources for a subnet. - - :param context: SubnetContext instance describing the current - state of the subnet, prior to the call to delete it. - - Delete subnet resources previously allocated by this - mechanism driver for a subnet. Called inside transaction - context on session. Runtime errors are not expected, but - raising an exception will result in rollback of the - transaction. - """ - pass - - def delete_subnet_postcommit(self, context): - """Delete a subnet. - - :param context: SubnetContext instance describing the current - state of the subnet, prior to the call to delete it. - - Called after the transaction commits. Call can block, though - will block the entire process so care should be taken to not - drastically affect performance. Runtime errors are not - expected, and will not prevent the resource from being - deleted. - """ - pass - - def create_port_precommit(self, context): - """Allocate resources for a new port. - - :param context: PortContext instance describing the port. - - Create a new port, allocating resources as necessary in the - database. Called inside transaction context on session. Call - cannot block. Raising an exception will result in a rollback - of the current transaction. - """ - pass - - def create_port_postcommit(self, context): - """Create a port. 
- - :param context: PortContext instance describing the port. - - Called after the transaction completes. Call can block, though - will block the entire process so care should be taken to not - drastically affect performance. Raising an exception will - result in the deletion of the resource. - """ - pass - - def update_port_precommit(self, context): - """Update resources of a port. - - :param context: PortContext instance describing the new - state of the port, as well as the original state prior - to the update_port call. - - Called inside transaction context on session to complete a - port update as defined by this mechanism driver. Raising an - exception will result in rollback of the transaction. - - update_port_precommit is called for all changes to the port - state. It is up to the mechanism driver to ignore state or - state changes that it does not know or care about. - """ - pass - - def update_port_postcommit(self, context): - """Update a port. - - :param context: PortContext instance describing the new - state of the port, as well as the original state prior - to the update_port call. - - Called after the transaction completes. Call can block, though - will block the entire process so care should be taken to not - drastically affect performance. Raising an exception will - result in the deletion of the resource. - - update_port_postcommit is called for all changes to the port - state. It is up to the mechanism driver to ignore state or - state changes that it does not know or care about. - """ - pass - - def delete_port_precommit(self, context): - """Delete resources of a port. - - :param context: PortContext instance describing the current - state of the port, prior to the call to delete it. - - Called inside transaction context on session. Runtime errors - are not expected, but raising an exception will result in - rollback of the transaction. - """ - pass - - def delete_port_postcommit(self, context): - """Delete a port. 
- - :param context: PortContext instance describing the current - state of the port, prior to the call to delete it. - - Called after the transaction completes. Call can block, though - will block the entire process so care should be taken to not - drastically affect performance. Runtime errors are not - expected, and will not prevent the resource from being - deleted. - """ - pass - - def bind_port(self, context): - """Attempt to bind a port. - - :param context: PortContext instance describing the port - - Called inside transaction context on session, prior to - create_port_precommit or update_port_precommit, to - attempt to establish a port binding. If the driver is able to - bind the port, it calls context.set_binding with the binding - details. - """ - pass diff --git a/neutron/plugins/ml2/driver_context.py b/neutron/plugins/ml2/driver_context.py deleted file mode 100644 index 0c1180619..000000000 --- a/neutron/plugins/ml2/driver_context.py +++ /dev/null @@ -1,135 +0,0 @@ -# Copyright (c) 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from neutron.openstack.common import jsonutils -from neutron.plugins.ml2 import db -from neutron.plugins.ml2 import driver_api as api - - -class MechanismDriverContext(object): - """MechanismDriver context base class.""" - def __init__(self, plugin, plugin_context): - self._plugin = plugin - # This temporarily creates a reference loop, but the - # lifetime of PortContext is limited to a single - # method call of the plugin. - self._plugin_context = plugin_context - - -class NetworkContext(MechanismDriverContext, api.NetworkContext): - - def __init__(self, plugin, plugin_context, network, - original_network=None): - super(NetworkContext, self).__init__(plugin, plugin_context) - self._network = network - self._original_network = original_network - self._segments = db.get_network_segments(plugin_context.session, - network['id']) - - @property - def current(self): - return self._network - - @property - def original(self): - return self._original_network - - @property - def network_segments(self): - return self._segments - - -class SubnetContext(MechanismDriverContext, api.SubnetContext): - - def __init__(self, plugin, plugin_context, subnet, original_subnet=None): - super(SubnetContext, self).__init__(plugin, plugin_context) - self._subnet = subnet - self._original_subnet = original_subnet - - @property - def current(self): - return self._subnet - - @property - def original(self): - return self._original_subnet - - -class PortContext(MechanismDriverContext, api.PortContext): - - def __init__(self, plugin, plugin_context, port, network, - original_port=None): - super(PortContext, self).__init__(plugin, plugin_context) - self._port = port - self._original_port = original_port - self._network_context = NetworkContext(plugin, plugin_context, - network) - self._binding = db.ensure_port_binding(plugin_context.session, - port['id']) - if original_port: - self._original_bound_segment_id = self._binding.segment - self._original_bound_driver = self._binding.driver - else: - 
self._original_bound_segment_id = None - self._original_bound_driver = None - self._new_port_status = None - - @property - def current(self): - return self._port - - @property - def original(self): - return self._original_port - - @property - def network(self): - return self._network_context - - @property - def bound_segment(self): - id = self._binding.segment - if id: - for segment in self._network_context.network_segments: - if segment[api.ID] == id: - return segment - - @property - def original_bound_segment(self): - if self._original_bound_segment_id: - for segment in self._network_context.network_segments: - if segment[api.ID] == self._original_bound_segment_id: - return segment - - @property - def bound_driver(self): - return self._binding.driver - - @property - def original_bound_driver(self): - return self._original_bound_driver - - def host_agents(self, agent_type): - return self._plugin.get_agents(self._plugin_context, - filters={'agent_type': [agent_type], - 'host': [self._binding.host]}) - - def set_binding(self, segment_id, vif_type, vif_details, - status=None): - # TODO(rkukura) Verify binding allowed, segment in network - self._binding.segment = segment_id - self._binding.vif_type = vif_type - self._binding.vif_details = jsonutils.dumps(vif_details) - self._new_port_status = status diff --git a/neutron/plugins/ml2/drivers/README.fslsdn b/neutron/plugins/ml2/drivers/README.fslsdn deleted file mode 100644 index 09017284c..000000000 --- a/neutron/plugins/ml2/drivers/README.fslsdn +++ /dev/null @@ -1,102 +0,0 @@ -===================================================== -Freescale SDN Mechanism Driver for Neutron ML2 plugin -===================================================== - -Introduction -============ - -Freescale SDN (FSL-SDN) Mechanism Driver is an add-on support for ML2 plugin -for Neutron. - -It supports the Cloud Resource Discovery (CRD) service by updating -Network, Subnet and Port Create/Update/Delete data into the CRD database. 
- -CRD service manages network nodes, virtual network appliances and openflow -controller based network applications. - -Basic work flow ---------------- - -:: - - +---------------------------------+ - | | - | Neutron Server | - | (with ML2 plugin) | - | | - | +-------------------------------+ - | | Freescale SDN | - | | Mechanism Driver | - +-+--------+----------------------+ - | - | ReST API - | - +----------+-------------+ - | CRD server | - +------------------------+ - - - -How does Freescale SDN Mechanism Driver work? -=========================================== - -- Freescale Mechanism driver handles the following postcommit operations. - - Network create/update/delete - - Subnet create/update/delete - - Port create/delete - -Sequence diagram : create_network ---------------------------------- - -:: - - create_network - { - neutron -> ML2_plugin - ML2_plugin -> FSL-SDN-MD - FSL-SDN-MD -> crd_service - FSL-SDN-MD <-- crd_service - ML2_plugin <-- FSL-SDN-MD - neutron <-- ML2_plugin - } - -- Supported network types by FSL OF Controller include vlan and vxlan. - -- Freescale SDN mechanism driver handles VM port binding within in the - mechanism driver (like ODL MD). - -- 'bind_port' function verifies the supported network types (vlan,vxlan) - and calls context.set_binding with binding details. - -- Flow management in OVS is handled by Freescale Openflow Controller. - - -How to use Freescale SDN Mechanism Driver? -========================================== - -Configuring ML2 Plugin ----------------------- - -In [ml2] section of /etc/neutron/plugins/ml2/ml2_conf.ini, -modify 'mechanism_drivers' attributes as: - -:: - - mechanism_drivers = fslsdn - -Configuring FSLSDN Mechanism Driver ------------------------------------ - -Update /etc/neutron/plugins/ml2/ml2_conf_fslsdn.ini, as below. 
- -:: - - [ml2_fslsdn] - crd_auth_strategy = keystone - crd_url = http://127.0.0.1:9797 - crd_auth_url = http://127.0.0.1:5000/v2.0/ - crd_tenant_name = service - crd_password = <-service-password-> - crd_user_name = <-service-username-> - -CRD service must be running in the controller. diff --git a/neutron/plugins/ml2/drivers/README.odl b/neutron/plugins/ml2/drivers/README.odl deleted file mode 100644 index eef8d4441..000000000 --- a/neutron/plugins/ml2/drivers/README.odl +++ /dev/null @@ -1,41 +0,0 @@ -OpenDaylight ML2 MechanismDriver -================================ -OpenDaylight is an Open Source SDN Controller developed by a plethora of -companies and hosted by the Linux Foundation. The OpenDaylight website -contains more information on the capabilities OpenDaylight provides: - - http://www.opendaylight.org - -Theory of operation -=================== -The OpenStack Neutron integration with OpenDaylight consists of the ML2 -MechanismDriver which acts as a REST proxy and passess all Neutron API -calls into OpenDaylight. OpenDaylight contains a NB REST service (called -the NeutronAPIService) which caches data from these proxied API calls and -makes it available to other services inside of OpenDaylight. One current -user of the SB side of the NeutronAPIService is the OVSDB code in -OpenDaylight. OVSDB uses the neutron information to isolate tenant networks -using GRE or VXLAN tunnels. - -How to use the OpenDaylight ML2 MechanismDriver -=============================================== -To use the ML2 MechanismDriver, you need to ensure you have it configured -as one of the "mechanism_drivers" in ML2: - - mechanism_drivers=opendaylight - -The next step is to setup the "[ml2_odl]" section in either the ml2_conf.ini -file or in a separate ml2_conf_odl.ini file. 
An example is shown below: - - [ml2_odl] - password = admin - username = admin - url = http://192.168.100.1:8080/controller/nb/v2/neutron - -When starting OpenDaylight, ensure you have the SimpleForwarding application -disabled or remove the .jar file from the plugins directory. Also ensure you -start OpenDaylight before you start OpenStack Neutron. - -There is devstack support for this which will automatically pull down OpenDaylight -and start it as part of devstack as well. The patch for this will likely merge -around the same time as this patch merges. diff --git a/neutron/plugins/ml2/drivers/__init__.py b/neutron/plugins/ml2/drivers/__init__.py deleted file mode 100644 index 788cea1f7..000000000 --- a/neutron/plugins/ml2/drivers/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright (c) 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
diff --git a/neutron/plugins/ml2/drivers/brocade/README.md b/neutron/plugins/ml2/drivers/brocade/README.md deleted file mode 100644 index 5cf5a7571..000000000 --- a/neutron/plugins/ml2/drivers/brocade/README.md +++ /dev/null @@ -1,60 +0,0 @@ -Brocade ML2 Mechanism driver from ML2 plugin -============================================ - -* up-to-date version of these instructions are located at: - http://50.56.236.34/docs/brocade-ml2-mechanism.txt -* N.B.: Please see Prerequisites section regarding ncclient (netconf client library) -* Supports VCS (Virtual Cluster of Switches) -* Issues/Questions/Bugs: sharis@brocade.com - - - - 1. VDX 67xx series of switches - 2. VDX 87xx series of switches - -ML2 plugin requires mechanism driver to support configuring of hardware switches. -Brocade Mechanism for ML2 uses NETCONF at the backend to configure the Brocade switch. -Currently the mechanism drivers support VLANs only. - - +------------+ +------------+ +-------------+ - | | | | | | - Neutron | | | | | Brocade | - v2.0 | Openstack | | Brocade | NETCONF | VCS Switch | - ----+ Neutron +--------+ Mechanism +----------+ | - | ML2 | | Driver | | VDX 67xx | - | Plugin | | | | VDX 87xx | - | | | | | | - | | | | | | - +------------+ +------------+ +-------------+ - - -Configuration - -In order to use this mechnism the brocade configuration file needs to be edited with the appropriate -configuration information: - - % cat /etc/neutron/plugins/ml2/ml2_conf_brocade.ini - [switch] - username = admin - password = password - address = - ostype = NOS - physical_networks = phys1 - -Additionally the brocade mechanism driver needs to be enabled from the ml2 config file: - - % cat /etc/neutron/plugins/ml2/ml2_conf.ini - - [ml2] - tenant_network_types = vlan - type_drivers = local,flat,vlan,gre,vxlan - mechanism_drivers = openvswitch,brocade - # OR mechanism_drivers = openvswitch,linuxbridge,hyperv,brocade - ... - ... - ... 
- - -Required L2 Agent - -This mechanism driver works in conjunction with an L2 Agent. The agent should be loaded as well in order for it to configure the virtual network int the host machine. Please see the configuration above. Atleast one of linuxbridge or openvswitch must be specified. diff --git a/neutron/plugins/ml2/drivers/brocade/__init__.py b/neutron/plugins/ml2/drivers/brocade/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/neutron/plugins/ml2/drivers/brocade/db/__init__.py b/neutron/plugins/ml2/drivers/brocade/db/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/neutron/plugins/ml2/drivers/brocade/db/models.py b/neutron/plugins/ml2/drivers/brocade/db/models.py deleted file mode 100644 index 249540527..000000000 --- a/neutron/plugins/ml2/drivers/brocade/db/models.py +++ /dev/null @@ -1,139 +0,0 @@ -# Copyright 2014 Brocade Communications System, Inc. -# All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# -# Authors: -# Shiv Haris (sharis@brocade.com) -# Varma Bhupatiraju (vbhupati@#brocade.com) - - -"""Brocade specific database schema/model.""" -import sqlalchemy as sa - -from neutron.db import model_base -from neutron.db import models_v2 - - -class ML2_BrocadeNetwork(model_base.BASEV2, models_v2.HasId, - models_v2.HasTenant): - """Schema for brocade network.""" - - vlan = sa.Column(sa.String(10)) - segment_id = sa.Column(sa.String(36)) - network_type = sa.Column(sa.String(10)) - - -class ML2_BrocadePort(model_base.BASEV2, models_v2.HasId, - models_v2.HasTenant): - """Schema for brocade port.""" - - network_id = sa.Column(sa.String(36), - sa.ForeignKey("ml2_brocadenetworks.id"), - nullable=False) - admin_state_up = sa.Column(sa.Boolean, nullable=False) - physical_interface = sa.Column(sa.String(36)) - vlan_id = sa.Column(sa.String(36)) - - -def create_network(context, net_id, vlan, segment_id, network_type, tenant_id): - """Create a brocade specific network/port-profiles.""" - - # only network_type of vlan is supported - session = context.session - with session.begin(subtransactions=True): - net = get_network(context, net_id, None) - if not net: - net = ML2_BrocadeNetwork(id=net_id, vlan=vlan, - segment_id=segment_id, - network_type='vlan', - tenant_id=tenant_id) - session.add(net) - return net - - -def delete_network(context, net_id): - """Delete a brocade specific network/port-profiles.""" - - session = context.session - with session.begin(subtransactions=True): - net = get_network(context, net_id, None) - if net: - session.delete(net) - - -def get_network(context, net_id, fields=None): - """Get brocade specific network, with vlan extension.""" - - session = context.session - return session.query(ML2_BrocadeNetwork).filter_by(id=net_id).first() - - -def get_networks(context, filters=None, fields=None): - """Get all brocade specific networks.""" - - session = context.session - return session.query(ML2_BrocadeNetwork).all() - - -def create_port(context, port_id, 
network_id, physical_interface, - vlan_id, tenant_id, admin_state_up): - """Create a brocade specific port, has policy like vlan.""" - - session = context.session - with session.begin(subtransactions=True): - port = get_port(context, port_id) - if not port: - port = ML2_BrocadePort(id=port_id, - network_id=network_id, - physical_interface=physical_interface, - vlan_id=vlan_id, - admin_state_up=admin_state_up, - tenant_id=tenant_id) - session.add(port) - - return port - - -def get_port(context, port_id): - """get a brocade specific port.""" - - session = context.session - return session.query(ML2_BrocadePort).filter_by(id=port_id).first() - - -def get_ports(context, network_id=None): - """get a brocade specific port.""" - - session = context.session - return session.query(ML2_BrocadePort).filter_by( - network_id=network_id).all() - - -def delete_port(context, port_id): - """delete brocade specific port.""" - - session = context.session - with session.begin(subtransactions=True): - port = get_port(context, port_id) - if port: - session.delete(port) - - -def update_port_state(context, port_id, admin_state_up): - """Update port attributes.""" - - session = context.session - with session.begin(subtransactions=True): - session.query(ML2_BrocadePort).filter_by( - id=port_id).update({'admin_state_up': admin_state_up}) diff --git a/neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py b/neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py deleted file mode 100644 index 015921df5..000000000 --- a/neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py +++ /dev/null @@ -1,385 +0,0 @@ -# Copyright 2014 Brocade Communications System, Inc. -# All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# Author: -# Shiv Haris (shivharis@hotmail.com) - - -"""Implentation of Brocade ML2 Mechanism driver for ML2 Plugin.""" - -from oslo.config import cfg - -from neutron.openstack.common import importutils -from neutron.openstack.common import log as logging -from neutron.plugins.ml2 import driver_api -from neutron.plugins.ml2.drivers.brocade.db import models as brocade_db - -LOG = logging.getLogger(__name__) -MECHANISM_VERSION = 0.9 -NOS_DRIVER = 'neutron.plugins.ml2.drivers.brocade.nos.nosdriver.NOSdriver' - -ML2_BROCADE = [cfg.StrOpt('address', default='', - help=_('The address of the host to SSH to')), - cfg.StrOpt('username', default='admin', - help=_('The SSH username to use')), - cfg.StrOpt('password', default='password', secret=True, - help=_('The SSH password to use')), - cfg.StrOpt('physical_networks', default='', - help=_('Allowed physical networks')), - cfg.StrOpt('ostype', default='NOS', - help=_('Unused')) - ] - -cfg.CONF.register_opts(ML2_BROCADE, "ml2_brocade") - - -class BrocadeMechanism(driver_api.MechanismDriver): - """ML2 Mechanism driver for Brocade VDX switches. This is the upper - layer driver class that interfaces to lower layer (NETCONF) below. 
- - """ - - def __init__(self): - self._driver = None - self._physical_networks = None - self._switch = None - self.initialize() - - def initialize(self): - """Initilize of variables needed by this class.""" - - self._physical_networks = cfg.CONF.ml2_brocade.physical_networks - self.brocade_init() - - def brocade_init(self): - """Brocade specific initialization for this class.""" - - self._switch = {'address': cfg.CONF.ml2_brocade.address, - 'username': cfg.CONF.ml2_brocade.username, - 'password': cfg.CONF.ml2_brocade.password - } - self._driver = importutils.import_object(NOS_DRIVER) - - def create_network_precommit(self, mech_context): - """Create Network in the mechanism specific database table.""" - - network = mech_context.current - context = mech_context._plugin_context - tenant_id = network['tenant_id'] - network_id = network['id'] - - segments = mech_context.network_segments - # currently supports only one segment per network - segment = segments[0] - - network_type = segment['network_type'] - vlan_id = segment['segmentation_id'] - segment_id = segment['id'] - - if segment['physical_network'] not in self._physical_networks: - raise Exception( - _("Brocade Mechanism: failed to create network, " - "network cannot be created in the configured " - "physical network")) - - if network_type != 'vlan': - raise Exception( - _("Brocade Mechanism: failed to create network, " - "only network type vlan is supported")) - - try: - brocade_db.create_network(context, network_id, vlan_id, - segment_id, network_type, tenant_id) - except Exception: - LOG.exception( - _("Brocade Mechanism: failed to create network in db")) - raise Exception( - _("Brocade Mechanism: create_network_precommit failed")) - - LOG.info(_("create network (precommit): %(network_id)s " - "of network type = %(network_type)s " - "with vlan = %(vlan_id)s " - "for tenant %(tenant_id)s"), - {'network_id': network_id, - 'network_type': network_type, - 'vlan_id': vlan_id, - 'tenant_id': tenant_id}) - - def 
create_network_postcommit(self, mech_context): - """Create Network as a portprofile on the switch.""" - - LOG.debug(_("create_network_postcommit: called")) - - network = mech_context.current - # use network_id to get the network attributes - # ONLY depend on our db for getting back network attributes - # this is so we can replay postcommit from db - context = mech_context._plugin_context - - network_id = network['id'] - network = brocade_db.get_network(context, network_id) - network_type = network['network_type'] - tenant_id = network['tenant_id'] - vlan_id = network['vlan'] - - try: - self._driver.create_network(self._switch['address'], - self._switch['username'], - self._switch['password'], - vlan_id) - except Exception: - LOG.exception(_("Brocade NOS driver: failed in create network")) - brocade_db.delete_network(context, network_id) - raise Exception( - _("Brocade Mechanism: create_network_postcommmit failed")) - - LOG.info(_("created network (postcommit): %(network_id)s" - " of network type = %(network_type)s" - " with vlan = %(vlan_id)s" - " for tenant %(tenant_id)s"), - {'network_id': network_id, - 'network_type': network_type, - 'vlan_id': vlan_id, - 'tenant_id': tenant_id}) - - def delete_network_precommit(self, mech_context): - """Delete Network from the plugin specific database table.""" - - LOG.debug(_("delete_network_precommit: called")) - - network = mech_context.current - network_id = network['id'] - vlan_id = network['provider:segmentation_id'] - tenant_id = network['tenant_id'] - - context = mech_context._plugin_context - - try: - brocade_db.delete_network(context, network_id) - except Exception: - LOG.exception( - _("Brocade Mechanism: failed to delete network in db")) - raise Exception( - _("Brocade Mechanism: delete_network_precommit failed")) - - LOG.info(_("delete network (precommit): %(network_id)s" - " with vlan = %(vlan_id)s" - " for tenant %(tenant_id)s"), - {'network_id': network_id, - 'vlan_id': vlan_id, - 'tenant_id': tenant_id}) - - 
def delete_network_postcommit(self, mech_context): - """Delete network which translates to removng portprofile - from the switch. - """ - - LOG.debug(_("delete_network_postcommit: called")) - network = mech_context.current - network_id = network['id'] - vlan_id = network['provider:segmentation_id'] - tenant_id = network['tenant_id'] - - try: - self._driver.delete_network(self._switch['address'], - self._switch['username'], - self._switch['password'], - vlan_id) - except Exception: - LOG.exception(_("Brocade NOS driver: failed to delete network")) - raise Exception( - _("Brocade switch exception, " - "delete_network_postcommit failed")) - - LOG.info(_("delete network (postcommit): %(network_id)s" - " with vlan = %(vlan_id)s" - " for tenant %(tenant_id)s"), - {'network_id': network_id, - 'vlan_id': vlan_id, - 'tenant_id': tenant_id}) - - def update_network_precommit(self, mech_context): - """Noop now, it is left here for future.""" - pass - - def update_network_postcommit(self, mech_context): - """Noop now, it is left here for future.""" - pass - - def create_port_precommit(self, mech_context): - """Create logical port on the switch (db update).""" - - LOG.debug(_("create_port_precommit: called")) - - port = mech_context.current - port_id = port['id'] - network_id = port['network_id'] - tenant_id = port['tenant_id'] - admin_state_up = port['admin_state_up'] - - context = mech_context._plugin_context - - network = brocade_db.get_network(context, network_id) - vlan_id = network['vlan'] - - try: - brocade_db.create_port(context, port_id, network_id, - None, - vlan_id, tenant_id, admin_state_up) - except Exception: - LOG.exception(_("Brocade Mechanism: failed to create port in db")) - raise Exception( - _("Brocade Mechanism: create_port_precommit failed")) - - def create_port_postcommit(self, mech_context): - """Associate the assigned MAC address to the portprofile.""" - - LOG.debug(_("create_port_postcommit: called")) - - port = mech_context.current - port_id = 
port['id'] - network_id = port['network_id'] - tenant_id = port['tenant_id'] - - context = mech_context._plugin_context - - network = brocade_db.get_network(context, network_id) - vlan_id = network['vlan'] - - interface_mac = port['mac_address'] - - # convert mac format: xx:xx:xx:xx:xx:xx -> xxxx.xxxx.xxxx - mac = self.mac_reformat_62to34(interface_mac) - try: - self._driver.associate_mac_to_network(self._switch['address'], - self._switch['username'], - self._switch['password'], - vlan_id, - mac) - except Exception: - LOG.exception( - _("Brocade NOS driver: failed to associate mac %s") - % interface_mac) - raise Exception( - _("Brocade switch exception: create_port_postcommit failed")) - - LOG.info( - _("created port (postcommit): port_id=%(port_id)s" - " network_id=%(network_id)s tenant_id=%(tenant_id)s"), - {'port_id': port_id, - 'network_id': network_id, 'tenant_id': tenant_id}) - - def delete_port_precommit(self, mech_context): - """Delete logical port on the switch (db update).""" - - LOG.debug(_("delete_port_precommit: called")) - port = mech_context.current - port_id = port['id'] - - context = mech_context._plugin_context - - try: - brocade_db.delete_port(context, port_id) - except Exception: - LOG.exception(_("Brocade Mechanism: failed to delete port in db")) - raise Exception( - _("Brocade Mechanism: delete_port_precommit failed")) - - def delete_port_postcommit(self, mech_context): - """Dissociate MAC address from the portprofile.""" - - LOG.debug(_("delete_port_postcommit: called")) - port = mech_context.current - port_id = port['id'] - network_id = port['network_id'] - tenant_id = port['tenant_id'] - - context = mech_context._plugin_context - - network = brocade_db.get_network(context, network_id) - vlan_id = network['vlan'] - - interface_mac = port['mac_address'] - - # convert mac format: xx:xx:xx:xx:xx:xx -> xxxx.xxxx.xxxx - mac = self.mac_reformat_62to34(interface_mac) - try: - self._driver.dissociate_mac_from_network( - self._switch['address'], - 
self._switch['username'], - self._switch['password'], - vlan_id, - mac) - except Exception: - LOG.exception( - _("Brocade NOS driver: failed to dissociate MAC %s") % - interface_mac) - raise Exception( - _("Brocade switch exception, delete_port_postcommit failed")) - - LOG.info( - _("delete port (postcommit): port_id=%(port_id)s" - " network_id=%(network_id)s tenant_id=%(tenant_id)s"), - {'port_id': port_id, - 'network_id': network_id, 'tenant_id': tenant_id}) - - def update_port_precommit(self, mech_context): - """Noop now, it is left here for future.""" - LOG.debug(_("update_port_precommit(self: called")) - - def update_port_postcommit(self, mech_context): - """Noop now, it is left here for future.""" - LOG.debug(_("update_port_postcommit: called")) - - def create_subnet_precommit(self, mech_context): - """Noop now, it is left here for future.""" - LOG.debug(_("create_subnetwork_precommit: called")) - - def create_subnet_postcommit(self, mech_context): - """Noop now, it is left here for future.""" - LOG.debug(_("create_subnetwork_postcommit: called")) - - def delete_subnet_precommit(self, mech_context): - """Noop now, it is left here for future.""" - LOG.debug(_("delete_subnetwork_precommit: called")) - - def delete_subnet_postcommit(self, mech_context): - """Noop now, it is left here for future.""" - LOG.debug(_("delete_subnetwork_postcommit: called")) - - def update_subnet_precommit(self, mech_context): - """Noop now, it is left here for future.""" - LOG.debug(_("update_subnet_precommit(self: called")) - - def update_subnet_postcommit(self, mech_context): - """Noop now, it is left here for future.""" - LOG.debug(_("update_subnet_postcommit: called")) - - @staticmethod - def mac_reformat_62to34(interface_mac): - """Transform MAC address format. - - Transforms from 6 groups of 2 hexadecimal numbers delimited by ":" - to 3 groups of 4 hexadecimals numbers delimited by ".". 
- - :param interface_mac: MAC address in the format xx:xx:xx:xx:xx:xx - :type interface_mac: string - :returns: MAC address in the format xxxx.xxxx.xxxx - :rtype: string - """ - - mac = interface_mac.replace(":", "") - mac = mac[0:4] + "." + mac[4:8] + "." + mac[8:12] - return mac diff --git a/neutron/plugins/ml2/drivers/brocade/nos/__init__.py b/neutron/plugins/ml2/drivers/brocade/nos/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/neutron/plugins/ml2/drivers/brocade/nos/nctemplates.py b/neutron/plugins/ml2/drivers/brocade/nos/nctemplates.py deleted file mode 100644 index dbf7575de..000000000 --- a/neutron/plugins/ml2/drivers/brocade/nos/nctemplates.py +++ /dev/null @@ -1,197 +0,0 @@ -# Copyright (c) 2014 Brocade Communications Systems, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# Authors: -# Varma Bhupatiraju (vbhupati@#brocade.com) -# Shiv Haris (sharis@brocade.com) - - -"""NOS NETCONF XML Configuration Command Templates. 
- -Interface Configuration Commands -""" - -# Create VLAN (vlan_id) -CREATE_VLAN_INTERFACE = """ - - - - - {vlan_id} - - - - -""" - -# Delete VLAN (vlan_id) -DELETE_VLAN_INTERFACE = """ - - - - - {vlan_id} - - - - -""" - -# -# AMPP Life-cycle Management Configuration Commands -# - -# Create AMPP port-profile (port_profile_name) -CREATE_PORT_PROFILE = """ - - - {name} - - -""" - -# Create VLAN sub-profile for port-profile (port_profile_name) -CREATE_VLAN_PROFILE_FOR_PORT_PROFILE = """ - - - {name} - - - -""" - -# Configure L2 mode for VLAN sub-profile (port_profile_name) -CONFIGURE_L2_MODE_FOR_VLAN_PROFILE = """ - - - {name} - - - - - -""" - -# Configure trunk mode for VLAN sub-profile (port_profile_name) -CONFIGURE_TRUNK_MODE_FOR_VLAN_PROFILE = """ - - - {name} - - - - trunk - - - - - -""" - -# Configure allowed VLANs for VLAN sub-profile -# (port_profile_name, allowed_vlan, native_vlan) -CONFIGURE_ALLOWED_VLANS_FOR_VLAN_PROFILE = """ - - - {name} - - - - - - {vlan_id} - - - - - - - -""" - -# Delete port-profile (port_profile_name) -DELETE_PORT_PROFILE = """ - - - {name} - - -""" - -# Activate port-profile (port_profile_name) -ACTIVATE_PORT_PROFILE = """ - - - - {name} - - - - -""" - -# Deactivate port-profile (port_profile_name) -DEACTIVATE_PORT_PROFILE = """ - - - - {name} - - - - -""" - -# Associate MAC address to port-profile (port_profile_name, mac_address) -ASSOCIATE_MAC_TO_PORT_PROFILE = """ - - - - {name} - - {mac_address} - - - - -""" - -# Dissociate MAC address from port-profile (port_profile_name, mac_address) -DISSOCIATE_MAC_FROM_PORT_PROFILE = """ - - - - {name} - - {mac_address} - - - - -""" - -# -# Constants -# - -# Port profile naming convention for Neutron networks -OS_PORT_PROFILE_NAME = "openstack-profile-{id}" - -# Port profile filter expressions -PORT_PROFILE_XPATH_FILTER = "/port-profile" -PORT_PROFILE_NAME_XPATH_FILTER = "/port-profile[name='{name}']" diff --git a/neutron/plugins/ml2/drivers/brocade/nos/nosdriver.py 
b/neutron/plugins/ml2/drivers/brocade/nos/nosdriver.py deleted file mode 100644 index f647370ae..000000000 --- a/neutron/plugins/ml2/drivers/brocade/nos/nosdriver.py +++ /dev/null @@ -1,236 +0,0 @@ -# Copyright 2014 Brocade Communications System, Inc. -# All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# Authors: -# Varma Bhupatiraju (vbhupati@brocade.com) -# Shiv Haris (shivharis@hotmail.com) - - -"""Brocade NOS Driver implements NETCONF over SSHv2 for -Neutron network life-cycle management. -""" - -from ncclient import manager - -from neutron.openstack.common import excutils -from neutron.openstack.common import log as logging -from neutron.plugins.ml2.drivers.brocade.nos import nctemplates as template - - -LOG = logging.getLogger(__name__) -SSH_PORT = 22 - - -def nos_unknown_host_cb(host, fingerprint): - """An unknown host callback. - - Returns `True` if it finds the key acceptable, - and `False` if not. This default callback for NOS always returns 'True' - (i.e. trusts all hosts for now). - """ - return True - - -class NOSdriver(): - """NOS NETCONF interface driver for Neutron network. 
- - Handles life-cycle management of Neutron network (leverages AMPP on NOS) - """ - - def __init__(self): - self.mgr = None - - def connect(self, host, username, password): - """Connect via SSH and initialize the NETCONF session.""" - - # Use the persisted NETCONF connection - if self.mgr and self.mgr.connected: - return self.mgr - - # check if someone forgot to edit the conf file with real values - if host == '': - raise Exception(_("Brocade Switch IP address is not set, " - "check config ml2_conf_brocade.ini file")) - - # Open new NETCONF connection - try: - self.mgr = manager.connect(host=host, port=SSH_PORT, - username=username, password=password, - unknown_host_cb=nos_unknown_host_cb) - except Exception: - with excutils.save_and_reraise_exception(): - LOG.exception(_("Connect failed to switch")) - - LOG.debug(_("Connect success to host %(host)s:%(ssh_port)d"), - dict(host=host, ssh_port=SSH_PORT)) - return self.mgr - - def close_session(self): - """Close NETCONF session.""" - if self.mgr: - self.mgr.close_session() - self.mgr = None - - def create_network(self, host, username, password, net_id): - """Creates a new virtual network.""" - - name = template.OS_PORT_PROFILE_NAME.format(id=net_id) - try: - mgr = self.connect(host, username, password) - self.create_vlan_interface(mgr, net_id) - self.create_port_profile(mgr, name) - self.create_vlan_profile_for_port_profile(mgr, name) - self.configure_l2_mode_for_vlan_profile(mgr, name) - self.configure_trunk_mode_for_vlan_profile(mgr, name) - self.configure_allowed_vlans_for_vlan_profile(mgr, name, net_id) - self.activate_port_profile(mgr, name) - except Exception: - with excutils.save_and_reraise_exception(): - LOG.exception(_("NETCONF error")) - self.close_session() - - def delete_network(self, host, username, password, net_id): - """Deletes a virtual network.""" - - name = template.OS_PORT_PROFILE_NAME.format(id=net_id) - try: - mgr = self.connect(host, username, password) - self.deactivate_port_profile(mgr, 
name) - self.delete_port_profile(mgr, name) - self.delete_vlan_interface(mgr, net_id) - except Exception: - with excutils.save_and_reraise_exception(): - LOG.exception(_("NETCONF error")) - self.close_session() - - def associate_mac_to_network(self, host, username, password, - net_id, mac): - """Associates a MAC address to virtual network.""" - - name = template.OS_PORT_PROFILE_NAME.format(id=net_id) - try: - mgr = self.connect(host, username, password) - self.associate_mac_to_port_profile(mgr, name, mac) - except Exception: - with excutils.save_and_reraise_exception(): - LOG.exception(_("NETCONF error")) - self.close_session() - - def dissociate_mac_from_network(self, host, username, password, - net_id, mac): - """Dissociates a MAC address from virtual network.""" - - name = template.OS_PORT_PROFILE_NAME.format(id=net_id) - try: - mgr = self.connect(host, username, password) - self.dissociate_mac_from_port_profile(mgr, name, mac) - except Exception: - with excutils.save_and_reraise_exception(): - LOG.exception(_("NETCONF error")) - self.close_session() - - def create_vlan_interface(self, mgr, vlan_id): - """Configures a VLAN interface.""" - - confstr = template.CREATE_VLAN_INTERFACE.format(vlan_id=vlan_id) - mgr.edit_config(target='running', config=confstr) - - def delete_vlan_interface(self, mgr, vlan_id): - """Deletes a VLAN interface.""" - - confstr = template.DELETE_VLAN_INTERFACE.format(vlan_id=vlan_id) - mgr.edit_config(target='running', config=confstr) - - def get_port_profiles(self, mgr): - """Retrieves all port profiles.""" - - filterstr = template.PORT_PROFILE_XPATH_FILTER - response = mgr.get_config(source='running', - filter=('xpath', filterstr)).data_xml - return response - - def get_port_profile(self, mgr, name): - """Retrieves a port profile.""" - - filterstr = template.PORT_PROFILE_NAME_XPATH_FILTER.format(name=name) - response = mgr.get_config(source='running', - filter=('xpath', filterstr)).data_xml - return response - - def 
create_port_profile(self, mgr, name): - """Creates a port profile.""" - - confstr = template.CREATE_PORT_PROFILE.format(name=name) - mgr.edit_config(target='running', config=confstr) - - def delete_port_profile(self, mgr, name): - """Deletes a port profile.""" - - confstr = template.DELETE_PORT_PROFILE.format(name=name) - mgr.edit_config(target='running', config=confstr) - - def activate_port_profile(self, mgr, name): - """Activates a port profile.""" - - confstr = template.ACTIVATE_PORT_PROFILE.format(name=name) - mgr.edit_config(target='running', config=confstr) - - def deactivate_port_profile(self, mgr, name): - """Deactivates a port profile.""" - - confstr = template.DEACTIVATE_PORT_PROFILE.format(name=name) - mgr.edit_config(target='running', config=confstr) - - def associate_mac_to_port_profile(self, mgr, name, mac_address): - """Associates a MAC address to a port profile.""" - - confstr = template.ASSOCIATE_MAC_TO_PORT_PROFILE.format( - name=name, mac_address=mac_address) - mgr.edit_config(target='running', config=confstr) - - def dissociate_mac_from_port_profile(self, mgr, name, mac_address): - """Dissociates a MAC address from a port profile.""" - - confstr = template.DISSOCIATE_MAC_FROM_PORT_PROFILE.format( - name=name, mac_address=mac_address) - mgr.edit_config(target='running', config=confstr) - - def create_vlan_profile_for_port_profile(self, mgr, name): - """Creates VLAN sub-profile for port profile.""" - - confstr = template.CREATE_VLAN_PROFILE_FOR_PORT_PROFILE.format( - name=name) - mgr.edit_config(target='running', config=confstr) - - def configure_l2_mode_for_vlan_profile(self, mgr, name): - """Configures L2 mode for VLAN sub-profile.""" - - confstr = template.CONFIGURE_L2_MODE_FOR_VLAN_PROFILE.format( - name=name) - mgr.edit_config(target='running', config=confstr) - - def configure_trunk_mode_for_vlan_profile(self, mgr, name): - """Configures trunk mode for VLAN sub-profile.""" - - confstr = template.CONFIGURE_TRUNK_MODE_FOR_VLAN_PROFILE.format( 
- name=name) - mgr.edit_config(target='running', config=confstr) - - def configure_allowed_vlans_for_vlan_profile(self, mgr, name, vlan_id): - """Configures allowed VLANs for VLAN sub-profile.""" - - confstr = template.CONFIGURE_ALLOWED_VLANS_FOR_VLAN_PROFILE.format( - name=name, vlan_id=vlan_id) - mgr.edit_config(target='running', config=confstr) diff --git a/neutron/plugins/ml2/drivers/cisco/__init__.py b/neutron/plugins/ml2/drivers/cisco/__init__.py deleted file mode 100644 index 788cea1f7..000000000 --- a/neutron/plugins/ml2/drivers/cisco/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright (c) 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. diff --git a/neutron/plugins/ml2/drivers/cisco/apic/__init__.py b/neutron/plugins/ml2/drivers/cisco/apic/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/neutron/plugins/ml2/drivers/cisco/apic/apic_client.py b/neutron/plugins/ml2/drivers/cisco/apic/apic_client.py deleted file mode 100644 index 202e84c1c..000000000 --- a/neutron/plugins/ml2/drivers/cisco/apic/apic_client.py +++ /dev/null @@ -1,416 +0,0 @@ -# Copyright (c) 2014 Cisco Systems -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Henry Gessau, Cisco Systems - -import collections -import time - -import requests -import requests.exceptions - -from neutron.openstack.common import jsonutils as json -from neutron.openstack.common import log as logging -from neutron.plugins.ml2.drivers.cisco.apic import exceptions as cexc - - -LOG = logging.getLogger(__name__) - -APIC_CODE_FORBIDDEN = str(requests.codes.forbidden) - - -# Info about a Managed Object's relative name (RN) and container. -class ManagedObjectName(collections.namedtuple( - 'MoPath', ['container', 'rn_fmt', 'can_create'])): - def __new__(cls, container, rn_fmt, can_create=True): - return super(ManagedObjectName, cls).__new__(cls, container, rn_fmt, - can_create) - - -class ManagedObjectClass(object): - - """Information about a Managed Object (MO) class. - - Constructs and keeps track of the distinguished name (DN) and relative - name (RN) of a managed object (MO) class. The DN is the RN of the MO - appended to the recursive RNs of its containers, i.e.: - DN = uni/container-RN/.../container-RN/object-RN - - Also keeps track of whether the MO can be created in the APIC, as some - MOs are read-only or used for specifying relationships. 
- """ - - supported_mos = { - 'fvTenant': ManagedObjectName(None, 'tn-%s'), - 'fvBD': ManagedObjectName('fvTenant', 'BD-%s'), - 'fvRsBd': ManagedObjectName('fvAEPg', 'rsbd'), - 'fvSubnet': ManagedObjectName('fvBD', 'subnet-[%s]'), - 'fvCtx': ManagedObjectName('fvTenant', 'ctx-%s'), - 'fvRsCtx': ManagedObjectName('fvBD', 'rsctx'), - 'fvAp': ManagedObjectName('fvTenant', 'ap-%s'), - 'fvAEPg': ManagedObjectName('fvAp', 'epg-%s'), - 'fvRsProv': ManagedObjectName('fvAEPg', 'rsprov-%s'), - 'fvRsCons': ManagedObjectName('fvAEPg', 'rscons-%s'), - 'fvRsConsIf': ManagedObjectName('fvAEPg', 'rsconsif-%s'), - 'fvRsDomAtt': ManagedObjectName('fvAEPg', 'rsdomAtt-[%s]'), - 'fvRsPathAtt': ManagedObjectName('fvAEPg', 'rspathAtt-[%s]'), - - 'vzBrCP': ManagedObjectName('fvTenant', 'brc-%s'), - 'vzSubj': ManagedObjectName('vzBrCP', 'subj-%s'), - 'vzFilter': ManagedObjectName('fvTenant', 'flt-%s'), - 'vzRsFiltAtt': ManagedObjectName('vzSubj', 'rsfiltAtt-%s'), - 'vzEntry': ManagedObjectName('vzFilter', 'e-%s'), - 'vzInTerm': ManagedObjectName('vzSubj', 'intmnl'), - 'vzRsFiltAtt__In': ManagedObjectName('vzInTerm', 'rsfiltAtt-%s'), - 'vzOutTerm': ManagedObjectName('vzSubj', 'outtmnl'), - 'vzRsFiltAtt__Out': ManagedObjectName('vzOutTerm', 'rsfiltAtt-%s'), - 'vzCPIf': ManagedObjectName('fvTenant', 'cif-%s'), - 'vzRsIf': ManagedObjectName('vzCPIf', 'rsif'), - - 'vmmProvP': ManagedObjectName(None, 'vmmp-%s', False), - 'vmmDomP': ManagedObjectName('vmmProvP', 'dom-%s'), - 'vmmEpPD': ManagedObjectName('vmmDomP', 'eppd-[%s]'), - - 'physDomP': ManagedObjectName(None, 'phys-%s'), - - 'infra': ManagedObjectName(None, 'infra'), - 'infraNodeP': ManagedObjectName('infra', 'nprof-%s'), - 'infraLeafS': ManagedObjectName('infraNodeP', 'leaves-%s-typ-%s'), - 'infraNodeBlk': ManagedObjectName('infraLeafS', 'nodeblk-%s'), - 'infraRsAccPortP': ManagedObjectName('infraNodeP', 'rsaccPortP-[%s]'), - 'infraAccPortP': ManagedObjectName('infra', 'accportprof-%s'), - 'infraHPortS': 
ManagedObjectName('infraAccPortP', 'hports-%s-typ-%s'), - 'infraPortBlk': ManagedObjectName('infraHPortS', 'portblk-%s'), - 'infraRsAccBaseGrp': ManagedObjectName('infraHPortS', 'rsaccBaseGrp'), - 'infraFuncP': ManagedObjectName('infra', 'funcprof'), - 'infraAccPortGrp': ManagedObjectName('infraFuncP', 'accportgrp-%s'), - 'infraRsAttEntP': ManagedObjectName('infraAccPortGrp', 'rsattEntP'), - 'infraAttEntityP': ManagedObjectName('infra', 'attentp-%s'), - 'infraRsDomP': ManagedObjectName('infraAttEntityP', 'rsdomP-[%s]'), - 'infraRsVlanNs__phys': ManagedObjectName('physDomP', 'rsvlanNs'), - 'infraRsVlanNs__vmm': ManagedObjectName('vmmDomP', 'rsvlanNs'), - - 'fvnsVlanInstP': ManagedObjectName('infra', 'vlanns-%s-%s'), - 'fvnsEncapBlk__vlan': ManagedObjectName('fvnsVlanInstP', - 'from-%s-to-%s'), - 'fvnsVxlanInstP': ManagedObjectName('infra', 'vxlanns-%s'), - 'fvnsEncapBlk__vxlan': ManagedObjectName('fvnsVxlanInstP', - 'from-%s-to-%s'), - - # Read-only - 'fabricTopology': ManagedObjectName(None, 'topology', False), - 'fabricPod': ManagedObjectName('fabricTopology', 'pod-%s', False), - 'fabricPathEpCont': ManagedObjectName('fabricPod', 'paths-%s', False), - 'fabricPathEp': ManagedObjectName('fabricPathEpCont', 'pathep-%s', - False), - } - - # Note(Henry): The use of a mutable default argument _inst_cache is - # intentional. It persists for the life of MoClass to cache instances. 
- # noinspection PyDefaultArgument - def __new__(cls, mo_class, _inst_cache={}): - """Ensure we create only one instance per mo_class.""" - try: - return _inst_cache[mo_class] - except KeyError: - new_inst = super(ManagedObjectClass, cls).__new__(cls) - new_inst.__init__(mo_class) - _inst_cache[mo_class] = new_inst - return new_inst - - def __init__(self, mo_class): - self.klass = mo_class - self.klass_name = mo_class.split('__')[0] - mo = self.supported_mos[mo_class] - self.container = mo.container - self.rn_fmt = mo.rn_fmt - self.dn_fmt, self.args = self._dn_fmt() - self.arg_count = self.dn_fmt.count('%s') - rn_has_arg = self.rn_fmt.count('%s') - self.can_create = rn_has_arg and mo.can_create - - def _dn_fmt(self): - """Build the distinguished name format using container and RN. - - DN = uni/container-RN/.../container-RN/object-RN - - Also make a list of the required name arguments. - Note: Call this method only once at init. - """ - arg = [self.klass] if '%s' in self.rn_fmt else [] - if self.container: - container = ManagedObjectClass(self.container) - dn_fmt = '%s/%s' % (container.dn_fmt, self.rn_fmt) - args = container.args + arg - return dn_fmt, args - return 'uni/%s' % self.rn_fmt, arg - - def dn(self, *args): - """Return the distinguished name for a managed object.""" - return self.dn_fmt % args - - -class ApicSession(object): - - """Manages a session with the APIC.""" - - def __init__(self, host, port, usr, pwd, ssl): - protocol = ssl and 'https' or 'http' - self.api_base = '%s://%s:%s/api' % (protocol, host, port) - self.session = requests.Session() - self.session_deadline = 0 - self.session_timeout = 0 - self.cookie = {} - - # Log in - self.authentication = None - self.username = None - self.password = None - if usr and pwd: - self.login(usr, pwd) - - @staticmethod - def _make_data(key, **attrs): - """Build the body for a msg out of a key and some attributes.""" - return json.dumps({key: {'attributes': attrs}}) - - def _api_url(self, api): - """Create 
the URL for a generic API.""" - return '%s/%s.json' % (self.api_base, api) - - def _mo_url(self, mo, *args): - """Create a URL for a MO lookup by DN.""" - dn = mo.dn(*args) - return '%s/mo/%s.json' % (self.api_base, dn) - - def _qry_url(self, mo): - """Create a URL for a query lookup by MO class.""" - return '%s/class/%s.json' % (self.api_base, mo.klass_name) - - def _check_session(self): - """Check that we are logged in and ensure the session is active.""" - if not self.authentication: - raise cexc.ApicSessionNotLoggedIn - if time.time() > self.session_deadline: - self.refresh() - - def _send(self, request, url, data=None, refreshed=None): - """Send a request and process the response.""" - if data is None: - response = request(url, cookies=self.cookie) - else: - response = request(url, data=data, cookies=self.cookie) - if response is None: - raise cexc.ApicHostNoResponse(url=url) - # Every request refreshes the timeout - self.session_deadline = time.time() + self.session_timeout - if data is None: - request_str = url - else: - request_str = '%s, data=%s' % (url, data) - LOG.debug(_("data = %s"), data) - # imdata is where the APIC returns the useful information - imdata = response.json().get('imdata') - LOG.debug(_("Response: %s"), imdata) - if response.status_code != requests.codes.ok: - try: - err_code = imdata[0]['error']['attributes']['code'] - err_text = imdata[0]['error']['attributes']['text'] - except (IndexError, KeyError): - err_code = '[code for APIC error not found]' - err_text = '[text for APIC error not found]' - # If invalid token then re-login and retry once - if (not refreshed and err_code == APIC_CODE_FORBIDDEN and - err_text.lower().startswith('token was invalid')): - self.login() - return self._send(request, url, data=data, refreshed=True) - raise cexc.ApicResponseNotOk(request=request_str, - status=response.status_code, - reason=response.reason, - err_text=err_text, err_code=err_code) - return imdata - - # REST requests - - def get_data(self, 
request): - """Retrieve generic data from the server.""" - self._check_session() - url = self._api_url(request) - return self._send(self.session.get, url) - - def get_mo(self, mo, *args): - """Retrieve a managed object by its distinguished name.""" - self._check_session() - url = self._mo_url(mo, *args) + '?query-target=self' - return self._send(self.session.get, url) - - def list_mo(self, mo): - """Retrieve the list of managed objects for a class.""" - self._check_session() - url = self._qry_url(mo) - return self._send(self.session.get, url) - - def post_data(self, request, data): - """Post generic data to the server.""" - self._check_session() - url = self._api_url(request) - return self._send(self.session.post, url, data=data) - - def post_mo(self, mo, *args, **kwargs): - """Post data for a managed object to the server.""" - self._check_session() - url = self._mo_url(mo, *args) - data = self._make_data(mo.klass_name, **kwargs) - return self._send(self.session.post, url, data=data) - - # Session management - - def _save_cookie(self, request, response): - """Save the session cookie and its expiration time.""" - imdata = response.json().get('imdata') - if response.status_code == requests.codes.ok: - attributes = imdata[0]['aaaLogin']['attributes'] - try: - self.cookie = {'APIC-Cookie': attributes['token']} - except KeyError: - raise cexc.ApicResponseNoCookie(request=request) - timeout = int(attributes['refreshTimeoutSeconds']) - LOG.debug(_("APIC session will expire in %d seconds"), timeout) - # Give ourselves a few seconds to refresh before timing out - self.session_timeout = timeout - 5 - self.session_deadline = time.time() + self.session_timeout - else: - attributes = imdata[0]['error']['attributes'] - return attributes - - def login(self, usr=None, pwd=None): - """Log in to controller. 
Save user name and authentication.""" - usr = usr or self.username - pwd = pwd or self.password - name_pwd = self._make_data('aaaUser', name=usr, pwd=pwd) - url = self._api_url('aaaLogin') - try: - response = self.session.post(url, data=name_pwd, timeout=10.0) - except requests.exceptions.Timeout: - raise cexc.ApicHostNoResponse(url=url) - attributes = self._save_cookie('aaaLogin', response) - if response.status_code == requests.codes.ok: - self.username = usr - self.password = pwd - self.authentication = attributes - else: - self.authentication = None - raise cexc.ApicResponseNotOk(request=url, - status=response.status_code, - reason=response.reason, - err_text=attributes['text'], - err_code=attributes['code']) - - def refresh(self): - """Called when a session has timed out or almost timed out.""" - url = self._api_url('aaaRefresh') - response = self.session.get(url, cookies=self.cookie) - attributes = self._save_cookie('aaaRefresh', response) - if response.status_code == requests.codes.ok: - # We refreshed before the session timed out. - self.authentication = attributes - else: - err_code = attributes['code'] - err_text = attributes['text'] - if (err_code == APIC_CODE_FORBIDDEN and - err_text.lower().startswith('token was invalid')): - # This means the token timed out, so log in again. 
- LOG.debug(_("APIC session timed-out, logging in again.")) - self.login() - else: - self.authentication = None - raise cexc.ApicResponseNotOk(request=url, - status=response.status_code, - reason=response.reason, - err_text=err_text, - err_code=err_code) - - def logout(self): - """End session with controller.""" - if not self.username: - self.authentication = None - if self.authentication: - data = self._make_data('aaaUser', name=self.username) - self.post_data('aaaLogout', data=data) - self.authentication = None - - -class ManagedObjectAccess(object): - - """CRUD operations on APIC Managed Objects.""" - - def __init__(self, session, mo_class): - self.session = session - self.mo = ManagedObjectClass(mo_class) - - def _create_container(self, *args): - """Recursively create all container objects.""" - if self.mo.container: - container = ManagedObjectAccess(self.session, self.mo.container) - if container.mo.can_create: - container_args = args[0: container.mo.arg_count] - container._create_container(*container_args) - container.session.post_mo(container.mo, *container_args) - - def create(self, *args, **kwargs): - self._create_container(*args) - if self.mo.can_create and 'status' not in kwargs: - kwargs['status'] = 'created' - return self.session.post_mo(self.mo, *args, **kwargs) - - def _mo_attributes(self, obj_data): - if (self.mo.klass_name in obj_data and - 'attributes' in obj_data[self.mo.klass_name]): - return obj_data[self.mo.klass_name]['attributes'] - - def get(self, *args): - """Return a dict of the MO's attributes, or None.""" - imdata = self.session.get_mo(self.mo, *args) - if imdata: - return self._mo_attributes(imdata[0]) - - def list_all(self): - imdata = self.session.list_mo(self.mo) - return filter(None, [self._mo_attributes(obj) for obj in imdata]) - - def list_names(self): - return [obj['name'] for obj in self.list_all()] - - def update(self, *args, **kwargs): - return self.session.post_mo(self.mo, *args, **kwargs) - - def delete(self, *args): - 
return self.session.post_mo(self.mo, *args, status='deleted') - - -class RestClient(ApicSession): - - """APIC REST client for OpenStack Neutron.""" - - def __init__(self, host, port=80, usr=None, pwd=None, ssl=False): - """Establish a session with the APIC.""" - super(RestClient, self).__init__(host, port, usr, pwd, ssl) - - def __getattr__(self, mo_class): - """Add supported MOs as properties on demand.""" - if mo_class not in ManagedObjectClass.supported_mos: - raise cexc.ApicManagedObjectNotSupported(mo_class=mo_class) - self.__dict__[mo_class] = ManagedObjectAccess(self, mo_class) - return self.__dict__[mo_class] diff --git a/neutron/plugins/ml2/drivers/cisco/apic/apic_manager.py b/neutron/plugins/ml2/drivers/cisco/apic/apic_manager.py deleted file mode 100644 index f86aa597d..000000000 --- a/neutron/plugins/ml2/drivers/cisco/apic/apic_manager.py +++ /dev/null @@ -1,559 +0,0 @@ -# Copyright (c) 2014 Cisco Systems Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Arvind Somya (asomya@cisco.com), Cisco Systems Inc. 
- -import itertools -import uuid - -from oslo.config import cfg - -from neutron.openstack.common import excutils -from neutron.plugins.ml2.drivers.cisco.apic import apic_client -from neutron.plugins.ml2.drivers.cisco.apic import apic_model -from neutron.plugins.ml2.drivers.cisco.apic import config -from neutron.plugins.ml2.drivers.cisco.apic import exceptions as cexc - -AP_NAME = 'openstack' -CONTEXT_ENFORCED = '1' -CONTEXT_UNENFORCED = '2' -CONTEXT_DEFAULT = 'default' -DN_KEY = 'dn' -PORT_DN_PATH = 'topology/pod-1/paths-%s/pathep-[eth%s]' -SCOPE_GLOBAL = 'global' -SCOPE_TENANT = 'tenant' -TENANT_COMMON = 'common' - - -def group_by_ranges(i): - """Group a list of numbers into tuples representing contiguous ranges.""" - for a, b in itertools.groupby(enumerate(sorted(i)), lambda (x, y): y - x): - b = list(b) - yield b[0][1], b[-1][1] - - -class APICManager(object): - """Class to manage APIC translations and workflow. - - This class manages translation from Neutron objects to APIC - managed objects and contains workflows to implement these - translations. - """ - def __init__(self): - self.db = apic_model.ApicDbModel() - - apic_conf = cfg.CONF.ml2_cisco_apic - self.switch_dict = config.create_switch_dictionary() - - # Connect to the the APIC - self.apic = apic_client.RestClient( - apic_conf.apic_host, - apic_conf.apic_port, - apic_conf.apic_username, - apic_conf.apic_password - ) - - self.port_profiles = {} - self.vmm_domain = None - self.phys_domain = None - self.vlan_ns = None - self.node_profiles = {} - self.entity_profile = None - self.function_profile = None - self.clear_node_profiles = apic_conf.apic_clear_node_profiles - - def ensure_infra_created_on_apic(self): - """Ensure the infrastructure is setup. 
- - Loop over the switch dictionary from the config and - setup profiles for switches, modules and ports - """ - # Loop over switches - for switch in self.switch_dict: - # Create a node profile for this switch - self.ensure_node_profile_created_for_switch(switch) - - # Check if a port profile exists for this node - ppname = self.check_infra_port_profiles(switch) - - # Gather port ranges for this switch - modules = self.gather_infra_module_ports(switch) - - # Setup each module and port range - for module in modules: - profile = self.db.get_profile_for_module(switch, ppname, - module) - if not profile: - # Create host port selector for this module - hname = uuid.uuid4() - try: - self.apic.infraHPortS.create(ppname, hname, 'range') - # Add relation to the function profile - fpdn = self.function_profile[DN_KEY] - self.apic.infraRsAccBaseGrp.create(ppname, hname, - 'range', tDn=fpdn) - modules[module].sort() - except (cexc.ApicResponseNotOk, KeyError): - with excutils.save_and_reraise_exception(): - self.apic.infraHPortS.delete(ppname, hname, - 'range') - else: - hname = profile.hpselc_id - - ranges = group_by_ranges(modules[module]) - # Add this module and ports to the profile - for prange in ranges: - # Check if this port block is already added to the profile - if not self.db.get_profile_for_module_and_ports( - switch, ppname, module, prange[0], prange[-1]): - # Create port block for this port range - pbname = uuid.uuid4() - self.apic.infraPortBlk.create(ppname, hname, 'range', - pbname, fromCard=module, - toCard=module, - fromPort=str(prange[0]), - toPort=str(prange[-1])) - # Add DB row - self.db.add_profile_for_module_and_ports( - switch, ppname, hname, module, - prange[0], prange[-1]) - - def check_infra_port_profiles(self, switch): - """Check and create infra port profiles for a node.""" - sprofile = self.db.get_port_profile_for_node(switch) - ppname = None - if not sprofile: - # Generate uuid for port profile name - ppname = uuid.uuid4() - try: - # Create port 
profile for this switch - pprofile = self.ensure_port_profile_created_on_apic(ppname) - # Add port profile to node profile - ppdn = pprofile[DN_KEY] - self.apic.infraRsAccPortP.create(switch, ppdn) - except (cexc.ApicResponseNotOk, KeyError): - with excutils.save_and_reraise_exception(): - # Delete port profile - self.apic.infraAccPortP.delete(ppname) - else: - ppname = sprofile.profile_id - - return ppname - - def gather_infra_module_ports(self, switch): - """Build modules and ports per module dictionary.""" - ports = self.switch_dict[switch] - # Gather common modules - modules = {} - for port in ports: - module, sw_port = port.split('/') - if module not in modules: - modules[module] = [] - modules[module].append(int(sw_port)) - - return modules - - def ensure_context_unenforced(self, tenant_id=TENANT_COMMON, - name=CONTEXT_DEFAULT): - """Set the specified tenant's context to unenforced.""" - ctx = self.apic.fvCtx.get(tenant_id, name) - if not ctx: - self.apic.fvCtx.create(tenant_id, name, - pcEnfPref=CONTEXT_UNENFORCED) - elif ctx['pcEnfPref'] != CONTEXT_UNENFORCED: - self.apic.fvCtx.update(tenant_id, name, - pcEnfPref=CONTEXT_UNENFORCED) - - def ensure_context_enforced(self, tenant_id=TENANT_COMMON, - name=CONTEXT_DEFAULT): - """Set the specified tenant's context to enforced.""" - ctx = self.apic.fvCtx.get(tenant_id, name) - if not ctx: - self.apic.fvCtx.create(tenant_id, name, pcEnfPref=CONTEXT_ENFORCED) - elif ctx['pcEnfPref'] != CONTEXT_ENFORCED: - self.apic.fvCtx.update(tenant_id, name, pcEnfPref=CONTEXT_ENFORCED) - - def ensure_entity_profile_created_on_apic(self, name): - """Create the infrastructure entity profile.""" - if self.clear_node_profiles: - self.apic.infraAttEntityP.delete(name) - self.entity_profile = self.apic.infraAttEntityP.get(name) - if not self.entity_profile: - try: - phys_dn = self.phys_domain[DN_KEY] - self.apic.infraAttEntityP.create(name) - # Attach phys domain to entity profile - self.apic.infraRsDomP.create(name, phys_dn) - 
self.entity_profile = self.apic.infraAttEntityP.get(name) - except (cexc.ApicResponseNotOk, KeyError): - with excutils.save_and_reraise_exception(): - # Delete the created entity profile - self.apic.infraAttEntityP.delete(name) - - def ensure_function_profile_created_on_apic(self, name): - """Create the infrastructure function profile.""" - if self.clear_node_profiles: - self.apic.infraAccPortGrp.delete(name) - self.function_profile = self.apic.infraAccPortGrp.get(name) - if not self.function_profile: - try: - self.apic.infraAccPortGrp.create(name) - # Attach entity profile to function profile - entp_dn = self.entity_profile[DN_KEY] - self.apic.infraRsAttEntP.create(name, tDn=entp_dn) - self.function_profile = self.apic.infraAccPortGrp.get(name) - except (cexc.ApicResponseNotOk, KeyError): - with excutils.save_and_reraise_exception(): - # Delete the created function profile - self.apic.infraAccPortGrp.delete(name) - - def ensure_node_profile_created_for_switch(self, switch_id): - """Creates a switch node profile. 
- - Create a node profile for a switch and add a switch - to the leaf node selector - """ - if self.clear_node_profiles: - self.apic.infraNodeP.delete(switch_id) - self.db.delete_profile_for_node(switch_id) - sobj = self.apic.infraNodeP.get(switch_id) - if not sobj: - try: - # Create Node profile - self.apic.infraNodeP.create(switch_id) - # Create leaf selector - lswitch_id = uuid.uuid4() - self.apic.infraLeafS.create(switch_id, lswitch_id, 'range') - # Add leaf nodes to the selector - name = uuid.uuid4() - self.apic.infraNodeBlk.create(switch_id, lswitch_id, 'range', - name, from_=switch_id, - to_=switch_id) - sobj = self.apic.infraNodeP.get(switch_id) - except (cexc.ApicResponseNotOk, KeyError): - with excutils.save_and_reraise_exception(): - # Remove the node profile - self.apic.infraNodeP.delete(switch_id) - - self.node_profiles[switch_id] = { - 'object': sobj - } - - def ensure_port_profile_created_on_apic(self, name): - """Create a port profile.""" - try: - self.apic.infraAccPortP.create(name) - return self.apic.infraAccPortP.get(name) - except (cexc.ApicResponseNotOk, KeyError): - with excutils.save_and_reraise_exception(): - self.apic.infraAccPortP.delete(name) - - def ensure_vmm_domain_created_on_apic(self, vmm_name, - vlan_ns=None, vxlan_ns=None): - """Create Virtual Machine Manager domain. - - Creates the VMM domain on the APIC and adds a VLAN or VXLAN - namespace to that VMM domain. 
- TODO (asomya): Add VXLAN support - """ - provider = 'VMware' - if self.clear_node_profiles: - self.apic.vmmDomP.delete(provider, vmm_name) - self.vmm_domain = self.apic.vmmDomP.get(provider, vmm_name) - if not self.vmm_domain: - try: - self.apic.vmmDomP.create(provider, vmm_name) - if vlan_ns: - vlan_ns_dn = vlan_ns[DN_KEY] - self.apic.infraRsVlanNs__vmm.create(provider, vmm_name, - tDn=vlan_ns_dn) - self.vmm_domain = self.apic.vmmDomP.get(provider, vmm_name) - except (cexc.ApicResponseNotOk, KeyError): - with excutils.save_and_reraise_exception(): - # Delete the VMM domain - self.apic.vmmDomP.delete(provider, vmm_name) - - def ensure_phys_domain_created_on_apic(self, phys_name, - vlan_ns=None): - """Create Virtual Machine Manager domain. - - Creates the VMM domain on the APIC and adds a VLAN or VXLAN - namespace to that VMM domain. - TODO (asomya): Add VXLAN support - """ - if self.clear_node_profiles: - self.apic.physDomP.delete(phys_name) - self.phys_domain = self.apic.physDomP.get(phys_name) - if not self.phys_domain: - try: - self.apic.physDomP.create(phys_name) - if vlan_ns: - vlan_ns_dn = vlan_ns[DN_KEY] - self.apic.infraRsVlanNs__phys.create(phys_name, - tDn=vlan_ns_dn) - self.phys_domain = self.apic.physDomP.get(phys_name) - except (cexc.ApicResponseNotOk, KeyError): - with excutils.save_and_reraise_exception(): - # Delete the physical domain - self.apic.physDomP.delete(phys_name) - - def ensure_vlan_ns_created_on_apic(self, name, vlan_min, vlan_max): - """Creates a static VLAN namespace with the given vlan range.""" - ns_args = name, 'static' - if self.clear_node_profiles: - self.apic.fvnsVlanInstP.delete(name, 'dynamic') - self.apic.fvnsVlanInstP.delete(*ns_args) - self.vlan_ns = self.apic.fvnsVlanInstP.get(*ns_args) - if not self.vlan_ns: - try: - self.apic.fvnsVlanInstP.create(*ns_args) - vlan_min = 'vlan-' + vlan_min - vlan_max = 'vlan-' + vlan_max - ns_blk_args = name, 'static', vlan_min, vlan_max - vlan_encap = 
self.apic.fvnsEncapBlk__vlan.get(*ns_blk_args) - if not vlan_encap: - ns_kw_args = { - 'name': 'encap', - 'from': vlan_min, - 'to': vlan_max - } - self.apic.fvnsEncapBlk__vlan.create(*ns_blk_args, - **ns_kw_args) - self.vlan_ns = self.apic.fvnsVlanInstP.get(*ns_args) - return self.vlan_ns - except (cexc.ApicResponseNotOk, KeyError): - with excutils.save_and_reraise_exception(): - # Delete the vlan namespace - self.apic.fvnsVlanInstP.delete(*ns_args) - - def ensure_tenant_created_on_apic(self, tenant_id): - """Make sure a tenant exists on the APIC.""" - if not self.apic.fvTenant.get(tenant_id): - self.apic.fvTenant.create(tenant_id) - - def ensure_bd_created_on_apic(self, tenant_id, bd_id): - """Creates a Bridge Domain on the APIC.""" - if not self.apic.fvBD.get(tenant_id, bd_id): - try: - self.apic.fvBD.create(tenant_id, bd_id) - # Add default context to the BD - self.ensure_context_enforced() - self.apic.fvRsCtx.create(tenant_id, bd_id, - tnFvCtxName=CONTEXT_DEFAULT) - except (cexc.ApicResponseNotOk, KeyError): - with excutils.save_and_reraise_exception(): - # Delete the bridge domain - self.apic.fvBD.delete(tenant_id, bd_id) - - def delete_bd_on_apic(self, tenant_id, bd_id): - """Deletes a Bridge Domain from the APIC.""" - self.apic.fvBD.delete(tenant_id, bd_id) - - def ensure_subnet_created_on_apic(self, tenant_id, bd_id, gw_ip): - """Creates a subnet on the APIC - - The gateway ip (gw_ip) should be specified as a CIDR - e.g. 10.0.0.1/24 - """ - if not self.apic.fvSubnet.get(tenant_id, bd_id, gw_ip): - self.apic.fvSubnet.create(tenant_id, bd_id, gw_ip) - - def ensure_filter_created_on_apic(self, tenant_id, filter_id): - """Create a filter on the APIC.""" - if not self.apic.vzFilter.get(tenant_id, filter_id): - self.apic.vzFilter.create(tenant_id, filter_id) - - def ensure_epg_created_for_network(self, tenant_id, network_id, net_name): - """Creates an End Point Group on the APIC. - - Create a new EPG on the APIC for the network spcified. 
This information - is also tracked in the local DB and associate the bridge domain for the - network with the EPG created. - """ - # Check if an EPG is already present for this network - epg = self.db.get_epg_for_network(network_id) - if epg: - return epg - - # Create a new EPG on the APIC - epg_uid = '-'.join([str(net_name), str(uuid.uuid4())]) - try: - self.apic.fvAEPg.create(tenant_id, AP_NAME, epg_uid) - - # Add bd to EPG - bd = self.apic.fvBD.get(tenant_id, network_id) - bd_name = bd['name'] - - # Create fvRsBd - self.apic.fvRsBd.create(tenant_id, AP_NAME, epg_uid, - tnFvBDName=bd_name) - - # Add EPG to physical domain - phys_dn = self.phys_domain[DN_KEY] - self.apic.fvRsDomAtt.create(tenant_id, AP_NAME, epg_uid, phys_dn) - except (cexc.ApicResponseNotOk, KeyError): - with excutils.save_and_reraise_exception(): - # Delete the EPG - self.apic.fvAEPg.delete(tenant_id, AP_NAME, epg_uid) - - # Stick it in the DB - epg = self.db.write_epg_for_network(network_id, epg_uid) - - return epg - - def delete_epg_for_network(self, tenant_id, network_id): - """Deletes the EPG from the APIC and removes it from the DB.""" - # Check if an EPG is already present for this network - epg = self.db.get_epg_for_network(network_id) - if not epg: - return False - - # Delete this epg - self.apic.fvAEPg.delete(tenant_id, AP_NAME, epg.epg_id) - # Remove DB row - self.db.delete_epg(epg) - - def create_tenant_filter(self, tenant_id): - """Creates a tenant filter and a generic entry under it.""" - fuuid = uuid.uuid4() - try: - # Create a new tenant filter - self.apic.vzFilter.create(tenant_id, fuuid) - # Create a new entry - euuid = uuid.uuid4() - self.apic.vzEntry.create(tenant_id, fuuid, euuid) - return fuuid - except (cexc.ApicResponseNotOk, KeyError): - with excutils.save_and_reraise_exception(): - self.apic.vzFilter.delete(tenant_id, fuuid) - - def set_contract_for_epg(self, tenant_id, epg_id, - contract_id, provider=False): - """Set the contract for an EPG. 
- - By default EPGs are consumers to a contract. Set provider flag - for a single EPG to act as a contract provider. - """ - if provider: - try: - self.apic.fvRsProv.create(tenant_id, AP_NAME, - epg_id, contract_id) - self.db.set_provider_contract(epg_id) - self.make_tenant_contract_global(tenant_id) - except (cexc.ApicResponseNotOk, KeyError): - with excutils.save_and_reraise_exception(): - self.make_tenant_contract_local(tenant_id) - self.apic.fvRsProv.delete(tenant_id, AP_NAME, - epg_id, contract_id) - else: - self.apic.fvRsCons.create(tenant_id, AP_NAME, epg_id, contract_id) - - def delete_contract_for_epg(self, tenant_id, epg_id, - contract_id, provider=False): - """Delete the contract for an End Point Group. - - Check if the EPG was a provider and attempt to grab another contract - consumer from the DB and set that as the new contract provider. - """ - if provider: - self.apic.fvRsProv.delete(tenant_id, AP_NAME, epg_id, contract_id) - self.db.unset_provider_contract(epg_id) - # Pick out another EPG to set as contract provider - epg = self.db.get_an_epg(epg_id) - self.update_contract_for_epg(tenant_id, epg.epg_id, - contract_id, True) - else: - self.apic.fvRsCons.delete(tenant_id, AP_NAME, epg_id, contract_id) - - def update_contract_for_epg(self, tenant_id, epg_id, - contract_id, provider=False): - """Updates the contract for an End Point Group.""" - self.apic.fvRsCons.delete(tenant_id, AP_NAME, epg_id, contract_id) - self.set_contract_for_epg(tenant_id, epg_id, contract_id, provider) - - def create_tenant_contract(self, tenant_id): - """Creates a tenant contract. - - Create a tenant contract if one doesn't exist. 
Also create a - subject, filter and entry and set the filters to allow all - protocol traffic on all ports - """ - contract = self.db.get_contract_for_tenant(tenant_id) - if not contract: - cuuid = uuid.uuid4() - try: - # Create contract - self.apic.vzBrCP.create(tenant_id, cuuid, scope=SCOPE_TENANT) - acontract = self.apic.vzBrCP.get(tenant_id, cuuid) - # Create subject - suuid = uuid.uuid4() - self.apic.vzSubj.create(tenant_id, cuuid, suuid) - # Create filter and entry - tfilter = self.create_tenant_filter(tenant_id) - # Create interm and outterm - self.apic.vzInTerm.create(tenant_id, cuuid, suuid) - self.apic.vzRsFiltAtt__In.create(tenant_id, cuuid, - suuid, tfilter) - self.apic.vzOutTerm.create(tenant_id, cuuid, suuid) - self.apic.vzRsFiltAtt__Out.create(tenant_id, cuuid, - suuid, tfilter) - # Create contract interface - iuuid = uuid.uuid4() - self.apic.vzCPIf.create(tenant_id, iuuid) - self.apic.vzRsIf.create(tenant_id, iuuid, - tDn=acontract[DN_KEY]) - # Store contract in DB - contract = self.db.write_contract_for_tenant(tenant_id, - cuuid, tfilter) - except (cexc.ApicResponseNotOk, KeyError): - with excutils.save_and_reraise_exception(): - # Delete tenant contract - self.apic.vzBrCP.delete(tenant_id, cuuid) - - return contract - - def make_tenant_contract_global(self, tenant_id): - """Mark the tenant contract's scope to global.""" - contract = self.db.get_contract_for_tenant(tenant_id) - self.apic.vzBrCP.update(tenant_id, contract.contract_id, - scope=SCOPE_GLOBAL) - - def make_tenant_contract_local(self, tenant_id): - """Mark the tenant contract's scope to tenant.""" - contract = self.db.get_contract_for_tenant(tenant_id) - self.apic.vzBrCP.update(tenant_id, contract.contract_id, - scope=SCOPE_TENANT) - - def ensure_path_created_for_port(self, tenant_id, network_id, - host_id, encap, net_name): - """Create path attribute for an End Point Group.""" - encap = 'vlan-' + str(encap) - epg = self.ensure_epg_created_for_network(tenant_id, network_id, - net_name) - 
eid = epg.epg_id - - # Get attached switch and port for this host - host_config = config.get_switch_and_port_for_host(host_id) - if not host_config: - raise cexc.ApicHostNotConfigured(host=host_id) - switch, port = host_config - pdn = PORT_DN_PATH % (switch, port) - - # Check if exists - patt = self.apic.fvRsPathAtt.get(tenant_id, AP_NAME, eid, pdn) - if not patt: - self.apic.fvRsPathAtt.create(tenant_id, AP_NAME, eid, pdn, - encap=encap, mode="regular", - instrImedcy="immediate") diff --git a/neutron/plugins/ml2/drivers/cisco/apic/apic_model.py b/neutron/plugins/ml2/drivers/cisco/apic/apic_model.py deleted file mode 100644 index a3c05d630..000000000 --- a/neutron/plugins/ml2/drivers/cisco/apic/apic_model.py +++ /dev/null @@ -1,177 +0,0 @@ -# Copyright (c) 2014 Cisco Systems Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Arvind Somya (asomya@cisco.com), Cisco Systems Inc. 
- -import sqlalchemy as sa - -from neutron.db import api as db_api -from neutron.db import model_base -from neutron.db import models_v2 - - -class NetworkEPG(model_base.BASEV2): - - """EPG's created on the apic per network.""" - - __tablename__ = 'cisco_ml2_apic_epgs' - - network_id = sa.Column(sa.String(255), nullable=False, primary_key=True) - epg_id = sa.Column(sa.String(64), nullable=False) - segmentation_id = sa.Column(sa.String(64), nullable=False) - provider = sa.Column(sa.Boolean, default=False, nullable=False) - - -class PortProfile(model_base.BASEV2): - - """Port profiles created on the APIC.""" - - __tablename__ = 'cisco_ml2_apic_port_profiles' - - node_id = sa.Column(sa.String(255), nullable=False, primary_key=True) - profile_id = sa.Column(sa.String(64), nullable=False) - hpselc_id = sa.Column(sa.String(64), nullable=False) - module = sa.Column(sa.String(10), nullable=False) - from_port = sa.Column(sa.Integer(), nullable=False) - to_port = sa.Column(sa.Integer(), nullable=False) - - -class TenantContract(model_base.BASEV2, models_v2.HasTenant): - - """Contracts (and Filters) created on the APIC.""" - - __tablename__ = 'cisco_ml2_apic_contracts' - - __table_args__ = (sa.PrimaryKeyConstraint('tenant_id'),) - contract_id = sa.Column(sa.String(64), nullable=False) - filter_id = sa.Column(sa.String(64), nullable=False) - - -class ApicDbModel(object): - - """DB Model to manage all APIC DB interactions.""" - - def __init__(self): - self.session = db_api.get_session() - - def get_port_profile_for_node(self, node_id): - """Returns a port profile for a switch if found in the DB.""" - return self.session.query(PortProfile).filter_by( - node_id=node_id).first() - - def get_profile_for_module_and_ports(self, node_id, profile_id, - module, from_port, to_port): - """Returns profile for module and ports. - - Grabs the profile row from the DB for the specified switch, - module (linecard) and from/to port combination. 
- """ - return self.session.query(PortProfile).filter_by( - node_id=node_id, - module=module, - profile_id=profile_id, - from_port=from_port, - to_port=to_port).first() - - def get_profile_for_module(self, node_id, profile_id, module): - """Returns the first profile for a switch module from the DB.""" - return self.session.query(PortProfile).filter_by( - node_id=node_id, - profile_id=profile_id, - module=module).first() - - def add_profile_for_module_and_ports(self, node_id, profile_id, - hpselc_id, module, - from_port, to_port): - """Adds a profile for switch, module and port range.""" - row = PortProfile(node_id=node_id, profile_id=profile_id, - hpselc_id=hpselc_id, module=module, - from_port=from_port, to_port=to_port) - self.session.add(row) - self.session.flush() - - def get_provider_contract(self): - """Returns provider EPG from the DB if found.""" - return self.session.query(NetworkEPG).filter_by( - provider=True).first() - - def set_provider_contract(self, epg_id): - """Sets an EPG to be a contract provider.""" - epg = self.session.query(NetworkEPG).filter_by( - epg_id=epg_id).first() - if epg: - epg.provider = True - self.session.merge(epg) - self.session.flush() - - def unset_provider_contract(self, epg_id): - """Sets an EPG to be a contract consumer.""" - epg = self.session.query(NetworkEPG).filter_by( - epg_id=epg_id).first() - if epg: - epg.provider = False - self.session.merge(epg) - self.session.flush() - - def get_an_epg(self, exception): - """Returns an EPG from the DB that does not match the id specified.""" - return self.session.query(NetworkEPG).filter( - NetworkEPG.epg_id != exception).first() - - def get_epg_for_network(self, network_id): - """Returns an EPG for a give neutron network.""" - return self.session.query(NetworkEPG).filter_by( - network_id=network_id).first() - - def write_epg_for_network(self, network_id, epg_uid, segmentation_id='1'): - """Stores EPG details for a network. 
- - NOTE: Segmentation_id is just a placeholder currently, it will be - populated with a proper segment id once segmentation mgmt is - moved to the APIC. - """ - epg = NetworkEPG(network_id=network_id, epg_id=epg_uid, - segmentation_id=segmentation_id) - self.session.add(epg) - self.session.flush() - return epg - - def delete_epg(self, epg): - """Deletes an EPG from the DB.""" - self.session.delete(epg) - self.session.flush() - - def get_contract_for_tenant(self, tenant_id): - """Returns the specified tenant's contract.""" - return self.session.query(TenantContract).filter_by( - tenant_id=tenant_id).first() - - def write_contract_for_tenant(self, tenant_id, contract_id, filter_id): - """Stores a new contract for the given tenant.""" - contract = TenantContract(tenant_id=tenant_id, - contract_id=contract_id, - filter_id=filter_id) - self.session.add(contract) - self.session.flush() - - return contract - - def delete_profile_for_node(self, node_id): - """Deletes the port profile for a node.""" - profile = self.session.query(PortProfile).filter_by( - node_id=node_id).first() - if profile: - self.session.delete(profile) - self.session.flush() diff --git a/neutron/plugins/ml2/drivers/cisco/apic/config.py b/neutron/plugins/ml2/drivers/cisco/apic/config.py deleted file mode 100644 index c5c43f28f..000000000 --- a/neutron/plugins/ml2/drivers/cisco/apic/config.py +++ /dev/null @@ -1,82 +0,0 @@ -# Copyright (c) 2014 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Arvind Somya (asomya@cisco.com), Cisco Systems Inc. - -from oslo.config import cfg - - -apic_opts = [ - cfg.StrOpt('apic_host', - help=_("Host name or IP Address of the APIC controller")), - cfg.StrOpt('apic_username', - help=_("Username for the APIC controller")), - cfg.StrOpt('apic_password', - help=_("Password for the APIC controller"), secret=True), - cfg.StrOpt('apic_port', - help=_("Communication port for the APIC controller")), - cfg.StrOpt('apic_vmm_provider', default='VMware', - help=_("Name for the VMM domain provider")), - cfg.StrOpt('apic_vmm_domain', default='openstack', - help=_("Name for the VMM domain to be created for Openstack")), - cfg.StrOpt('apic_vlan_ns_name', default='openstack_ns', - help=_("Name for the vlan namespace to be used for openstack")), - cfg.StrOpt('apic_vlan_range', default='2:4093', - help=_("Range of VLAN's to be used for Openstack")), - cfg.StrOpt('apic_node_profile', default='openstack_profile', - help=_("Name of the node profile to be created")), - cfg.StrOpt('apic_entity_profile', default='openstack_entity', - help=_("Name of the entity profile to be created")), - cfg.StrOpt('apic_function_profile', default='openstack_function', - help=_("Name of the function profile to be created")), - cfg.BoolOpt('apic_clear_node_profiles', default=False, - help=_("Clear the node profiles on the APIC at startup " - "(mainly used for testing)")), -] - - -cfg.CONF.register_opts(apic_opts, "ml2_cisco_apic") - - -def get_switch_and_port_for_host(host_id): - for switch, connected in _switch_dict.items(): - for port, hosts in connected.items(): - if host_id in hosts: - return switch, port - - -_switch_dict = {} - - -def create_switch_dictionary(): - multi_parser = cfg.MultiConfigParser() - read_ok = multi_parser.read(cfg.CONF.config_file) - - if len(read_ok) != len(cfg.CONF.config_file): - raise cfg.Error(_("Some config files 
were not parsed properly")) - - for parsed_file in multi_parser.parsed: - for parsed_item in parsed_file.keys(): - if parsed_item.startswith('apic_switch'): - switch, switch_id = parsed_item.split(':') - if switch.lower() == 'apic_switch': - _switch_dict[switch_id] = {} - port_cfg = parsed_file[parsed_item].items() - for host_list, port in port_cfg: - hosts = host_list.split(',') - port = port[0] - _switch_dict[switch_id][port] = hosts - - return _switch_dict diff --git a/neutron/plugins/ml2/drivers/cisco/apic/exceptions.py b/neutron/plugins/ml2/drivers/cisco/apic/exceptions.py deleted file mode 100644 index b33abb17d..000000000 --- a/neutron/plugins/ml2/drivers/cisco/apic/exceptions.py +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright (c) 2014 Cisco Systems -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# -# @author: Henry Gessau, Cisco Systems - -"""Exceptions used by Cisco APIC ML2 mechanism driver.""" - -from neutron.common import exceptions - - -class ApicHostNoResponse(exceptions.NotFound): - """No response from the APIC via the specified URL.""" - message = _("No response from APIC at %(url)s") - - -class ApicResponseNotOk(exceptions.NeutronException): - """A response from the APIC was not HTTP OK.""" - message = _("APIC responded with HTTP status %(status)s: %(reason)s, " - "Request: '%(request)s', " - "APIC error code %(err_code)s: %(err_text)s") - - -class ApicResponseNoCookie(exceptions.NeutronException): - """A response from the APIC did not contain an expected cookie.""" - message = _("APIC failed to provide cookie for %(request)s request") - - -class ApicSessionNotLoggedIn(exceptions.NotAuthorized): - """Attempted APIC operation while not logged in to APIC.""" - message = _("Authorized APIC session not established") - - -class ApicHostNotConfigured(exceptions.NotAuthorized): - """The switch and port for the specified host are not configured.""" - message = _("The switch and port for host '%(host)s' are not configured") - - -class ApicManagedObjectNotSupported(exceptions.NeutronException): - """Attempted to use an unsupported Managed Object.""" - message = _("Managed Object '%(mo_class)s' is not supported") - - -class ApicMultipleVlanRanges(exceptions.NeutronException): - """Multiple VLAN ranges specified.""" - message = _("Multiple VLAN ranges are not supported in the APIC plugin. " - "Please specify a single VLAN range. " - "Current config: '%(vlan_ranges)s'") diff --git a/neutron/plugins/ml2/drivers/cisco/apic/mechanism_apic.py b/neutron/plugins/ml2/drivers/cisco/apic/mechanism_apic.py deleted file mode 100644 index d5297df68..000000000 --- a/neutron/plugins/ml2/drivers/cisco/apic/mechanism_apic.py +++ /dev/null @@ -1,150 +0,0 @@ -# Copyright (c) 2014 Cisco Systems Inc. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Arvind Somya (asomya@cisco.com), Cisco Systems Inc. - -import netaddr - -from oslo.config import cfg - -from neutron.extensions import portbindings -from neutron.openstack.common import log -from neutron.plugins.common import constants -from neutron.plugins.ml2 import driver_api as api -from neutron.plugins.ml2.drivers.cisco.apic import apic_manager -from neutron.plugins.ml2.drivers.cisco.apic import exceptions as apic_exc - - -LOG = log.getLogger(__name__) - - -class APICMechanismDriver(api.MechanismDriver): - - def initialize(self): - self.apic_manager = apic_manager.APICManager() - - # Create a Phys domain and VLAN namespace - # Get vlan ns name - ns_name = cfg.CONF.ml2_cisco_apic.apic_vlan_ns_name - - # Grab vlan ranges - if len(cfg.CONF.ml2_type_vlan.network_vlan_ranges) != 1: - raise apic_exc.ApicMultipleVlanRanges( - cfg.CONF.ml2_type_vlan.network_vlan_ranges) - vlan_ranges = cfg.CONF.ml2_type_vlan.network_vlan_ranges[0] - if ',' in vlan_ranges: - raise apic_exc.ApicMultipleVlanRanges(vlan_ranges) - (vlan_min, vlan_max) = vlan_ranges.split(':')[-2:] - - # Create VLAN namespace - vlan_ns = self.apic_manager.ensure_vlan_ns_created_on_apic(ns_name, - vlan_min, - vlan_max) - phys_name = cfg.CONF.ml2_cisco_apic.apic_vmm_domain - # Create Physical domain - self.apic_manager.ensure_phys_domain_created_on_apic(phys_name, - vlan_ns) - - # Create entity profile - ent_name = 
cfg.CONF.ml2_cisco_apic.apic_entity_profile - self.apic_manager.ensure_entity_profile_created_on_apic(ent_name) - - # Create function profile - func_name = cfg.CONF.ml2_cisco_apic.apic_function_profile - self.apic_manager.ensure_function_profile_created_on_apic(func_name) - - # Create infrastructure on apic - self.apic_manager.ensure_infra_created_on_apic() - - def _perform_port_operations(self, context): - # Get tenant details from port context - tenant_id = context.current['tenant_id'] - - # Get network - network = context.network.current['id'] - net_name = context.network.current['name'] - - # Get port - port = context.current - - # Get segmentation id - if not context.bound_segment: - LOG.debug(_("Port %s is not bound to a segment"), port) - return - seg = None - if (context.bound_segment.get(api.NETWORK_TYPE) in - [constants.TYPE_VLAN]): - seg = context.bound_segment.get(api.SEGMENTATION_ID) - - # Check if a compute port - if not port['device_owner'].startswith('compute'): - # Not a compute port, return - return - - host = port.get(portbindings.HOST_ID) - # Check host that the dhcp agent is running on - filters = {'device_owner': 'network:dhcp', - 'network_id': network} - dhcp_ports = context._plugin.get_ports(context._plugin_context, - filters=filters) - dhcp_hosts = [] - for dhcp_port in dhcp_ports: - dhcp_hosts.append(dhcp_port.get(portbindings.HOST_ID)) - - # Create a static path attachment for this host/epg/switchport combo - self.apic_manager.ensure_tenant_created_on_apic(tenant_id) - if dhcp_hosts: - for dhcp_host in dhcp_hosts: - self.apic_manager.ensure_path_created_for_port(tenant_id, - network, - dhcp_host, seg, - net_name) - if host not in dhcp_hosts: - self.apic_manager.ensure_path_created_for_port(tenant_id, network, - host, seg, net_name) - - def create_port_postcommit(self, context): - self._perform_port_operations(context) - - def update_port_postcommit(self, context): - self._perform_port_operations(context) - - def 
create_network_postcommit(self, context): - net_id = context.current['id'] - tenant_id = context.current['tenant_id'] - net_name = context.current['name'] - - self.apic_manager.ensure_bd_created_on_apic(tenant_id, net_id) - # Create EPG for this network - self.apic_manager.ensure_epg_created_for_network(tenant_id, net_id, - net_name) - - def delete_network_postcommit(self, context): - net_id = context.current['id'] - tenant_id = context.current['tenant_id'] - - self.apic_manager.delete_bd_on_apic(tenant_id, net_id) - self.apic_manager.delete_epg_for_network(tenant_id, net_id) - - def create_subnet_postcommit(self, context): - tenant_id = context.current['tenant_id'] - network_id = context.current['network_id'] - gateway_ip = context.current['gateway_ip'] - cidr = netaddr.IPNetwork(context.current['cidr']) - netmask = str(cidr.prefixlen) - gateway_ip = gateway_ip + '/' + netmask - - self.apic_manager.ensure_subnet_created_on_apic(tenant_id, network_id, - gateway_ip) diff --git a/neutron/plugins/ml2/drivers/cisco/nexus/README b/neutron/plugins/ml2/drivers/cisco/nexus/README deleted file mode 100644 index 21905b036..000000000 --- a/neutron/plugins/ml2/drivers/cisco/nexus/README +++ /dev/null @@ -1,19 +0,0 @@ -Neutron ML2 Cisco Nexus Mechanism Driver README - - -Notes: - -The initial version of this driver supports only a single physical -network. - -For provider networks, extended configuration options are not -currently supported. - -This driver's database may have duplicate entries also found in the -core ML2 database. Since the Cisco Nexus DB code is a port from the -plugins/cisco implementation this duplication will remain until the -plugins/cisco code is deprecated. 
- - -For more details on using Cisco Nexus switches under ML2 please refer to: -http://wiki.openstack.org/wiki/Neutron/ML2/MechCiscoNexus diff --git a/neutron/plugins/ml2/drivers/cisco/nexus/__init__.py b/neutron/plugins/ml2/drivers/cisco/nexus/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/neutron/plugins/ml2/drivers/cisco/nexus/config.py b/neutron/plugins/ml2/drivers/cisco/nexus/config.py deleted file mode 100644 index 3be443088..000000000 --- a/neutron/plugins/ml2/drivers/cisco/nexus/config.py +++ /dev/null @@ -1,65 +0,0 @@ -# Copyright (c) 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo.config import cfg - - -ml2_cisco_opts = [ - cfg.StrOpt('vlan_name_prefix', default='q-', - help=_("VLAN Name prefix")), - cfg.BoolOpt('svi_round_robin', default=False, - help=_("Distribute SVI interfaces over all switches")), - cfg.StrOpt('managed_physical_network', - help=_("The physical network managed by the switches.")), -] - - -cfg.CONF.register_opts(ml2_cisco_opts, "ml2_cisco") - -# -# Format for ml2_conf_cisco.ini 'ml2_mech_cisco_nexus' is: -# {('', ''): '', ...} -# -# Example: -# {('1.1.1.1', 'username'): 'admin', -# ('1.1.1.1', 'password'): 'mySecretPassword', -# ('1.1.1.1', 'compute1'): '1/1', ...} -# - - -class ML2MechCiscoConfig(object): - """ML2 Mechanism Driver Cisco Configuration class.""" - nexus_dict = {} - - def __init__(self): - self._create_ml2_mech_device_cisco_dictionary() - - def _create_ml2_mech_device_cisco_dictionary(self): - """Create the ML2 device cisco dictionary. - - Read data from the ml2_conf_cisco.ini device supported sections. - """ - multi_parser = cfg.MultiConfigParser() - read_ok = multi_parser.read(cfg.CONF.config_file) - - if len(read_ok) != len(cfg.CONF.config_file): - raise cfg.Error(_("Some config files were not parsed properly")) - - for parsed_file in multi_parser.parsed: - for parsed_item in parsed_file.keys(): - dev_id, sep, dev_ip = parsed_item.partition(':') - if dev_id.lower() == 'ml2_mech_cisco_nexus': - for dev_key, value in parsed_file[parsed_item].items(): - self.nexus_dict[dev_ip, dev_key] = value[0] diff --git a/neutron/plugins/ml2/drivers/cisco/nexus/constants.py b/neutron/plugins/ml2/drivers/cisco/nexus/constants.py deleted file mode 100644 index f3191b0b2..000000000 --- a/neutron/plugins/ml2/drivers/cisco/nexus/constants.py +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright 2011 OpenStack Foundation. -# All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - - -CREDENTIAL_USERNAME = 'user_name' -CREDENTIAL_PASSWORD = 'password' - -USERNAME = 'username' -PASSWORD = 'password' - -NETWORK_ADMIN = 'network_admin' diff --git a/neutron/plugins/ml2/drivers/cisco/nexus/exceptions.py b/neutron/plugins/ml2/drivers/cisco/nexus/exceptions.py deleted file mode 100644 index 9302f30de..000000000 --- a/neutron/plugins/ml2/drivers/cisco/nexus/exceptions.py +++ /dev/null @@ -1,84 +0,0 @@ -# Copyright (c) 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""Exceptions used by Cisco Nexus ML2 mechanism driver.""" - -from neutron.common import exceptions - - -class CredentialNotFound(exceptions.NeutronException): - """Credential with this ID cannot be found.""" - message = _("Credential %(credential_id)s could not be found.") - - -class CredentialNameNotFound(exceptions.NeutronException): - """Credential Name could not be found.""" - message = _("Credential %(credential_name)s could not be found.") - - -class CredentialAlreadyExists(exceptions.NeutronException): - """Credential name already exists.""" - message = _("Credential %(credential_name)s already exists " - "for tenant %(tenant_id)s.") - - -class NexusComputeHostNotConfigured(exceptions.NeutronException): - """Connection to compute host is not configured.""" - message = _("Connection to %(host)s is not configured.") - - -class NexusConnectFailed(exceptions.NeutronException): - """Failed to connect to Nexus switch.""" - message = _("Unable to connect to Nexus %(nexus_host)s. Reason: %(exc)s.") - - -class NexusConfigFailed(exceptions.NeutronException): - """Failed to configure Nexus switch.""" - message = _("Failed to configure Nexus: %(config)s. 
Reason: %(exc)s.") - - -class NexusPortBindingNotFound(exceptions.NeutronException): - """NexusPort Binding is not present.""" - message = _("Nexus Port Binding (%(filters)s) is not present") - - def __init__(self, **kwargs): - filters = ','.join('%s=%s' % i for i in kwargs.items()) - super(NexusPortBindingNotFound, self).__init__(filters=filters) - - -class NexusMissingRequiredFields(exceptions.NeutronException): - """Missing required fields to configure nexus switch.""" - message = _("Missing required field(s) to configure nexus switch: " - "%(fields)s") - - -class NoNexusSviSwitch(exceptions.NeutronException): - """No usable nexus switch found.""" - message = _("No usable Nexus switch found to create SVI interface.") - - -class SubnetNotSpecified(exceptions.NeutronException): - """Subnet id not specified.""" - message = _("No subnet_id specified for router gateway.") - - -class SubnetInterfacePresent(exceptions.NeutronException): - """Subnet SVI interface already exists.""" - message = _("Subnet %(subnet_id)s has an interface on %(router_id)s.") - - -class PortIdForNexusSvi(exceptions.NeutronException): - """Port Id specified for Nexus SVI.""" - message = _('Nexus hardware router gateway only uses Subnet Ids.') diff --git a/neutron/plugins/ml2/drivers/cisco/nexus/mech_cisco_nexus.py b/neutron/plugins/ml2/drivers/cisco/nexus/mech_cisco_nexus.py deleted file mode 100644 index 8db752829..000000000 --- a/neutron/plugins/ml2/drivers/cisco/nexus/mech_cisco_nexus.py +++ /dev/null @@ -1,219 +0,0 @@ -# Copyright 2013 OpenStack Foundation -# All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -ML2 Mechanism Driver for Cisco Nexus platforms. -""" - -from oslo.config import cfg - -from neutron.common import constants as n_const -from neutron.extensions import portbindings -from neutron.openstack.common import log as logging -from neutron.plugins.common import constants as p_const -from neutron.plugins.ml2 import driver_api as api -from neutron.plugins.ml2.drivers.cisco.nexus import config as conf -from neutron.plugins.ml2.drivers.cisco.nexus import exceptions as excep -from neutron.plugins.ml2.drivers.cisco.nexus import nexus_db_v2 as nxos_db -from neutron.plugins.ml2.drivers.cisco.nexus import nexus_network_driver - -LOG = logging.getLogger(__name__) - - -class CiscoNexusMechanismDriver(api.MechanismDriver): - - """Cisco Nexus ML2 Mechanism Driver.""" - - def initialize(self): - # Create ML2 device dictionary from ml2_conf.ini entries. - conf.ML2MechCiscoConfig() - - # Extract configuration parameters from the configuration file. 
- self._nexus_switches = conf.ML2MechCiscoConfig.nexus_dict - LOG.debug(_("nexus_switches found = %s"), self._nexus_switches) - - self.driver = nexus_network_driver.CiscoNexusDriver() - - def _valid_network_segment(self, segment): - return (cfg.CONF.ml2_cisco.managed_physical_network is None or - cfg.CONF.ml2_cisco.managed_physical_network == - segment[api.PHYSICAL_NETWORK]) - - def _get_vlanid(self, segment): - if (segment and segment[api.NETWORK_TYPE] == p_const.TYPE_VLAN and - self._valid_network_segment(segment)): - return segment.get(api.SEGMENTATION_ID) - - def _is_deviceowner_compute(self, port): - return port['device_owner'].startswith('compute') - - def _is_status_active(self, port): - return port['status'] == n_const.PORT_STATUS_ACTIVE - - def _get_switch_info(self, host_id): - host_connections = [] - for switch_ip, attr in self._nexus_switches: - if str(attr) == str(host_id): - port_id = self._nexus_switches[switch_ip, attr] - if ':' in port_id: - intf_type, port = port_id.split(':') - else: - intf_type, port = 'ethernet', port_id - host_connections.append((switch_ip, intf_type, port)) - - if host_connections: - return host_connections - else: - raise excep.NexusComputeHostNotConfigured(host=host_id) - - def _configure_nxos_db(self, vlan_id, device_id, host_id): - """Create the nexus database entry. - - Called during update precommit port event. - """ - host_connections = self._get_switch_info(host_id) - for switch_ip, intf_type, nexus_port in host_connections: - port_id = '%s:%s' % (intf_type, nexus_port) - nxos_db.add_nexusport_binding(port_id, str(vlan_id), switch_ip, - device_id) - - def _configure_switch_entry(self, vlan_id, device_id, host_id): - """Create a nexus switch entry. - - if needed, create a VLAN in the appropriate switch/port and - configure the appropriate interfaces for this VLAN. - - Called during update postcommit port event. 
- """ - vlan_name = cfg.CONF.ml2_cisco.vlan_name_prefix + str(vlan_id) - host_connections = self._get_switch_info(host_id) - - for switch_ip, intf_type, nexus_port in host_connections: - # Check to see if this is the first binding to use this vlan on the - # switch/port. Configure switch accordingly. - bindings = nxos_db.get_nexusvlan_binding(vlan_id, switch_ip) - if len(bindings) == 1: - LOG.debug(_("Nexus: create & trunk vlan %s"), vlan_name) - self.driver.create_and_trunk_vlan( - switch_ip, vlan_id, vlan_name, intf_type, nexus_port) - else: - LOG.debug(_("Nexus: trunk vlan %s"), vlan_name) - self.driver.enable_vlan_on_trunk_int(switch_ip, vlan_id, - intf_type, nexus_port) - - def _delete_nxos_db(self, vlan_id, device_id, host_id): - """Delete the nexus database entry. - - Called during delete precommit port event. - """ - try: - rows = nxos_db.get_nexusvm_bindings(vlan_id, device_id) - for row in rows: - nxos_db.remove_nexusport_binding( - row.port_id, row.vlan_id, row.switch_ip, row.instance_id) - except excep.NexusPortBindingNotFound: - return - - def _delete_switch_entry(self, vlan_id, device_id, host_id): - """Delete the nexus switch entry. - - By accessing the current db entries determine if switch - configuration can be removed. - - Called during update postcommit port event. - """ - host_connections = self._get_switch_info(host_id) - for switch_ip, intf_type, nexus_port in host_connections: - # if there are no remaining db entries using this vlan on this - # nexus switch port then remove vlan from the switchport trunk. - port_id = '%s:%s' % (intf_type, nexus_port) - try: - nxos_db.get_port_vlan_switch_binding(port_id, vlan_id, - switch_ip) - except excep.NexusPortBindingNotFound: - self.driver.disable_vlan_on_trunk_int(switch_ip, vlan_id, - intf_type, nexus_port) - - # if there are no remaining db entries using this vlan on this - # nexus switch then remove the vlan. 
- try: - nxos_db.get_nexusvlan_binding(vlan_id, switch_ip) - except excep.NexusPortBindingNotFound: - self.driver.delete_vlan(switch_ip, vlan_id) - - def _is_vm_migration(self, context): - if not context.bound_segment and context.original_bound_segment: - return (context.current.get(portbindings.HOST_ID) != - context.original.get(portbindings.HOST_ID)) - - def _port_action(self, port, segment, func): - """Verify configuration and then process event.""" - device_id = port.get('device_id') - host_id = port.get(portbindings.HOST_ID) - vlan_id = self._get_vlanid(segment) - - if vlan_id and device_id and host_id: - func(vlan_id, device_id, host_id) - else: - fields = "vlan_id " if not vlan_id else "" - fields += "device_id " if not device_id else "" - fields += "host_id" if not host_id else "" - raise excep.NexusMissingRequiredFields(fields=fields) - - def update_port_precommit(self, context): - """Update port pre-database transaction commit event.""" - - # if VM migration is occurring then remove previous database entry - # else process update event. - if self._is_vm_migration(context): - self._port_action(context.original, - context.original_bound_segment, - self._delete_nxos_db) - else: - if (self._is_deviceowner_compute(context.current) and - self._is_status_active(context.current)): - self._port_action(context.current, - context.bound_segment, - self._configure_nxos_db) - - def update_port_postcommit(self, context): - """Update port non-database commit event.""" - - # if VM migration is occurring then remove previous nexus switch entry - # else process update event. 
- if self._is_vm_migration(context): - self._port_action(context.original, - context.original_bound_segment, - self._delete_switch_entry) - else: - if (self._is_deviceowner_compute(context.current) and - self._is_status_active(context.current)): - self._port_action(context.current, - context.bound_segment, - self._configure_switch_entry) - - def delete_port_precommit(self, context): - """Delete port pre-database commit event.""" - if self._is_deviceowner_compute(context.current): - self._port_action(context.current, - context.bound_segment, - self._delete_nxos_db) - - def delete_port_postcommit(self, context): - """Delete port non-database commit event.""" - if self._is_deviceowner_compute(context.current): - self._port_action(context.current, - context.bound_segment, - self._delete_switch_entry) diff --git a/neutron/plugins/ml2/drivers/cisco/nexus/nexus_db_v2.py b/neutron/plugins/ml2/drivers/cisco/nexus/nexus_db_v2.py deleted file mode 100644 index 081b0d0a0..000000000 --- a/neutron/plugins/ml2/drivers/cisco/nexus/nexus_db_v2.py +++ /dev/null @@ -1,143 +0,0 @@ -# Copyright (c) 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -import sqlalchemy.orm.exc as sa_exc - -import neutron.db.api as db -from neutron.openstack.common import log as logging -from neutron.plugins.ml2.drivers.cisco.nexus import exceptions as c_exc -from neutron.plugins.ml2.drivers.cisco.nexus import nexus_models_v2 - - -LOG = logging.getLogger(__name__) - - -def get_nexusport_binding(port_id, vlan_id, switch_ip, instance_id): - """Lists a nexusport binding.""" - LOG.debug(_("get_nexusport_binding() called")) - return _lookup_all_nexus_bindings(port_id=port_id, - vlan_id=vlan_id, - switch_ip=switch_ip, - instance_id=instance_id) - - -def get_nexusvlan_binding(vlan_id, switch_ip): - """Lists a vlan and switch binding.""" - LOG.debug(_("get_nexusvlan_binding() called")) - return _lookup_all_nexus_bindings(vlan_id=vlan_id, switch_ip=switch_ip) - - -def add_nexusport_binding(port_id, vlan_id, switch_ip, instance_id): - """Adds a nexusport binding.""" - LOG.debug(_("add_nexusport_binding() called")) - session = db.get_session() - binding = nexus_models_v2.NexusPortBinding(port_id=port_id, - vlan_id=vlan_id, - switch_ip=switch_ip, - instance_id=instance_id) - session.add(binding) - session.flush() - return binding - - -def remove_nexusport_binding(port_id, vlan_id, switch_ip, instance_id): - """Removes a nexusport binding.""" - LOG.debug(_("remove_nexusport_binding() called")) - session = db.get_session() - binding = _lookup_all_nexus_bindings(session=session, - vlan_id=vlan_id, - switch_ip=switch_ip, - port_id=port_id, - instance_id=instance_id) - for bind in binding: - session.delete(bind) - session.flush() - return binding - - -def update_nexusport_binding(port_id, new_vlan_id): - """Updates nexusport binding.""" - if not new_vlan_id: - LOG.warning(_("update_nexusport_binding called with no vlan")) - return - LOG.debug(_("update_nexusport_binding called")) - session = db.get_session() - binding = _lookup_one_nexus_binding(session=session, port_id=port_id) - binding.vlan_id = new_vlan_id - session.merge(binding) - 
session.flush() - return binding - - -def get_nexusvm_bindings(vlan_id, instance_id): - """Lists nexusvm bindings.""" - LOG.debug(_("get_nexusvm_bindings() called")) - return _lookup_all_nexus_bindings(instance_id=instance_id, - vlan_id=vlan_id) - - -def get_port_vlan_switch_binding(port_id, vlan_id, switch_ip): - """Lists nexusvm bindings.""" - LOG.debug(_("get_port_vlan_switch_binding() called")) - return _lookup_all_nexus_bindings(port_id=port_id, - switch_ip=switch_ip, - vlan_id=vlan_id) - - -def get_port_switch_bindings(port_id, switch_ip): - """List all vm/vlan bindings on a Nexus switch port.""" - LOG.debug(_("get_port_switch_bindings() called, " - "port:'%(port_id)s', switch:'%(switch_ip)s'"), - {'port_id': port_id, 'switch_ip': switch_ip}) - try: - return _lookup_all_nexus_bindings(port_id=port_id, - switch_ip=switch_ip) - except c_exc.NexusPortBindingNotFound: - pass - - -def _lookup_nexus_bindings(query_type, session=None, **bfilter): - """Look up 'query_type' Nexus bindings matching the filter. - - :param query_type: 'all', 'one' or 'first' - :param session: db session - :param bfilter: filter for bindings query - :return: bindings if query gave a result, else - raise NexusPortBindingNotFound. 
- """ - if session is None: - session = db.get_session() - query_method = getattr(session.query( - nexus_models_v2.NexusPortBinding).filter_by(**bfilter), query_type) - try: - bindings = query_method() - if bindings: - return bindings - except sa_exc.NoResultFound: - pass - raise c_exc.NexusPortBindingNotFound(**bfilter) - - -def _lookup_all_nexus_bindings(session=None, **bfilter): - return _lookup_nexus_bindings('all', session, **bfilter) - - -def _lookup_one_nexus_binding(session=None, **bfilter): - return _lookup_nexus_bindings('one', session, **bfilter) - - -def _lookup_first_nexus_binding(session=None, **bfilter): - return _lookup_nexus_bindings('first', session, **bfilter) diff --git a/neutron/plugins/ml2/drivers/cisco/nexus/nexus_models_v2.py b/neutron/plugins/ml2/drivers/cisco/nexus/nexus_models_v2.py deleted file mode 100644 index ce7c41663..000000000 --- a/neutron/plugins/ml2/drivers/cisco/nexus/nexus_models_v2.py +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright (c) 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -import sqlalchemy as sa - -from neutron.db import model_base - - -class NexusPortBinding(model_base.BASEV2): - """Represents a binding of VM's to nexus ports.""" - - __tablename__ = "cisco_ml2_nexusport_bindings" - - binding_id = sa.Column(sa.Integer, primary_key=True, autoincrement=True) - port_id = sa.Column(sa.String(255)) - vlan_id = sa.Column(sa.Integer, nullable=False) - switch_ip = sa.Column(sa.String(255)) - instance_id = sa.Column(sa.String(255)) - - def __repr__(self): - """Just the binding, without the id key.""" - return ("" % - (self.port_id, self.vlan_id, self.switch_ip, self.instance_id)) - - def __eq__(self, other): - """Compare only the binding, without the id key.""" - return ( - self.port_id == other.port_id and - self.vlan_id == other.vlan_id and - self.switch_ip == other.switch_ip and - self.instance_id == other.instance_id - ) diff --git a/neutron/plugins/ml2/drivers/cisco/nexus/nexus_network_driver.py b/neutron/plugins/ml2/drivers/cisco/nexus/nexus_network_driver.py deleted file mode 100644 index 983678d11..000000000 --- a/neutron/plugins/ml2/drivers/cisco/nexus/nexus_network_driver.py +++ /dev/null @@ -1,171 +0,0 @@ -# Copyright 2013 OpenStack Foundation -# All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -""" -Implements a Nexus-OS NETCONF over SSHv2 API Client -""" - -from neutron.openstack.common import excutils -from neutron.openstack.common import importutils -from neutron.openstack.common import log as logging -from neutron.plugins.ml2.drivers.cisco.nexus import config as conf -from neutron.plugins.ml2.drivers.cisco.nexus import constants as const -from neutron.plugins.ml2.drivers.cisco.nexus import exceptions as cexc -from neutron.plugins.ml2.drivers.cisco.nexus import nexus_db_v2 -from neutron.plugins.ml2.drivers.cisco.nexus import nexus_snippets as snipp - -LOG = logging.getLogger(__name__) - - -class CiscoNexusDriver(object): - """Nexus Driver Main Class.""" - def __init__(self): - self.ncclient = None - self.nexus_switches = conf.ML2MechCiscoConfig.nexus_dict - self.connections = {} - - def _import_ncclient(self): - """Import the NETCONF client (ncclient) module. - - The ncclient module is not installed as part of the normal Neutron - distributions. It is imported dynamically in this module so that - the import can be mocked, allowing unit testing without requiring - the installation of ncclient. - - """ - return importutils.import_module('ncclient.manager') - - def _edit_config(self, nexus_host, target='running', config='', - allowed_exc_strs=None): - """Modify switch config for a target config type. - - :param nexus_host: IP address of switch to configure - :param target: Target config type - :param config: Configuration string in XML format - :param allowed_exc_strs: Exceptions which have any of these strings - as a subset of their exception message - (str(exception)) can be ignored - - :raises: NexusConfigFailed - - """ - if not allowed_exc_strs: - allowed_exc_strs = [] - mgr = self.nxos_connect(nexus_host) - try: - mgr.edit_config(target, config=config) - except Exception as e: - for exc_str in allowed_exc_strs: - if exc_str in str(e): - break - else: - # Raise a Neutron exception. Include a description of - # the original ncclient exception. 
- raise cexc.NexusConfigFailed(config=config, exc=e) - - def nxos_connect(self, nexus_host): - """Make SSH connection to the Nexus Switch.""" - if getattr(self.connections.get(nexus_host), 'connected', None): - return self.connections[nexus_host] - - if not self.ncclient: - self.ncclient = self._import_ncclient() - nexus_ssh_port = int(self.nexus_switches[nexus_host, 'ssh_port']) - nexus_user = self.nexus_switches[nexus_host, const.USERNAME] - nexus_password = self.nexus_switches[nexus_host, const.PASSWORD] - try: - man = self.ncclient.connect(host=nexus_host, - port=nexus_ssh_port, - username=nexus_user, - password=nexus_password) - self.connections[nexus_host] = man - except Exception as e: - # Raise a Neutron exception. Include a description of - # the original ncclient exception. - raise cexc.NexusConnectFailed(nexus_host=nexus_host, exc=e) - - return self.connections[nexus_host] - - def create_xml_snippet(self, customized_config): - """Create XML snippet. - - Creates the Proper XML structure for the Nexus Switch Configuration. - """ - conf_xml_snippet = snipp.EXEC_CONF_SNIPPET % (customized_config) - return conf_xml_snippet - - def create_vlan(self, nexus_host, vlanid, vlanname): - """Create a VLAN on Nexus Switch given the VLAN ID and Name.""" - confstr = self.create_xml_snippet( - snipp.CMD_VLAN_CONF_SNIPPET % (vlanid, vlanname)) - LOG.debug(_("NexusDriver: %s"), confstr) - self._edit_config(nexus_host, target='running', config=confstr) - - # Enable VLAN active and no-shutdown states. Some versions of - # Nexus switch do not allow state changes for the extended VLAN - # range (1006-4094), but these errors can be ignored (default - # values are appropriate). 
- for snippet in [snipp.CMD_VLAN_ACTIVE_SNIPPET, - snipp.CMD_VLAN_NO_SHUTDOWN_SNIPPET]: - try: - confstr = self.create_xml_snippet(snippet % vlanid) - self._edit_config( - nexus_host, - target='running', - config=confstr, - allowed_exc_strs=["Can't modify state for extended", - "Command is only allowed on VLAN"]) - except cexc.NexusConfigFailed: - with excutils.save_and_reraise_exception(): - self.delete_vlan(nexus_host, vlanid) - - def delete_vlan(self, nexus_host, vlanid): - """Delete a VLAN on Nexus Switch given the VLAN ID.""" - confstr = snipp.CMD_NO_VLAN_CONF_SNIPPET % vlanid - confstr = self.create_xml_snippet(confstr) - self._edit_config(nexus_host, target='running', config=confstr) - - def enable_vlan_on_trunk_int(self, nexus_host, vlanid, intf_type, - interface): - """Enable a VLAN on a trunk interface.""" - # If more than one VLAN is configured on this interface then - # include the 'add' keyword. - if len(nexus_db_v2.get_port_switch_bindings( - '%s:%s' % (intf_type, interface), nexus_host)) == 1: - snippet = snipp.CMD_INT_VLAN_SNIPPET - else: - snippet = snipp.CMD_INT_VLAN_ADD_SNIPPET - confstr = snippet % (intf_type, interface, vlanid, intf_type) - confstr = self.create_xml_snippet(confstr) - LOG.debug(_("NexusDriver: %s"), confstr) - self._edit_config(nexus_host, target='running', config=confstr) - - def disable_vlan_on_trunk_int(self, nexus_host, vlanid, intf_type, - interface): - """Disable a VLAN on a trunk interface.""" - confstr = (snipp.CMD_NO_VLAN_INT_SNIPPET % - (intf_type, interface, vlanid, intf_type)) - confstr = self.create_xml_snippet(confstr) - LOG.debug(_("NexusDriver: %s"), confstr) - self._edit_config(nexus_host, target='running', config=confstr) - - def create_and_trunk_vlan(self, nexus_host, vlan_id, vlan_name, - intf_type, nexus_port): - """Create VLAN and trunk it on the specified ports.""" - self.create_vlan(nexus_host, vlan_id, vlan_name) - LOG.debug(_("NexusDriver created VLAN: %s"), vlan_id) - if nexus_port: - 
self.enable_vlan_on_trunk_int(nexus_host, vlan_id, intf_type, - nexus_port) diff --git a/neutron/plugins/ml2/drivers/cisco/nexus/nexus_snippets.py b/neutron/plugins/ml2/drivers/cisco/nexus/nexus_snippets.py deleted file mode 100644 index fb38e4199..000000000 --- a/neutron/plugins/ml2/drivers/cisco/nexus/nexus_snippets.py +++ /dev/null @@ -1,200 +0,0 @@ -# Copyright 2013 OpenStack Foundation. -# All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -""" -Cisco Nexus-OS XML-based configuration snippets. -""" - -import logging - - -LOG = logging.getLogger(__name__) - - -# The following are standard strings, messages used to communicate with Nexus. 
-EXEC_CONF_SNIPPET = """ - - - <__XML__MODE__exec_configure>%s - - - -""" - -CMD_VLAN_CONF_SNIPPET = """ - - - <__XML__PARAM_value>%s - <__XML__MODE_vlan> - - %s - - - - -""" - -CMD_VLAN_ACTIVE_SNIPPET = """ - - - <__XML__PARAM_value>%s - <__XML__MODE_vlan> - - active - - - - -""" - -CMD_VLAN_NO_SHUTDOWN_SNIPPET = """ - - - <__XML__PARAM_value>%s - <__XML__MODE_vlan> - - - - - - -""" - -CMD_NO_VLAN_CONF_SNIPPET = """ - - - - <__XML__PARAM_value>%s - - - -""" - -CMD_INT_VLAN_HEADER = """ - - <%s> - %s - <__XML__MODE_if-ethernet-switch> - - - - """ - -CMD_VLAN_ID = """ - %s""" - -CMD_VLAN_ADD_ID = """ - %s - """ % CMD_VLAN_ID - -CMD_INT_VLAN_TRAILER = """ - - - - - - - -""" - -CMD_INT_VLAN_SNIPPET = (CMD_INT_VLAN_HEADER + - CMD_VLAN_ID + - CMD_INT_VLAN_TRAILER) - -CMD_INT_VLAN_ADD_SNIPPET = (CMD_INT_VLAN_HEADER + - CMD_VLAN_ADD_ID + - CMD_INT_VLAN_TRAILER) - -CMD_PORT_TRUNK = """ - - <%s> - %s - <__XML__MODE_if-ethernet-switch> - - - - - - - - - - -""" - -CMD_NO_SWITCHPORT = """ - - <%s> - %s - <__XML__MODE_if-ethernet-switch> - - - - - - - -""" - -CMD_NO_VLAN_INT_SNIPPET = """ - - <%s> - %s - <__XML__MODE_if-ethernet-switch> - - - - - - - %s - - - - - - - - -""" - -CMD_VLAN_SVI_SNIPPET = """ - - - %s - <__XML__MODE_vlan> - - - - -
-
%s
-
-
- -
-
-""" - -CMD_NO_VLAN_SVI_SNIPPET = """ - - - - %s - - - -""" diff --git a/neutron/plugins/ml2/drivers/l2pop/README b/neutron/plugins/ml2/drivers/l2pop/README deleted file mode 100644 index 46bb27e54..000000000 --- a/neutron/plugins/ml2/drivers/l2pop/README +++ /dev/null @@ -1,41 +0,0 @@ -Neutron ML2 l2 population Mechanism Drivers - -l2 population (l2pop) mechanism drivers implements the ML2 driver to improve -open source plugins overlay implementations (VXLAN with Linux bridge and -GRE/VXLAN with OVS). This mechanism driver is implemented in ML2 to propagate -the forwarding information among agents using a common RPC API. - -More informations could be found on the wiki page [1]. - -VXLAN Linux kernel: -------------------- -The VXLAN Linux kernel module provide all necessary functionalities to populate -the forwarding table and local ARP responder tables. This module appears on -release 3.7 of the vanilla Linux kernel in experimental: -- 3.8: first stable release, no edge replication (multicast necessary), -- 3.9: edge replication only for the broadcasted packets, -- 3.11: edge replication for broadcast, multicast and unknown packets. - -Note: Some distributions (like RHEL) have backported this module on precedent - kernel version. - -OpenvSwitch: ------------- -The OVS OpenFlow tables provide all of the necessary functionality to populate -the forwarding table and local ARP responder tables. -A wiki page describe how the flow tables did evolve on OVS agents: -- [2] without local ARP responder -- [3] with local ARP responder. /!\ This functionality is only available since - the development branch 2.1. It's possible - to disable (enable by default) it through - the flag 'arp_responder'. /!\ - - -Note: A difference persists between the LB and OVS agents when they are used - with the l2-pop mechanism driver (and local ARP responder available). The - LB agent will drop unknown unicast (VXLAN bridge mode), whereas the OVS - agent will flood it. 
- -[1] https://wiki.openstack.org/wiki/L2population_blueprint -[2] https://wiki.openstack.org/wiki/Ovs-flow-logic#OVS_flows_logic -[3] https://wiki.openstack.org/wiki/Ovs-flow-logic#OVS_flows_logic_with_local_ARP_responder \ No newline at end of file diff --git a/neutron/plugins/ml2/drivers/l2pop/__init__.py b/neutron/plugins/ml2/drivers/l2pop/__init__.py deleted file mode 100644 index b9b2306f9..000000000 --- a/neutron/plugins/ml2/drivers/l2pop/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright (c) 2013 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Sylvain Afchain, eNovance SAS -# @author: Francois Eleouet, Orange -# @author: Mathieu Rohon, Orange diff --git a/neutron/plugins/ml2/drivers/l2pop/config.py b/neutron/plugins/ml2/drivers/l2pop/config.py deleted file mode 100644 index 1e0701e0b..000000000 --- a/neutron/plugins/ml2/drivers/l2pop/config.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright (c) 2013 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Sylvain Afchain, eNovance SAS -# @author: Francois Eleouet, Orange -# @author: Mathieu Rohon, Orange - -from oslo.config import cfg - - -l2_population_options = [ - cfg.IntOpt('agent_boot_time', default=180, - help=_('Delay within which agent is expected to update ' - 'existing ports whent it restarts')), -] - -cfg.CONF.register_opts(l2_population_options, "l2pop") diff --git a/neutron/plugins/ml2/drivers/l2pop/constants.py b/neutron/plugins/ml2/drivers/l2pop/constants.py deleted file mode 100644 index 2c9b7f96f..000000000 --- a/neutron/plugins/ml2/drivers/l2pop/constants.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright (c) 2013 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Sylvain Afchain, eNovance SAS -# @author: Francois Eleouet, Orange -# @author: Mathieu Rohon, Orange - -from neutron.common import constants - -SUPPORTED_AGENT_TYPES = [constants.AGENT_TYPE_OVS, - constants.AGENT_TYPE_LINUXBRIDGE] diff --git a/neutron/plugins/ml2/drivers/l2pop/db.py b/neutron/plugins/ml2/drivers/l2pop/db.py deleted file mode 100644 index 3c4fc9bce..000000000 --- a/neutron/plugins/ml2/drivers/l2pop/db.py +++ /dev/null @@ -1,83 +0,0 @@ -# Copyright (c) 2013 OpenStack Foundation. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Sylvain Afchain, eNovance SAS -# @author: Francois Eleouet, Orange -# @author: Mathieu Rohon, Orange - -from sqlalchemy import sql - -from neutron.common import constants as const -from neutron.db import agents_db -from neutron.db import db_base_plugin_v2 as base_db -from neutron.db import models_v2 -from neutron.openstack.common import jsonutils -from neutron.openstack.common import timeutils -from neutron.plugins.ml2.drivers.l2pop import constants as l2_const -from neutron.plugins.ml2 import models as ml2_models - - -class L2populationDbMixin(base_db.CommonDbMixin): - - def get_agent_ip_by_host(self, session, agent_host): - agent = self.get_agent_by_host(session, agent_host) - if agent: - return self.get_agent_ip(agent) - - def get_agent_ip(self, agent): - configuration = jsonutils.loads(agent.configurations) - return configuration.get('tunneling_ip') - - def get_agent_uptime(self, agent): - return timeutils.delta_seconds(agent.started_at, - agent.heartbeat_timestamp) - - def get_agent_tunnel_types(self, agent): - configuration = jsonutils.loads(agent.configurations) - return configuration.get('tunnel_types') - - def get_agent_by_host(self, session, agent_host): - with session.begin(subtransactions=True): - query = session.query(agents_db.Agent) - query = query.filter(agents_db.Agent.host == agent_host, - agents_db.Agent.agent_type.in_( - l2_const.SUPPORTED_AGENT_TYPES)) - return query.first() - - def 
get_network_ports(self, session, network_id): - with session.begin(subtransactions=True): - query = session.query(ml2_models.PortBinding, - agents_db.Agent) - query = query.join(agents_db.Agent, - agents_db.Agent.host == - ml2_models.PortBinding.host) - query = query.join(models_v2.Port) - query = query.filter(models_v2.Port.network_id == network_id, - models_v2.Port.admin_state_up == sql.true(), - agents_db.Agent.agent_type.in_( - l2_const.SUPPORTED_AGENT_TYPES)) - return query - - def get_agent_network_active_port_count(self, session, agent_host, - network_id): - with session.begin(subtransactions=True): - query = session.query(models_v2.Port) - - query = query.join(ml2_models.PortBinding) - query = query.filter(models_v2.Port.network_id == network_id, - models_v2.Port.status == - const.PORT_STATUS_ACTIVE, - ml2_models.PortBinding.host == agent_host) - return query.count() diff --git a/neutron/plugins/ml2/drivers/l2pop/mech_driver.py b/neutron/plugins/ml2/drivers/l2pop/mech_driver.py deleted file mode 100644 index af4a427fc..000000000 --- a/neutron/plugins/ml2/drivers/l2pop/mech_driver.py +++ /dev/null @@ -1,248 +0,0 @@ -# Copyright (c) 2013 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# -# @author: Sylvain Afchain, eNovance SAS -# @author: Francois Eleouet, Orange -# @author: Mathieu Rohon, Orange - -from oslo.config import cfg - -from neutron.common import constants as const -from neutron import context as n_context -from neutron.db import api as db_api -from neutron.openstack.common import log as logging -from neutron.plugins.ml2 import driver_api as api -from neutron.plugins.ml2.drivers.l2pop import config # noqa -from neutron.plugins.ml2.drivers.l2pop import db as l2pop_db -from neutron.plugins.ml2.drivers.l2pop import rpc as l2pop_rpc - -LOG = logging.getLogger(__name__) - - -class L2populationMechanismDriver(api.MechanismDriver, - l2pop_db.L2populationDbMixin): - - def __init__(self): - super(L2populationMechanismDriver, self).__init__() - self.L2populationAgentNotify = l2pop_rpc.L2populationAgentNotifyAPI() - - def initialize(self): - LOG.debug(_("Experimental L2 population driver")) - self.rpc_ctx = n_context.get_admin_context_without_session() - self.migrated_ports = {} - self.deleted_ports = {} - - def _get_port_fdb_entries(self, port): - return [[port['mac_address'], - ip['ip_address']] for ip in port['fixed_ips']] - - def delete_port_precommit(self, context): - # TODO(matrohon): revisit once the original bound segment will be - # available in delete_port_postcommit. 
in delete_port_postcommit - # agent_active_ports will be equal to 0, and the _update_port_down - # won't need agent_active_ports_count_for_flooding anymore - port_context = context.current - fdb_entries = self._update_port_down(context, port_context, 1) - self.deleted_ports[context.current['id']] = fdb_entries - - def delete_port_postcommit(self, context): - fanout_msg = self.deleted_ports.pop(context.current['id'], None) - if fanout_msg: - self.L2populationAgentNotify.remove_fdb_entries( - self.rpc_ctx, fanout_msg) - - def _get_diff_ips(self, orig, port): - orig_ips = set([ip['ip_address'] for ip in orig['fixed_ips']]) - port_ips = set([ip['ip_address'] for ip in port['fixed_ips']]) - - # check if an ip has been added or removed - orig_chg_ips = orig_ips.difference(port_ips) - port_chg_ips = port_ips.difference(orig_ips) - - if orig_chg_ips or port_chg_ips: - return orig_chg_ips, port_chg_ips - - def _fixed_ips_changed(self, context, orig, port, diff_ips): - orig_ips, port_ips = diff_ips - - port_infos = self._get_port_infos(context, orig) - if not port_infos: - return - agent, agent_ip, segment, port_fdb_entries = port_infos - - orig_mac_ip = [[port['mac_address'], ip] for ip in orig_ips] - port_mac_ip = [[port['mac_address'], ip] for ip in port_ips] - - upd_fdb_entries = {port['network_id']: {agent_ip: {}}} - - ports = upd_fdb_entries[port['network_id']][agent_ip] - if orig_mac_ip: - ports['before'] = orig_mac_ip - - if port_mac_ip: - ports['after'] = port_mac_ip - - self.L2populationAgentNotify.update_fdb_entries( - self.rpc_ctx, {'chg_ip': upd_fdb_entries}) - - return True - - def update_port_postcommit(self, context): - port = context.current - orig = context.original - - diff_ips = self._get_diff_ips(orig, port) - if diff_ips: - self._fixed_ips_changed(context, orig, port, diff_ips) - if (port['binding:host_id'] != orig['binding:host_id'] - and port['status'] == const.PORT_STATUS_ACTIVE - and not self.migrated_ports.get(orig['id'])): - # The port has been 
migrated. We have to store the original - # binding to send appropriate fdb once the port will be set - # on the destination host - self.migrated_ports[orig['id']] = orig - elif port['status'] != orig['status']: - if port['status'] == const.PORT_STATUS_ACTIVE: - self._update_port_up(context) - elif port['status'] == const.PORT_STATUS_DOWN: - fdb_entries = self._update_port_down(context, port) - self.L2populationAgentNotify.remove_fdb_entries( - self.rpc_ctx, fdb_entries) - elif port['status'] == const.PORT_STATUS_BUILD: - orig = self.migrated_ports.pop(port['id'], None) - if orig: - # this port has been migrated : remove its entries from fdb - fdb_entries = self._update_port_down(context, orig) - self.L2populationAgentNotify.remove_fdb_entries( - self.rpc_ctx, fdb_entries) - - def _get_port_infos(self, context, port): - agent_host = port['binding:host_id'] - if not agent_host: - return - - session = db_api.get_session() - agent = self.get_agent_by_host(session, agent_host) - if not agent: - return - - agent_ip = self.get_agent_ip(agent) - if not agent_ip: - LOG.warning(_("Unable to retrieve the agent ip, check the agent " - "configuration.")) - return - - segment = context.bound_segment - if not segment: - LOG.warning(_("Port %(port)s updated by agent %(agent)s " - "isn't bound to any segment"), - {'port': port['id'], 'agent': agent}) - return - - tunnel_types = self.get_agent_tunnel_types(agent) - if segment['network_type'] not in tunnel_types: - return - - fdb_entries = self._get_port_fdb_entries(port) - - return agent, agent_ip, segment, fdb_entries - - def _update_port_up(self, context): - port_context = context.current - port_infos = self._get_port_infos(context, port_context) - if not port_infos: - return - agent, agent_ip, segment, port_fdb_entries = port_infos - - agent_host = port_context['binding:host_id'] - network_id = port_context['network_id'] - - session = db_api.get_session() - agent_active_ports = self.get_agent_network_active_port_count( - 
session, agent_host, network_id) - - other_fdb_entries = {network_id: - {'segment_id': segment['segmentation_id'], - 'network_type': segment['network_type'], - 'ports': {agent_ip: []}}} - - if agent_active_ports == 1 or ( - self.get_agent_uptime(agent) < cfg.CONF.l2pop.agent_boot_time): - # First port activated on current agent in this network, - # we have to provide it with the whole list of fdb entries - agent_fdb_entries = {network_id: - {'segment_id': segment['segmentation_id'], - 'network_type': segment['network_type'], - 'ports': {}}} - ports = agent_fdb_entries[network_id]['ports'] - - network_ports = self.get_network_ports(session, network_id) - for network_port in network_ports: - binding, agent = network_port - if agent.host == agent_host: - continue - - ip = self.get_agent_ip(agent) - if not ip: - LOG.debug(_("Unable to retrieve the agent ip, check " - "the agent %(agent_host)s configuration."), - {'agent_host': agent.host}) - continue - - agent_ports = ports.get(ip, [const.FLOODING_ENTRY]) - agent_ports += self._get_port_fdb_entries(binding.port) - ports[ip] = agent_ports - - # And notify other agents to add flooding entry - other_fdb_entries[network_id]['ports'][agent_ip].append( - const.FLOODING_ENTRY) - - if ports.keys(): - self.L2populationAgentNotify.add_fdb_entries( - self.rpc_ctx, agent_fdb_entries, agent_host) - - # Notify other agents to add fdb rule for current port - other_fdb_entries[network_id]['ports'][agent_ip] += port_fdb_entries - - self.L2populationAgentNotify.add_fdb_entries(self.rpc_ctx, - other_fdb_entries) - - def _update_port_down(self, context, port_context, - agent_active_ports_count_for_flooding=0): - port_infos = self._get_port_infos(context, port_context) - if not port_infos: - return - agent, agent_ip, segment, port_fdb_entries = port_infos - - agent_host = port_context['binding:host_id'] - network_id = port_context['network_id'] - - session = db_api.get_session() - agent_active_ports = 
self.get_agent_network_active_port_count( - session, agent_host, network_id) - - other_fdb_entries = {network_id: - {'segment_id': segment['segmentation_id'], - 'network_type': segment['network_type'], - 'ports': {agent_ip: []}}} - if agent_active_ports == agent_active_ports_count_for_flooding: - # Agent is removing its last activated port in this network, - # other agents needs to be notified to delete their flooding entry. - other_fdb_entries[network_id]['ports'][agent_ip].append( - const.FLOODING_ENTRY) - # Notify other agents to remove fdb rules for current port - other_fdb_entries[network_id]['ports'][agent_ip] += port_fdb_entries - - return other_fdb_entries diff --git a/neutron/plugins/ml2/drivers/l2pop/rpc.py b/neutron/plugins/ml2/drivers/l2pop/rpc.py deleted file mode 100644 index b4f171a27..000000000 --- a/neutron/plugins/ml2/drivers/l2pop/rpc.py +++ /dev/null @@ -1,86 +0,0 @@ -# Copyright (c) 2013 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# -# @author: Sylvain Afchain, eNovance SAS -# @author: Francois Eleouet, Orange -# @author: Mathieu Rohon, Orange - -from neutron.common import rpc_compat -from neutron.common import topics -from neutron.openstack.common import log as logging - - -LOG = logging.getLogger(__name__) - - -class L2populationAgentNotifyAPI(rpc_compat.RpcProxy): - BASE_RPC_API_VERSION = '1.0' - - def __init__(self, topic=topics.AGENT): - super(L2populationAgentNotifyAPI, self).__init__( - topic=topic, default_version=self.BASE_RPC_API_VERSION) - - self.topic_l2pop_update = topics.get_topic_name(topic, - topics.L2POPULATION, - topics.UPDATE) - - def _notification_fanout(self, context, method, fdb_entries): - LOG.debug(_('Fanout notify l2population agents at %(topic)s ' - 'the message %(method)s with %(fdb_entries)s'), - {'topic': self.topic, - 'method': method, - 'fdb_entries': fdb_entries}) - - self.fanout_cast(context, - self.make_msg(method, fdb_entries=fdb_entries), - topic=self.topic_l2pop_update) - - def _notification_host(self, context, method, fdb_entries, host): - LOG.debug(_('Notify l2population agent %(host)s at %(topic)s the ' - 'message %(method)s with %(fdb_entries)s'), - {'host': host, - 'topic': self.topic, - 'method': method, - 'fdb_entries': fdb_entries}) - self.cast(context, - self.make_msg(method, fdb_entries=fdb_entries), - topic='%s.%s' % (self.topic_l2pop_update, host)) - - def add_fdb_entries(self, context, fdb_entries, host=None): - if fdb_entries: - if host: - self._notification_host(context, 'add_fdb_entries', - fdb_entries, host) - else: - self._notification_fanout(context, 'add_fdb_entries', - fdb_entries) - - def remove_fdb_entries(self, context, fdb_entries, host=None): - if fdb_entries: - if host: - self._notification_host(context, 'remove_fdb_entries', - fdb_entries, host) - else: - self._notification_fanout(context, 'remove_fdb_entries', - fdb_entries) - - def update_fdb_entries(self, context, fdb_entries, host=None): - if fdb_entries: - if host: - 
self._notification_host(context, 'update_fdb_entries', - fdb_entries, host) - else: - self._notification_fanout(context, 'update_fdb_entries', - fdb_entries) diff --git a/neutron/plugins/ml2/drivers/mech_agent.py b/neutron/plugins/ml2/drivers/mech_agent.py deleted file mode 100644 index d0aad3ae9..000000000 --- a/neutron/plugins/ml2/drivers/mech_agent.py +++ /dev/null @@ -1,149 +0,0 @@ -# Copyright (c) 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import abc -import six - -from neutron.extensions import portbindings -from neutron.openstack.common import log -from neutron.plugins.ml2 import driver_api as api - -LOG = log.getLogger(__name__) - - -@six.add_metaclass(abc.ABCMeta) -class AgentMechanismDriverBase(api.MechanismDriver): - """Base class for drivers that attach to networks using an L2 agent. - - The AgentMechanismDriverBase provides common code for mechanism - drivers that integrate the ml2 plugin with L2 agents. Port binding - with this driver requires the driver's associated agent to be - running on the port's host, and that agent to have connectivity to - at least one segment of the port's network. - - MechanismDrivers using this base class must pass the agent type to - __init__(), and must implement try_to_bind_segment_for_agent(). - """ - - def __init__(self, agent_type, - supported_vnic_types=[portbindings.VNIC_NORMAL]): - """Initialize base class for specific L2 agent type. 
- - :param agent_type: Constant identifying agent type in agents_db - :param supported_vnic_types: The binding:vnic_type values we can bind - """ - self.agent_type = agent_type - self.supported_vnic_types = supported_vnic_types - - def initialize(self): - pass - - def bind_port(self, context): - LOG.debug(_("Attempting to bind port %(port)s on " - "network %(network)s"), - {'port': context.current['id'], - 'network': context.network.current['id']}) - vnic_type = context.current.get(portbindings.VNIC_TYPE, - portbindings.VNIC_NORMAL) - if vnic_type not in self.supported_vnic_types: - LOG.debug(_("Refusing to bind due to unsupported vnic_type: %s"), - vnic_type) - return - for agent in context.host_agents(self.agent_type): - LOG.debug(_("Checking agent: %s"), agent) - if agent['alive']: - for segment in context.network.network_segments: - if self.try_to_bind_segment_for_agent(context, segment, - agent): - LOG.debug(_("Bound using segment: %s"), segment) - return - else: - LOG.warning(_("Attempting to bind with dead agent: %s"), - agent) - - @abc.abstractmethod - def try_to_bind_segment_for_agent(self, context, segment, agent): - """Try to bind with segment for agent. - - :param context: PortContext instance describing the port - :param segment: segment dictionary describing segment to bind - :param agent: agents_db entry describing agent to bind - :returns: True iff segment has been bound for agent - - Called inside transaction during bind_port() so that derived - MechanismDrivers can use agent_db data along with built-in - knowledge of the corresponding agent's capabilities to attempt - to bind to the specified network segment for the agent. - - If the segment can be bound for the agent, this function must - call context.set_binding() with appropriate values and then - return True. Otherwise, it must return False. 
- """ - - -@six.add_metaclass(abc.ABCMeta) -class SimpleAgentMechanismDriverBase(AgentMechanismDriverBase): - """Base class for simple drivers using an L2 agent. - - The SimpleAgentMechanismDriverBase provides common code for - mechanism drivers that integrate the ml2 plugin with L2 agents, - where the binding:vif_type and binding:vif_details values are the - same for all bindings. Port binding with this driver requires the - driver's associated agent to be running on the port's host, and - that agent to have connectivity to at least one segment of the - port's network. - - MechanismDrivers using this base class must pass the agent type - and the values for binding:vif_type and binding:vif_details to - __init__(), and must implement check_segment_for_agent(). - """ - - def __init__(self, agent_type, vif_type, vif_details, - supported_vnic_types=[portbindings.VNIC_NORMAL]): - """Initialize base class for specific L2 agent type. - - :param agent_type: Constant identifying agent type in agents_db - :param vif_type: Value for binding:vif_type when bound - :param vif_details: Dictionary with details for VIF driver when bound - :param supported_vnic_types: The binding:vnic_type values we can bind - """ - super(SimpleAgentMechanismDriverBase, self).__init__( - agent_type, supported_vnic_types) - self.vif_type = vif_type - self.vif_details = vif_details - - def try_to_bind_segment_for_agent(self, context, segment, agent): - if self.check_segment_for_agent(segment, agent): - context.set_binding(segment[api.ID], - self.vif_type, - self.vif_details) - return True - else: - return False - - @abc.abstractmethod - def check_segment_for_agent(self, segment, agent): - """Check if segment can be bound for agent. 
- - :param segment: segment dictionary describing segment to bind - :param agent: agents_db entry describing agent to bind - :returns: True iff segment can be bound for agent - - Called inside transaction during bind_port so that derived - MechanismDrivers can use agent_db data along with built-in - knowledge of the corresponding agent's capabilities to - determine whether or not the specified network segment can be - bound for the agent. - """ diff --git a/neutron/plugins/ml2/drivers/mech_arista/README b/neutron/plugins/ml2/drivers/mech_arista/README deleted file mode 100644 index 6e30bf9e5..000000000 --- a/neutron/plugins/ml2/drivers/mech_arista/README +++ /dev/null @@ -1,9 +0,0 @@ - -Arista Neutron ML2 Mechanism Driver - -This mechanism driver implements ML2 Driver API and is used to manage the virtual and physical networks using Arista Hardware. - -Note: Initial version of this driver support VLANs only. - -For more details on use please refer to: -https://wiki.openstack.org/wiki/Arista-neutron-ml2-driver diff --git a/neutron/plugins/ml2/drivers/mech_arista/__init__.py b/neutron/plugins/ml2/drivers/mech_arista/__init__.py deleted file mode 100644 index 788cea1f7..000000000 --- a/neutron/plugins/ml2/drivers/mech_arista/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright (c) 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
diff --git a/neutron/plugins/ml2/drivers/mech_arista/config.py b/neutron/plugins/ml2/drivers/mech_arista/config.py deleted file mode 100644 index 2f968c874..000000000 --- a/neutron/plugins/ml2/drivers/mech_arista/config.py +++ /dev/null @@ -1,70 +0,0 @@ -# Copyright (c) 2013 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from oslo.config import cfg - -""" Arista ML2 Mechanism driver specific configuration knobs. - -Following are user configurable options for Arista ML2 Mechanism -driver. The eapi_username, eapi_password, and eapi_host are -required options. Region Name must be the same that is used by -Keystone service. This option is available to support multiple -OpenStack/Neutron controllers. -""" - -ARISTA_DRIVER_OPTS = [ - cfg.StrOpt('eapi_username', - default='', - help=_('Username for Arista EOS. This is required field. ' - 'If not set, all communications to Arista EOS' - 'will fail.')), - cfg.StrOpt('eapi_password', - default='', - secret=True, # do not expose value in the logs - help=_('Password for Arista EOS. This is required field. ' - 'If not set, all communications to Arista EOS ' - 'will fail.')), - cfg.StrOpt('eapi_host', - default='', - help=_('Arista EOS IP address. This is required field. ' - 'If not set, all communications to Arista EOS' - 'will fail.')), - cfg.BoolOpt('use_fqdn', - default=True, - help=_('Defines if hostnames are sent to Arista EOS as FQDNs ' - '("node1.domain.com") or as short names ("node1"). 
' - 'This is optional. If not set, a value of "True" ' - 'is assumed.')), - cfg.IntOpt('sync_interval', - default=180, - help=_('Sync interval in seconds between Neutron plugin and ' - 'EOS. This interval defines how often the ' - 'synchronization is performed. This is an optional ' - 'field. If not set, a value of 180 seconds is ' - 'assumed.')), - cfg.StrOpt('region_name', - default='RegionOne', - help=_('Defines Region Name that is assigned to this OpenStack ' - 'Controller. This is useful when multiple ' - 'OpenStack/Neutron controllers are managing the same ' - 'Arista HW clusters. Note that this name must match ' - 'with the region name registered (or known) to keystone ' - 'service. Authentication with Keysotne is performed by ' - 'EOS. This is optional. If not set, a value of ' - '"RegionOne" is assumed.')) -] - -cfg.CONF.register_opts(ARISTA_DRIVER_OPTS, "ml2_arista") diff --git a/neutron/plugins/ml2/drivers/mech_arista/db.py b/neutron/plugins/ml2/drivers/mech_arista/db.py deleted file mode 100644 index f47bcd140..000000000 --- a/neutron/plugins/ml2/drivers/mech_arista/db.py +++ /dev/null @@ -1,402 +0,0 @@ -# Copyright (c) 2013 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import sqlalchemy as sa - -from neutron import context as nctx -import neutron.db.api as db -from neutron.db import db_base_plugin_v2 -from neutron.db import model_base -from neutron.db import models_v2 - -VLAN_SEGMENTATION = 'vlan' - -UUID_LEN = 36 -STR_LEN = 255 - - -class AristaProvisionedNets(model_base.BASEV2, models_v2.HasId, - models_v2.HasTenant): - """Stores networks provisioned on Arista EOS. - - Saves the segmentation ID for each network that is provisioned - on EOS. This information is used during synchronization between - Neutron and EOS. - """ - __tablename__ = 'arista_provisioned_nets' - - network_id = sa.Column(sa.String(UUID_LEN)) - segmentation_id = sa.Column(sa.Integer) - - def eos_network_representation(self, segmentation_type): - return {u'networkId': self.network_id, - u'segmentationTypeId': self.segmentation_id, - u'segmentationType': segmentation_type} - - -class AristaProvisionedVms(model_base.BASEV2, models_v2.HasId, - models_v2.HasTenant): - """Stores VMs provisioned on Arista EOS. - - All VMs launched on physical hosts connected to Arista - Switches are remembered - """ - __tablename__ = 'arista_provisioned_vms' - - vm_id = sa.Column(sa.String(STR_LEN)) - host_id = sa.Column(sa.String(STR_LEN)) - port_id = sa.Column(sa.String(UUID_LEN)) - network_id = sa.Column(sa.String(UUID_LEN)) - - def eos_vm_representation(self): - return {u'vmId': self.vm_id, - u'host': self.host_id, - u'ports': {self.port_id: [{u'portId': self.port_id, - u'networkId': self.network_id}]}} - - def eos_port_representation(self): - return {u'vmId': self.vm_id, - u'host': self.host_id, - u'portId': self.port_id, - u'networkId': self.network_id} - - -class AristaProvisionedTenants(model_base.BASEV2, models_v2.HasId, - models_v2.HasTenant): - """Stores Tenants provisioned on Arista EOS. - - Tenants list is maintained for sync between Neutron and EOS. 
- """ - __tablename__ = 'arista_provisioned_tenants' - - def eos_tenant_representation(self): - return {u'tenantId': self.tenant_id} - - -def remember_tenant(tenant_id): - """Stores a tenant information in repository. - - :param tenant_id: globally unique neutron tenant identifier - """ - session = db.get_session() - with session.begin(): - tenant = AristaProvisionedTenants(tenant_id=tenant_id) - session.add(tenant) - - -def forget_tenant(tenant_id): - """Removes a tenant information from repository. - - :param tenant_id: globally unique neutron tenant identifier - """ - session = db.get_session() - with session.begin(): - (session.query(AristaProvisionedTenants). - filter_by(tenant_id=tenant_id). - delete()) - - -def get_all_tenants(): - """Returns a list of all tenants stored in repository.""" - session = db.get_session() - with session.begin(): - return session.query(AristaProvisionedTenants).all() - - -def num_provisioned_tenants(): - """Returns number of tenants stored in repository.""" - session = db.get_session() - with session.begin(): - return session.query(AristaProvisionedTenants).count() - - -def remember_vm(vm_id, host_id, port_id, network_id, tenant_id): - """Stores all relevant information about a VM in repository. - - :param vm_id: globally unique identifier for VM instance - :param host_id: ID of the host where the VM is placed - :param port_id: globally unique port ID that connects VM to network - :param network_id: globally unique neutron network identifier - :param tenant_id: globally unique neutron tenant identifier - """ - session = db.get_session() - with session.begin(): - vm = AristaProvisionedVms( - vm_id=vm_id, - host_id=host_id, - port_id=port_id, - network_id=network_id, - tenant_id=tenant_id) - session.add(vm) - - -def forget_vm(vm_id, host_id, port_id, network_id, tenant_id): - """Removes all relevant information about a VM from repository. 
- - :param vm_id: globally unique identifier for VM instance - :param host_id: ID of the host where the VM is placed - :param port_id: globally unique port ID that connects VM to network - :param network_id: globally unique neutron network identifier - :param tenant_id: globally unique neutron tenant identifier - """ - session = db.get_session() - with session.begin(): - (session.query(AristaProvisionedVms). - filter_by(vm_id=vm_id, host_id=host_id, - port_id=port_id, tenant_id=tenant_id, - network_id=network_id).delete()) - - -def remember_network(tenant_id, network_id, segmentation_id): - """Stores all relevant information about a Network in repository. - - :param tenant_id: globally unique neutron tenant identifier - :param network_id: globally unique neutron network identifier - :param segmentation_id: VLAN ID that is assigned to the network - """ - session = db.get_session() - with session.begin(): - net = AristaProvisionedNets( - tenant_id=tenant_id, - network_id=network_id, - segmentation_id=segmentation_id) - session.add(net) - - -def forget_network(tenant_id, network_id): - """Deletes all relevant information about a Network from repository. - - :param tenant_id: globally unique neutron tenant identifier - :param network_id: globally unique neutron network identifier - """ - session = db.get_session() - with session.begin(): - (session.query(AristaProvisionedNets). - filter_by(tenant_id=tenant_id, network_id=network_id). - delete()) - - -def get_segmentation_id(tenant_id, network_id): - """Returns Segmentation ID (VLAN) associated with a network. - - :param tenant_id: globally unique neutron tenant identifier - :param network_id: globally unique neutron network identifier - """ - session = db.get_session() - with session.begin(): - net = (session.query(AristaProvisionedNets). 
- filter_by(tenant_id=tenant_id, - network_id=network_id).first()) - return net and net.segmentation_id or None - - -def is_vm_provisioned(vm_id, host_id, port_id, - network_id, tenant_id): - """Checks if a VM is already known to EOS - - :returns: True, if yes; False otherwise. - :param vm_id: globally unique identifier for VM instance - :param host_id: ID of the host where the VM is placed - :param port_id: globally unique port ID that connects VM to network - :param network_id: globally unique neutron network identifier - :param tenant_id: globally unique neutron tenant identifier - """ - session = db.get_session() - with session.begin(): - num_vm = (session.query(AristaProvisionedVms). - filter_by(tenant_id=tenant_id, - vm_id=vm_id, - port_id=port_id, - network_id=network_id, - host_id=host_id).count()) - return num_vm > 0 - - -def is_network_provisioned(tenant_id, network_id, seg_id=None): - """Checks if a networks is already known to EOS - - :returns: True, if yes; False otherwise. - :param tenant_id: globally unique neutron tenant identifier - :param network_id: globally unique neutron network identifier - :param seg_id: Optionally matches the segmentation ID (VLAN) - """ - session = db.get_session() - with session.begin(): - if not seg_id: - num_nets = (session.query(AristaProvisionedNets). - filter_by(tenant_id=tenant_id, - network_id=network_id).count()) - else: - num_nets = (session.query(AristaProvisionedNets). - filter_by(tenant_id=tenant_id, - network_id=network_id, - segmentation_id=seg_id).count()) - return num_nets > 0 - - -def is_tenant_provisioned(tenant_id): - """Checks if a tenant is already known to EOS - - :returns: True, if yes; False otherwise. - :param tenant_id: globally unique neutron tenant identifier - """ - session = db.get_session() - with session.begin(): - num_tenants = (session.query(AristaProvisionedTenants). 
- filter_by(tenant_id=tenant_id).count()) - return num_tenants > 0 - - -def num_nets_provisioned(tenant_id): - """Returns number of networks for a given tennat. - - :param tenant_id: globally unique neutron tenant identifier - """ - session = db.get_session() - with session.begin(): - return (session.query(AristaProvisionedNets). - filter_by(tenant_id=tenant_id).count()) - - -def num_vms_provisioned(tenant_id): - """Returns number of VMs for a given tennat. - - :param tenant_id: globally unique neutron tenant identifier - """ - session = db.get_session() - with session.begin(): - return (session.query(AristaProvisionedVms). - filter_by(tenant_id=tenant_id).count()) - - -def get_networks(tenant_id): - """Returns all networks for a given tenant in EOS-compatible format. - - See AristaRPCWrapper.get_network_list() for return value format. - :param tenant_id: globally unique neutron tenant identifier - """ - session = db.get_session() - with session.begin(): - model = AristaProvisionedNets - # hack for pep8 E711: comparison to None should be - # 'if cond is not None' - none = None - all_nets = (session.query(model). - filter(model.tenant_id == tenant_id, - model.segmentation_id != none)) - res = dict( - (net.network_id, net.eos_network_representation( - VLAN_SEGMENTATION)) - for net in all_nets - ) - return res - - -def get_vms(tenant_id): - """Returns all VMs for a given tenant in EOS-compatible format. - - :param tenant_id: globally unique neutron tenant identifier - """ - session = db.get_session() - with session.begin(): - model = AristaProvisionedVms - # hack for pep8 E711: comparison to None should be - # 'if cond is not None' - none = None - all_vms = (session.query(model). 
- filter(model.tenant_id == tenant_id, - model.host_id != none, - model.vm_id != none, - model.network_id != none, - model.port_id != none)) - res = dict( - (vm.vm_id, vm.eos_vm_representation()) - for vm in all_vms - ) - return res - - -def get_ports(tenant_id): - """Returns all ports of VMs in EOS-compatible format. - - :param tenant_id: globally unique neutron tenant identifier - """ - session = db.get_session() - with session.begin(): - model = AristaProvisionedVms - # hack for pep8 E711: comparison to None should be - # 'if cond is not None' - none = None - all_ports = (session.query(model). - filter(model.tenant_id == tenant_id, - model.host_id != none, - model.vm_id != none, - model.network_id != none, - model.port_id != none)) - res = dict( - (port.port_id, port.eos_port_representation()) - for port in all_ports - ) - return res - - -def get_tenants(): - """Returns list of all tenants in EOS-compatible format.""" - session = db.get_session() - with session.begin(): - model = AristaProvisionedTenants - all_tenants = session.query(model) - res = dict( - (tenant.tenant_id, tenant.eos_tenant_representation()) - for tenant in all_tenants - ) - return res - - -class NeutronNets(db_base_plugin_v2.NeutronDbPluginV2): - """Access to Neutron DB. - - Provides access to the Neutron Data bases for all provisioned - networks as well ports. This data is used during the synchronization - of DB between ML2 Mechanism Driver and Arista EOS - Names of the networks and ports are not stroed in Arista repository - They are pulled from Neutron DB. 
- """ - - def __init__(self): - self.admin_ctx = nctx.get_admin_context() - - def get_network_name(self, tenant_id, network_id): - network = self._get_network(tenant_id, network_id) - network_name = None - if network: - network_name = network[0]['name'] - return network_name - - def get_all_networks_for_tenant(self, tenant_id): - filters = {'tenant_id': [tenant_id]} - return super(NeutronNets, - self).get_networks(self.admin_ctx, filters=filters) or [] - - def get_all_ports_for_tenant(self, tenant_id): - filters = {'tenant_id': [tenant_id]} - return super(NeutronNets, - self).get_ports(self.admin_ctx, filters=filters) or [] - - def _get_network(self, tenant_id, network_id): - filters = {'tenant_id': [tenant_id], - 'id': [network_id]} - return super(NeutronNets, - self).get_networks(self.admin_ctx, filters=filters) or [] diff --git a/neutron/plugins/ml2/drivers/mech_arista/exceptions.py b/neutron/plugins/ml2/drivers/mech_arista/exceptions.py deleted file mode 100644 index b3dae3dae..000000000 --- a/neutron/plugins/ml2/drivers/mech_arista/exceptions.py +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright (c) 2013 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -"""Exceptions used by Arista ML2 Mechanism Driver.""" - -from neutron.common import exceptions - - -class AristaRpcError(exceptions.NeutronException): - message = _('%(msg)s') - - -class AristaConfigError(exceptions.NeutronException): - message = _('%(msg)s') diff --git a/neutron/plugins/ml2/drivers/mech_arista/mechanism_arista.py b/neutron/plugins/ml2/drivers/mech_arista/mechanism_arista.py deleted file mode 100644 index d825693e2..000000000 --- a/neutron/plugins/ml2/drivers/mech_arista/mechanism_arista.py +++ /dev/null @@ -1,1014 +0,0 @@ -# Copyright (c) 2013 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import threading - -import jsonrpclib -from oslo.config import cfg - -from neutron.common import constants as n_const -from neutron.extensions import portbindings -from neutron.openstack.common import log as logging -from neutron.plugins.ml2.common import exceptions as ml2_exc -from neutron.plugins.ml2 import driver_api -from neutron.plugins.ml2.drivers.mech_arista import config # noqa -from neutron.plugins.ml2.drivers.mech_arista import db -from neutron.plugins.ml2.drivers.mech_arista import exceptions as arista_exc - -LOG = logging.getLogger(__name__) - -EOS_UNREACHABLE_MSG = _('Unable to reach EOS') - - -class AristaRPCWrapper(object): - """Wraps Arista JSON RPC. - - All communications between Neutron and EOS are over JSON RPC. 
- EOS - operating system used on Arista hardware - Command API - JSON RPC API provided by Arista EOS - """ - def __init__(self): - self._server = jsonrpclib.Server(self._eapi_host_url()) - self.keystone_conf = cfg.CONF.keystone_authtoken - self.region = cfg.CONF.ml2_arista.region_name - self._region_updated_time = None - # The cli_commands dict stores the mapping between the CLI command key - # and the actual CLI command. - self.cli_commands = {} - self.initialize_cli_commands() - - def _get_exit_mode_cmds(self, modes): - """Returns a list of 'exit' commands for the modes. - - :param modes: a list of CLI modes to exit out of. - """ - return ['exit'] * len(modes) - - def initialize_cli_commands(self): - self.cli_commands['timestamp'] = [] - - def check_cli_commands(self): - """Checks whether the CLI commands are vaild. - - This method tries to execute the commands on EOS and if it succeedes - the command is stored. - """ - cmd = ['show openstack config region %s timestamp' % self.region] - try: - self._run_eos_cmds(cmd) - self.cli_commands['timestamp'] = cmd - except arista_exc.AristaRpcError: - self.cli_commands['timestamp'] = [] - msg = _("'timestamp' command '%s' is not available on EOS") % cmd - LOG.warn(msg) - - def _keystone_url(self): - keystone_auth_url = ('%s://%s:%s/v2.0/' % - (self.keystone_conf.auth_protocol, - self.keystone_conf.auth_host, - self.keystone_conf.auth_port)) - return keystone_auth_url - - def get_tenants(self): - """Returns dict of all tenants known by EOS. - - :returns: dictionary containing the networks per tenant - and VMs allocated per tenant - """ - cmds = ['show openstack config region %s' % self.region] - command_output = self._run_eos_cmds(cmds) - tenants = command_output[0]['tenants'] - - return tenants - - def plug_port_into_network(self, vm_id, host_id, port_id, - net_id, tenant_id, port_name, device_owner): - """Genric routine plug a port of a VM instace into network. 
- - :param vm_id: globally unique identifier for VM instance - :param host: ID of the host where the VM is placed - :param port_id: globally unique port ID that connects VM to network - :param network_id: globally unique neutron network identifier - :param tenant_id: globally unique neutron tenant identifier - :param port_name: Name of the port - for display purposes - :param device_owner: Device owner - e.g. compute or network:dhcp - """ - if device_owner == n_const.DEVICE_OWNER_DHCP: - self.plug_dhcp_port_into_network(vm_id, - host_id, - port_id, - net_id, - tenant_id, - port_name) - elif device_owner.startswith('compute'): - self.plug_host_into_network(vm_id, - host_id, - port_id, - net_id, - tenant_id, - port_name) - - def plug_host_into_network(self, vm_id, host, port_id, - network_id, tenant_id, port_name): - """Creates VLAN between TOR and compute host. - - :param vm_id: globally unique identifier for VM instance - :param host: ID of the host where the VM is placed - :param port_id: globally unique port ID that connects VM to network - :param network_id: globally unique neutron network identifier - :param tenant_id: globally unique neutron tenant identifier - :param port_name: Name of the port - for display purposes - """ - cmds = ['tenant %s' % tenant_id, - 'vm id %s hostid %s' % (vm_id, host)] - if port_name: - cmds.append('port id %s name "%s" network-id %s' % - (port_id, port_name, network_id)) - else: - cmds.append('port id %s network-id %s' % - (port_id, network_id)) - cmds.append('exit') - cmds.append('exit') - self._run_openstack_cmds(cmds) - - def plug_dhcp_port_into_network(self, dhcp_id, host, port_id, - network_id, tenant_id, port_name): - """Creates VLAN between TOR and dhcp host. 
- - :param dhcp_id: globally unique identifier for dhcp - :param host: ID of the host where the dhcp is hosted - :param port_id: globally unique port ID that connects dhcp to network - :param network_id: globally unique neutron network identifier - :param tenant_id: globally unique neutron tenant identifier - :param port_name: Name of the port - for display purposes - """ - cmds = ['tenant %s' % tenant_id, - 'network id %s' % network_id] - if port_name: - cmds.append('dhcp id %s hostid %s port-id %s name "%s"' % - (dhcp_id, host, port_id, port_name)) - else: - cmds.append('dhcp id %s hostid %s port-id %s' % - (dhcp_id, host, port_id)) - cmds.append('exit') - self._run_openstack_cmds(cmds) - - def unplug_host_from_network(self, vm_id, host, port_id, - network_id, tenant_id): - """Removes previously configured VLAN between TOR and a host. - - :param vm_id: globally unique identifier for VM instance - :param host: ID of the host where the VM is placed - :param port_id: globally unique port ID that connects VM to network - :param network_id: globally unique neutron network identifier - :param tenant_id: globally unique neutron tenant identifier - """ - cmds = ['tenant %s' % tenant_id, - 'vm id %s hostid %s' % (vm_id, host), - 'no port id %s' % port_id, - 'exit', - 'exit'] - self._run_openstack_cmds(cmds) - - def unplug_dhcp_port_from_network(self, dhcp_id, host, port_id, - network_id, tenant_id): - """Removes previously configured VLAN between TOR and a dhcp host. 
- - :param dhcp_id: globally unique identifier for dhcp - :param host: ID of the host where the dhcp is hosted - :param port_id: globally unique port ID that connects dhcp to network - :param network_id: globally unique neutron network identifier - :param tenant_id: globally unique neutron tenant identifier - """ - cmds = ['tenant %s' % tenant_id, - 'network id %s' % network_id, - 'no dhcp id %s port-id %s' % (dhcp_id, port_id), - 'exit'] - self._run_openstack_cmds(cmds) - - def create_network(self, tenant_id, network): - """Creates a single network on Arista hardware - - :param tenant_id: globally unique neutron tenant identifier - :param network: dict containing network_id, network_name and - segmentation_id - """ - self.create_network_bulk(tenant_id, [network]) - - def create_network_bulk(self, tenant_id, network_list): - """Creates a network on Arista Hardware - - :param tenant_id: globally unique neutron tenant identifier - :param network_list: list of dicts containing network_id, network_name - and segmentation_id - """ - cmds = ['tenant %s' % tenant_id] - # Create a reference to function to avoid name lookups in the loop - append_cmd = cmds.append - for network in network_list: - try: - append_cmd('network id %s name "%s"' % - (network['network_id'], network['network_name'])) - except KeyError: - append_cmd('network id %s' % network['network_id']) - # Enter segment mode without exiting out of network mode - append_cmd('segment 1 type vlan id %d' % - network['segmentation_id']) - cmds.extend(self._get_exit_mode_cmds(['segment', 'network', 'tenant'])) - self._run_openstack_cmds(cmds) - - def create_network_segments(self, tenant_id, network_id, - network_name, segments): - """Creates a network on Arista Hardware - - Note: This method is not used at the moment. create_network() - is used instead. This will be used once the support for - multiple segments is added in Neutron. 
- - :param tenant_id: globally unique neutron tenant identifier - :param network_id: globally unique neutron network identifier - :param network_name: Network name - for display purposes - :param segments: List of segments in a given network - """ - if segments: - cmds = ['tenant %s' % tenant_id, - 'network id %s name "%s"' % (network_id, network_name)] - seg_num = 1 - for seg in segments: - cmds.append('segment %d type %s id %d' % (seg_num, - seg['network_type'], seg['segmentation_id'])) - seg_num += 1 - cmds.append('exit') # exit for segment mode - cmds.append('exit') # exit for network mode - cmds.append('exit') # exit for tenant mode - - self._run_openstack_cmds(cmds) - - def delete_network(self, tenant_id, network_id): - """Deletes a specified network for a given tenant - - :param tenant_id: globally unique neutron tenant identifier - :param network_id: globally unique neutron network identifier - """ - self.delete_network_bulk(tenant_id, [network_id]) - - def delete_network_bulk(self, tenant_id, network_id_list): - """Deletes the network ids specified for a tenant - - :param tenant_id: globally unique neutron tenant identifier - :param network_id_list: list of globally unique neutron network - identifiers - """ - cmds = ['tenant %s' % tenant_id] - for network_id in network_id_list: - cmds.append('no network id %s' % network_id) - cmds.extend(self._get_exit_mode_cmds(['network', 'tenant'])) - self._run_openstack_cmds(cmds) - - def delete_vm(self, tenant_id, vm_id): - """Deletes a VM from EOS for a given tenant - - :param tenant_id : globally unique neutron tenant identifier - :param vm_id : id of a VM that needs to be deleted. - """ - self.delete_vm_bulk(tenant_id, [vm_id]) - - def delete_vm_bulk(self, tenant_id, vm_id_list): - """Deletes VMs from EOS for a given tenant - - :param tenant_id : globally unique neutron tenant identifier - :param vm_id_list : ids of VMs that needs to be deleted. 
- """ - cmds = ['tenant %s' % tenant_id] - for vm_id in vm_id_list: - cmds.append('no vm id %s' % vm_id) - cmds.extend(self._get_exit_mode_cmds(['vm', 'tenant'])) - self._run_openstack_cmds(cmds) - - def create_vm_port_bulk(self, tenant_id, vm_port_list, vms): - """Sends a bulk request to create ports. - - :param tenant_id: globaly unique neutron tenant identifier - :param vm_port_list: list of ports that need to be created. - :param vms: list of vms to which the ports will be attached to. - """ - cmds = ['tenant %s' % tenant_id] - # Create a reference to function to avoid name lookups in the loop - append_cmd = cmds.append - for port in vm_port_list: - try: - vm = vms[port['device_id']] - except KeyError: - msg = _("VM id %(vmid)s not found for port %(portid)s") % { - 'vmid': port['device_id'], - 'portid': port['id']} - LOG.warn(msg) - continue - - port_name = '' if 'name' not in port else 'name "%s"' % ( - port['name'] - ) - - if port['device_owner'] == n_const.DEVICE_OWNER_DHCP: - append_cmd('network id %s' % port['network_id']) - append_cmd('dhcp id %s hostid %s port-id %s %s' % - (vm['vmId'], vm['host'], port['id'], port_name)) - elif port['device_owner'].startswith('compute'): - append_cmd('vm id %s hostid %s' % (vm['vmId'], vm['host'])) - append_cmd('port id %s %s network-id %s' % - (port['id'], port_name, port['network_id'])) - else: - msg = _("Unknown device owner: %s") % port['device_owner'] - LOG.warn(msg) - continue - - append_cmd('exit') - self._run_openstack_cmds(cmds) - - def delete_tenant(self, tenant_id): - """Deletes a given tenant and all its networks and VMs from EOS. - - :param tenant_id: globally unique neutron tenant identifier - """ - self.delete_tenant_bulk([tenant_id]) - - def delete_tenant_bulk(self, tenant_list): - """Sends a bulk request to delete the tenants. - - :param tenant_list: list of globaly unique neutron tenant ids which - need to be deleted. 
- """ - - cmds = [] - for tenant in tenant_list: - cmds.append('no tenant %s' % tenant) - cmds.append('exit') - self._run_openstack_cmds(cmds) - - def delete_this_region(self): - """Deleted the region data from EOS.""" - cmds = ['enable', - 'configure', - 'cvx', - 'service openstack', - 'no region %s' % self.region, - 'exit', - 'exit', - 'exit'] - self._run_eos_cmds(cmds) - - def register_with_eos(self): - """This is the registration request with EOS. - - This the initial handshake between Neutron and EOS. - critical end-point information is registered with EOS. - """ - cmds = ['auth url %s user "%s" password "%s"' % - (self._keystone_url(), - self.keystone_conf.admin_user, - self.keystone_conf.admin_password)] - - log_cmds = ['auth url %s user %s password ******' % - (self._keystone_url(), - self.keystone_conf.admin_user)] - - self._run_openstack_cmds(cmds, commands_to_log=log_cmds) - - def clear_region_updated_time(self): - """Clear the region updated time which forces a resync.""" - - self._region_updated_time = None - - def region_in_sync(self): - """Check whether EOS is in sync with Neutron.""" - - eos_region_updated_times = self.get_region_updated_time() - return (self._region_updated_time and - (self._region_updated_time['regionTimestamp'] == - eos_region_updated_times['regionTimestamp'])) - - def get_region_updated_time(self): - """Return the timestamp of the last update. - - This method returns the time at which any entities in the region - were updated. - """ - timestamp_cmd = self.cli_commands['timestamp'] - if timestamp_cmd: - return self._run_eos_cmds(commands=timestamp_cmd)[0] - return None - - def _run_eos_cmds(self, commands, commands_to_log=None): - """Execute/sends a CAPI (Command API) command to EOS. - - In this method, list of commands is appended with prefix and - postfix commands - to make is understandble by EOS. - - :param commands : List of command to be executed on EOS. 
- :param commands_to_log : This should be set to the command that is - logged. If it is None, then the commands - param is logged. - """ - - log_cmd = commands - if commands_to_log: - log_cmd = commands_to_log - - LOG.info(_('Executing command on Arista EOS: %s'), log_cmd) - - try: - # this returns array of return values for every command in - # full_command list - ret = self._server.runCmds(version=1, cmds=commands) - except Exception as error: - host = cfg.CONF.ml2_arista.eapi_host - msg = (_('Error %(err)s while trying to execute ' - 'commands %(cmd)s on EOS %(host)s') % - {'err': error, 'cmd': commands_to_log, 'host': host}) - LOG.exception(msg) - raise arista_exc.AristaRpcError(msg=msg) - - return ret - - def _build_command(self, cmds): - """Build full EOS's openstack CLI command. - - Helper method to add commands to enter and exit from openstack - CLI modes. - - :param cmds: The openstack CLI commands that need to be executed - in the openstack config mode. - """ - - full_command = [ - 'enable', - 'configure', - 'cvx', - 'service openstack', - 'region %s' % self.region, - ] - full_command.extend(cmds) - full_command.extend(self._get_exit_mode_cmds(['region', - 'openstack', - 'cvx'])) - full_command.extend(self.cli_commands['timestamp']) - return full_command - - def _run_openstack_cmds(self, commands, commands_to_log=None): - """Execute/sends a CAPI (Command API) command to EOS. - - In this method, list of commands is appended with prefix and - postfix commands - to make is understandble by EOS. - - :param commands : List of command to be executed on EOS. - :param commands_to_logs : This should be set to the command that is - logged. If it is None, then the commands - param is logged. 
- """ - - full_command = self._build_command(commands) - if commands_to_log: - full_log_command = self._build_command(commands_to_log) - else: - full_log_command = None - ret = self._run_eos_cmds(full_command, full_log_command) - # Remove return values for 'configure terminal', - # 'service openstack' and 'exit' commands - if self.cli_commands['timestamp']: - self._region_updated_time = ret[-1] - - def _eapi_host_url(self): - self._validate_config() - - user = cfg.CONF.ml2_arista.eapi_username - pwd = cfg.CONF.ml2_arista.eapi_password - host = cfg.CONF.ml2_arista.eapi_host - - eapi_server_url = ('https://%s:%s@%s/command-api' % - (user, pwd, host)) - return eapi_server_url - - def _validate_config(self): - if cfg.CONF.ml2_arista.get('eapi_host') == '': - msg = _('Required option eapi_host is not set') - LOG.error(msg) - raise arista_exc.AristaConfigError(msg=msg) - if cfg.CONF.ml2_arista.get('eapi_username') == '': - msg = _('Required option eapi_username is not set') - LOG.error(msg) - raise arista_exc.AristaConfigError(msg=msg) - - -class SyncService(object): - """Synchronizatin of information between Neutron and EOS - - Periodically (through configuration option), this service - ensures that Networks and VMs configured on EOS/Arista HW - are always in sync with Neutron DB. - """ - def __init__(self, rpc_wrapper, neutron_db): - self._rpc = rpc_wrapper - self._ndb = neutron_db - self._force_sync = True - - def synchronize(self): - """Sends data to EOS which differs from neutron DB.""" - - LOG.info(_('Syncing Neutron <-> EOS')) - try: - # Get the time at which entities in the region were updated. - # If the times match, then ML2 is in sync with EOS. Otherwise - # perform a complete sync. 
- if not self._force_sync and self._rpc.region_in_sync(): - LOG.info(_('OpenStack and EOS are in sync!')) - return - except arista_exc.AristaRpcError: - LOG.warning(EOS_UNREACHABLE_MSG) - self._force_sync = True - return - - try: - #Always register with EOS to ensure that it has correct credentials - self._rpc.register_with_eos() - eos_tenants = self._rpc.get_tenants() - except arista_exc.AristaRpcError: - LOG.warning(EOS_UNREACHABLE_MSG) - self._force_sync = True - return - - db_tenants = db.get_tenants() - - if not db_tenants and eos_tenants: - # No tenants configured in Neutron. Clear all EOS state - try: - self._rpc.delete_this_region() - msg = _('No Tenants configured in Neutron DB. But %d ' - 'tenants disovered in EOS during synchronization.' - 'Enitre EOS region is cleared') % len(eos_tenants) - LOG.info(msg) - # Re-register with EOS so that the timestamp is updated. - self._rpc.register_with_eos() - # Region has been completely cleaned. So there is nothing to - # syncronize - self._force_sync = False - except arista_exc.AristaRpcError: - LOG.warning(EOS_UNREACHABLE_MSG) - self._force_sync = True - return - - # Delete tenants that are in EOS, but not in the database - tenants_to_delete = frozenset(eos_tenants.keys()).difference( - db_tenants.keys()) - - if tenants_to_delete: - try: - self._rpc.delete_tenant_bulk(tenants_to_delete) - except arista_exc.AristaRpcError: - LOG.warning(EOS_UNREACHABLE_MSG) - self._force_sync = True - return - - # None of the commands have failed till now. 
But if subsequent - # operations fail, then force_sync is set to true - self._force_sync = False - - for tenant in db_tenants: - db_nets = db.get_networks(tenant) - db_vms = db.get_vms(tenant) - eos_nets = self._get_eos_networks(eos_tenants, tenant) - eos_vms = self._get_eos_vms(eos_tenants, tenant) - - db_nets_key_set = frozenset(db_nets.keys()) - db_vms_key_set = frozenset(db_vms.keys()) - eos_nets_key_set = frozenset(eos_nets.keys()) - eos_vms_key_set = frozenset(eos_vms.keys()) - - # Find the networks that are present on EOS, but not in Neutron DB - nets_to_delete = eos_nets_key_set.difference(db_nets_key_set) - - # Find the VMs that are present on EOS, but not in Neutron DB - vms_to_delete = eos_vms_key_set.difference(db_vms_key_set) - - # Find the Networks that are present in Neutron DB, but not on EOS - nets_to_update = db_nets_key_set.difference(eos_nets_key_set) - - # Find the VMs that are present in Neutron DB, but not on EOS - vms_to_update = db_vms_key_set.difference(eos_vms_key_set) - - try: - if vms_to_delete: - self._rpc.delete_vm_bulk(tenant, vms_to_delete) - if nets_to_delete: - self._rpc.delete_network_bulk(tenant, nets_to_delete) - if nets_to_update: - # Create a dict of networks keyed by id. - neutron_nets = dict( - (network['id'], network) for network in - self._ndb.get_all_networks_for_tenant(tenant) - ) - - networks = [ - {'network_id': net_id, - 'segmentation_id': - db_nets[net_id]['segmentationTypeId'], - 'network_name': - neutron_nets.get(net_id, {'name': ''})['name'], } - for net_id in nets_to_update - ] - self._rpc.create_network_bulk(tenant, networks) - if vms_to_update: - # Filter the ports to only the vms that we are interested - # in. 
- vm_ports = [ - port for port in self._ndb.get_all_ports_for_tenant( - tenant) if port['device_id'] in vms_to_update - ] - self._rpc.create_vm_port_bulk(tenant, vm_ports, db_vms) - except arista_exc.AristaRpcError: - LOG.warning(EOS_UNREACHABLE_MSG) - self._force_sync = True - - def _get_eos_networks(self, eos_tenants, tenant): - networks = {} - if eos_tenants and tenant in eos_tenants: - networks = eos_tenants[tenant]['tenantNetworks'] - return networks - - def _get_eos_vms(self, eos_tenants, tenant): - vms = {} - if eos_tenants and tenant in eos_tenants: - vms = eos_tenants[tenant]['tenantVmInstances'] - return vms - - -class AristaDriver(driver_api.MechanismDriver): - """Ml2 Mechanism driver for Arista networking hardware. - - Remebers all networks and VMs that are provisioned on Arista Hardware. - Does not send network provisioning request if the network has already been - provisioned before for the given port. - """ - def __init__(self, rpc=None): - - self.rpc = rpc or AristaRPCWrapper() - self.db_nets = db.AristaProvisionedNets() - self.db_vms = db.AristaProvisionedVms() - self.db_tenants = db.AristaProvisionedTenants() - self.ndb = db.NeutronNets() - - confg = cfg.CONF.ml2_arista - self.segmentation_type = db.VLAN_SEGMENTATION - self.timer = None - self.eos = SyncService(self.rpc, self.ndb) - self.sync_timeout = confg['sync_interval'] - self.eos_sync_lock = threading.Lock() - - def initialize(self): - self.rpc.register_with_eos() - self._cleanup_db() - self.rpc.check_cli_commands() - # Registering with EOS updates self.rpc.region_updated_time. 
Clear it - # to force an initial sync - self.rpc.clear_region_updated_time() - self._synchronization_thread() - - def create_network_precommit(self, context): - """Remember the tenant, and network information.""" - - network = context.current - segments = context.network_segments - network_id = network['id'] - tenant_id = network['tenant_id'] - segmentation_id = segments[0]['segmentation_id'] - with self.eos_sync_lock: - db.remember_tenant(tenant_id) - db.remember_network(tenant_id, - network_id, - segmentation_id) - - def create_network_postcommit(self, context): - """Provision the network on the Arista Hardware.""" - - network = context.current - network_id = network['id'] - network_name = network['name'] - tenant_id = network['tenant_id'] - segments = context.network_segments - vlan_id = segments[0]['segmentation_id'] - with self.eos_sync_lock: - if db.is_network_provisioned(tenant_id, network_id): - try: - network_dict = { - 'network_id': network_id, - 'segmentation_id': vlan_id, - 'network_name': network_name} - self.rpc.create_network(tenant_id, network_dict) - except arista_exc.AristaRpcError: - LOG.info(EOS_UNREACHABLE_MSG) - raise ml2_exc.MechanismDriverError() - else: - msg = _('Network %s is not created as it is not found in' - 'Arista DB') % network_id - LOG.info(msg) - - def update_network_precommit(self, context): - """At the moment we only support network name change - - Any other change in network is not supported at this time. - We do not store the network names, therefore, no DB store - action is performed here. - """ - new_network = context.current - orig_network = context.original - if new_network['name'] != orig_network['name']: - msg = _('Network name changed to %s') % new_network['name'] - LOG.info(msg) - - def update_network_postcommit(self, context): - """At the moment we only support network name change - - If network name is changed, a new network create request is - sent to the Arista Hardware. 
- """ - new_network = context.current - orig_network = context.original - if new_network['name'] != orig_network['name']: - network_id = new_network['id'] - network_name = new_network['name'] - tenant_id = new_network['tenant_id'] - vlan_id = new_network['provider:segmentation_id'] - with self.eos_sync_lock: - if db.is_network_provisioned(tenant_id, network_id): - try: - network_dict = { - 'network_id': network_id, - 'segmentation_id': vlan_id, - 'network_name': network_name} - self.rpc.create_network(tenant_id, network_dict) - except arista_exc.AristaRpcError: - LOG.info(EOS_UNREACHABLE_MSG) - raise ml2_exc.MechanismDriverError() - else: - msg = _('Network %s is not updated as it is not found in' - 'Arista DB') % network_id - LOG.info(msg) - - def delete_network_precommit(self, context): - """Delete the network infromation from the DB.""" - network = context.current - network_id = network['id'] - tenant_id = network['tenant_id'] - with self.eos_sync_lock: - if db.is_network_provisioned(tenant_id, network_id): - db.forget_network(tenant_id, network_id) - # if necessary, delete tenant as well. - self.delete_tenant(tenant_id) - - def delete_network_postcommit(self, context): - """Send network delete request to Arista HW.""" - network = context.current - network_id = network['id'] - tenant_id = network['tenant_id'] - with self.eos_sync_lock: - - # Succeed deleting network in case EOS is not accessible. - # EOS state will be updated by sync thread once EOS gets - # alive. - try: - self.rpc.delete_network(tenant_id, network_id) - except arista_exc.AristaRpcError: - LOG.info(EOS_UNREACHABLE_MSG) - raise ml2_exc.MechanismDriverError() - - def create_port_precommit(self, context): - """Remember the infromation about a VM and its ports - - A VM information, along with the physical host information - is saved. 
- """ - port = context.current - device_id = port['device_id'] - device_owner = port['device_owner'] - host = port[portbindings.HOST_ID] - - # device_id and device_owner are set on VM boot - is_vm_boot = device_id and device_owner - if host and is_vm_boot: - port_id = port['id'] - network_id = port['network_id'] - tenant_id = port['tenant_id'] - with self.eos_sync_lock: - db.remember_vm(device_id, host, port_id, - network_id, tenant_id) - - def create_port_postcommit(self, context): - """Plug a physical host into a network. - - Send provisioning request to Arista Hardware to plug a host - into appropriate network. - """ - port = context.current - device_id = port['device_id'] - device_owner = port['device_owner'] - host = port[portbindings.HOST_ID] - - # device_id and device_owner are set on VM boot - is_vm_boot = device_id and device_owner - if host and is_vm_boot: - port_id = port['id'] - port_name = port['name'] - network_id = port['network_id'] - tenant_id = port['tenant_id'] - with self.eos_sync_lock: - hostname = self._host_name(host) - vm_provisioned = db.is_vm_provisioned(device_id, - host, - port_id, - network_id, - tenant_id) - net_provisioned = db.is_network_provisioned(tenant_id, - network_id) - if vm_provisioned and net_provisioned: - try: - self.rpc.plug_port_into_network(device_id, - hostname, - port_id, - network_id, - tenant_id, - port_name, - device_owner) - except arista_exc.AristaRpcError: - LOG.info(EOS_UNREACHABLE_MSG) - raise ml2_exc.MechanismDriverError() - else: - msg = _('VM %s is not created as it is not found in ' - 'Arista DB') % device_id - LOG.info(msg) - - def update_port_precommit(self, context): - """Update the name of a given port. - - At the moment we only support port name change. - Any other change to port is not supported at this time. - We do not store the port names, therefore, no DB store - action is performed here. 
- """ - new_port = context.current - orig_port = context.original - if new_port['name'] != orig_port['name']: - msg = _('Port name changed to %s') % new_port['name'] - LOG.info(msg) - - def update_port_postcommit(self, context): - """Update the name of a given port in EOS. - - At the moment we only support port name change - Any other change to port is not supported at this time. - """ - port = context.current - orig_port = context.original - if port['name'] == orig_port['name']: - # nothing to do - return - - device_id = port['device_id'] - device_owner = port['device_owner'] - host = port[portbindings.HOST_ID] - is_vm_boot = device_id and device_owner - - if host and is_vm_boot: - port_id = port['id'] - port_name = port['name'] - network_id = port['network_id'] - tenant_id = port['tenant_id'] - with self.eos_sync_lock: - hostname = self._host_name(host) - segmentation_id = db.get_segmentation_id(tenant_id, - network_id) - vm_provisioned = db.is_vm_provisioned(device_id, - host, - port_id, - network_id, - tenant_id) - net_provisioned = db.is_network_provisioned(tenant_id, - network_id, - segmentation_id) - if vm_provisioned and net_provisioned: - try: - self.rpc.plug_port_into_network(device_id, - hostname, - port_id, - network_id, - tenant_id, - port_name, - device_owner) - except arista_exc.AristaRpcError: - LOG.info(EOS_UNREACHABLE_MSG) - raise ml2_exc.MechanismDriverError() - else: - msg = _('VM %s is not updated as it is not found in ' - 'Arista DB') % device_id - LOG.info(msg) - - def delete_port_precommit(self, context): - """Delete information about a VM and host from the DB.""" - port = context.current - - host_id = port[portbindings.HOST_ID] - device_id = port['device_id'] - tenant_id = port['tenant_id'] - network_id = port['network_id'] - port_id = port['id'] - with self.eos_sync_lock: - if db.is_vm_provisioned(device_id, host_id, port_id, - network_id, tenant_id): - db.forget_vm(device_id, host_id, port_id, - network_id, tenant_id) - # if necessary, 
delete tenant as well. - self.delete_tenant(tenant_id) - - def delete_port_postcommit(self, context): - """unPlug a physical host from a network. - - Send provisioning request to Arista Hardware to unplug a host - from appropriate network. - """ - port = context.current - device_id = port['device_id'] - host = port[portbindings.HOST_ID] - port_id = port['id'] - network_id = port['network_id'] - tenant_id = port['tenant_id'] - device_owner = port['device_owner'] - - try: - with self.eos_sync_lock: - hostname = self._host_name(host) - if device_owner == n_const.DEVICE_OWNER_DHCP: - self.rpc.unplug_dhcp_port_from_network(device_id, - hostname, - port_id, - network_id, - tenant_id) - else: - self.rpc.unplug_host_from_network(device_id, - hostname, - port_id, - network_id, - tenant_id) - except arista_exc.AristaRpcError: - LOG.info(EOS_UNREACHABLE_MSG) - raise ml2_exc.MechanismDriverError() - - def delete_tenant(self, tenant_id): - """delete a tenant from DB. - - A tenant is deleted only if there is no network or VM configured - configured for this tenant. 
- """ - objects_for_tenant = (db.num_nets_provisioned(tenant_id) + - db.num_vms_provisioned(tenant_id)) - if not objects_for_tenant: - db.forget_tenant(tenant_id) - - def _host_name(self, hostname): - fqdns_used = cfg.CONF.ml2_arista['use_fqdn'] - return hostname if fqdns_used else hostname.split('.')[0] - - def _synchronization_thread(self): - with self.eos_sync_lock: - self.eos.synchronize() - - self.timer = threading.Timer(self.sync_timeout, - self._synchronization_thread) - self.timer.start() - - def stop_synchronization_thread(self): - if self.timer: - self.timer.cancel() - self.timer = None - - def _cleanup_db(self): - """Clean up any uncessary entries in our DB.""" - db_tenants = db.get_tenants() - for tenant in db_tenants: - neutron_nets = self.ndb.get_all_networks_for_tenant(tenant) - neutron_nets_id = [] - for net in neutron_nets: - neutron_nets_id.append(net['id']) - db_nets = db.get_networks(tenant) - for net_id in db_nets.keys(): - if net_id not in neutron_nets_id: - db.forget_network(tenant, net_id) diff --git a/neutron/plugins/ml2/drivers/mech_bigswitch/__init__.py b/neutron/plugins/ml2/drivers/mech_bigswitch/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/neutron/plugins/ml2/drivers/mech_bigswitch/driver.py b/neutron/plugins/ml2/drivers/mech_bigswitch/driver.py deleted file mode 100644 index d8fd53dd1..000000000 --- a/neutron/plugins/ml2/drivers/mech_bigswitch/driver.py +++ /dev/null @@ -1,130 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2014 Big Switch Networks, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Sumit Naiksatam, sumitnaiksatam@gmail.com, Big Switch Networks, Inc. -# @author: Kevin Benton, Big Switch Networks, Inc. -import copy -import httplib - -import eventlet -from oslo.config import cfg - -from neutron import context as ctx -from neutron.extensions import portbindings -from neutron.openstack.common import excutils -from neutron.openstack.common import log -from neutron.plugins.bigswitch import config as pl_config -from neutron.plugins.bigswitch import plugin -from neutron.plugins.bigswitch import servermanager -from neutron.plugins.ml2 import driver_api as api - - -LOG = log.getLogger(__name__) - - -class BigSwitchMechanismDriver(plugin.NeutronRestProxyV2Base, - api.MechanismDriver): - - """Mechanism Driver for Big Switch Networks Controller. - - This driver relays the network create, update, delete - operations to the Big Switch Controller. 
- """ - - def initialize(self): - LOG.debug(_('Initializing driver')) - - # register plugin config opts - pl_config.register_config() - self.evpool = eventlet.GreenPool(cfg.CONF.RESTPROXY.thread_pool_size) - # backend doesn't support bulk operations yet - self.native_bulk_support = False - - # init network ctrl connections - self.servers = servermanager.ServerPool() - self.servers.get_topo_function = self._get_all_data - self.servers.get_topo_function_args = {'get_ports': True, - 'get_floating_ips': False, - 'get_routers': False} - self.segmentation_types = ', '.join(cfg.CONF.ml2.type_drivers) - LOG.debug(_("Initialization done")) - - def create_network_postcommit(self, context): - # create network on the network controller - self._send_create_network(context.current) - - def update_network_postcommit(self, context): - # update network on the network controller - self._send_update_network(context.current) - - def delete_network_postcommit(self, context): - # delete network on the network controller - self._send_delete_network(context.current) - - def create_port_postcommit(self, context): - # create port on the network controller - port = self._prepare_port_for_controller(context) - if port: - self.async_port_create(port["network"]["tenant_id"], - port["network"]["id"], port) - - def update_port_postcommit(self, context): - # update port on the network controller - port = self._prepare_port_for_controller(context) - if port: - try: - self.servers.rest_update_port(port["network"]["tenant_id"], - port["network"]["id"], port) - except servermanager.RemoteRestError as e: - with excutils.save_and_reraise_exception() as ctxt: - if (cfg.CONF.RESTPROXY.auto_sync_on_failure and - e.status == httplib.NOT_FOUND and - servermanager.NXNETWORK in e.reason): - ctxt.reraise = False - LOG.error(_("Iconsistency with backend controller " - "triggering full synchronization.")) - topoargs = self.servers.get_topo_function_args - self._send_all_data( - send_ports=topoargs['get_ports'], - 
send_floating_ips=topoargs['get_floating_ips'], - send_routers=topoargs['get_routers'], - triggered_by_tenant=port["network"]["tenant_id"] - ) - - def delete_port_postcommit(self, context): - # delete port on the network controller - port = context.current - net = context.network.current - self.servers.rest_delete_port(net["tenant_id"], net["id"], port['id']) - - def _prepare_port_for_controller(self, context): - # make a copy so the context isn't changed for other drivers - port = copy.deepcopy(context.current) - net = context.network.current - port['network'] = net - port['bound_segment'] = context.bound_segment - actx = ctx.get_admin_context() - prepped_port = self._extend_port_dict_binding(actx, port) - prepped_port = self._map_state_and_status(prepped_port) - if (portbindings.HOST_ID not in prepped_port or - prepped_port[portbindings.HOST_ID] == ''): - LOG.warning(_("Ignoring port notification to controller because " - "of missing host ID.")) - # in ML2, controller doesn't care about ports without - # the host_id set - return False - return prepped_port diff --git a/neutron/plugins/ml2/drivers/mech_hyperv.py b/neutron/plugins/ml2/drivers/mech_hyperv.py deleted file mode 100644 index b384d3425..000000000 --- a/neutron/plugins/ml2/drivers/mech_hyperv.py +++ /dev/null @@ -1,57 +0,0 @@ -# Copyright (c) 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import re - -from neutron.common import constants -from neutron.extensions import portbindings -from neutron.openstack.common import log -from neutron.plugins.ml2 import driver_api as api -from neutron.plugins.ml2.drivers import mech_agent - -LOG = log.getLogger(__name__) - - -class HypervMechanismDriver(mech_agent.SimpleAgentMechanismDriverBase): - """Attach to networks using hyperv L2 agent. - - The HypervMechanismDriver integrates the ml2 plugin with the - hyperv L2 agent. Port binding with this driver requires the hyperv - agent to be running on the port's host, and that agent to have - connectivity to at least one segment of the port's network. - """ - - def __init__(self): - super(HypervMechanismDriver, self).__init__( - constants.AGENT_TYPE_HYPERV, - portbindings.VIF_TYPE_HYPERV, - {portbindings.CAP_PORT_FILTER: False}) - - def check_segment_for_agent(self, segment, agent): - mappings = agent['configurations'].get('vswitch_mappings', {}) - LOG.debug(_("Checking segment: %(segment)s " - "for mappings: %(mappings)s"), - {'segment': segment, 'mappings': mappings}) - network_type = segment[api.NETWORK_TYPE] - if network_type == 'local': - return True - elif network_type in ['flat', 'vlan']: - for pattern in mappings: - if re.match(pattern, segment[api.PHYSICAL_NETWORK]): - return True - else: - return False - else: - return False diff --git a/neutron/plugins/ml2/drivers/mech_linuxbridge.py b/neutron/plugins/ml2/drivers/mech_linuxbridge.py deleted file mode 100644 index b304ad4ba..000000000 --- a/neutron/plugins/ml2/drivers/mech_linuxbridge.py +++ /dev/null @@ -1,57 +0,0 @@ -# Copyright (c) 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from neutron.common import constants -from neutron.extensions import portbindings -from neutron.openstack.common import log -from neutron.plugins.ml2 import driver_api as api -from neutron.plugins.ml2.drivers import mech_agent - -LOG = log.getLogger(__name__) - - -class LinuxbridgeMechanismDriver(mech_agent.SimpleAgentMechanismDriverBase): - """Attach to networks using linuxbridge L2 agent. - - The LinuxbridgeMechanismDriver integrates the ml2 plugin with the - linuxbridge L2 agent. Port binding with this driver requires the - linuxbridge agent to be running on the port's host, and that agent - to have connectivity to at least one segment of the port's - network. 
- """ - - def __init__(self): - super(LinuxbridgeMechanismDriver, self).__init__( - constants.AGENT_TYPE_LINUXBRIDGE, - portbindings.VIF_TYPE_BRIDGE, - {portbindings.CAP_PORT_FILTER: True}) - - def check_segment_for_agent(self, segment, agent): - mappings = agent['configurations'].get('interface_mappings', {}) - tunnel_types = agent['configurations'].get('tunnel_types', []) - LOG.debug(_("Checking segment: %(segment)s " - "for mappings: %(mappings)s " - "with tunnel_types: %(tunnel_types)s"), - {'segment': segment, 'mappings': mappings, - 'tunnel_types': tunnel_types}) - network_type = segment[api.NETWORK_TYPE] - if network_type == 'local': - return True - elif network_type in tunnel_types: - return True - elif network_type in ['flat', 'vlan']: - return segment[api.PHYSICAL_NETWORK] in mappings - else: - return False diff --git a/neutron/plugins/ml2/drivers/mech_ofagent.py b/neutron/plugins/ml2/drivers/mech_ofagent.py deleted file mode 100644 index b593e61d6..000000000 --- a/neutron/plugins/ml2/drivers/mech_ofagent.py +++ /dev/null @@ -1,61 +0,0 @@ -# Copyright (C) 2014 VA Linux Systems Japan K.K. -# Based on openvswitch mechanism driver. -# -# Copyright (c) 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# @author: Fumihiko Kakuma, VA Linux Systems Japan K.K. 
- -from neutron.common import constants -from neutron.extensions import portbindings -from neutron.openstack.common import log -from neutron.plugins.common import constants as p_const -from neutron.plugins.ml2 import driver_api as api -from neutron.plugins.ml2.drivers import mech_agent - -LOG = log.getLogger(__name__) - - -class OfagentMechanismDriver(mech_agent.SimpleAgentMechanismDriverBase): - """Attach to networks using ofagent L2 agent. - - The OfagentMechanismDriver integrates the ml2 plugin with the - ofagent L2 agent. Port binding with this driver requires the - ofagent agent to be running on the port's host, and that agent - to have connectivity to at least one segment of the port's - network. - """ - - def __init__(self): - super(OfagentMechanismDriver, self).__init__( - constants.AGENT_TYPE_OFA, - portbindings.VIF_TYPE_OVS, - {portbindings.CAP_PORT_FILTER: True, - portbindings.OVS_HYBRID_PLUG: True}) - - def check_segment_for_agent(self, segment, agent): - mappings = agent['configurations'].get('bridge_mappings', {}) - tunnel_types = agent['configurations'].get('tunnel_types', []) - LOG.debug(_("Checking segment: %(segment)s " - "for mappings: %(mappings)s " - "with tunnel_types: %(tunnel_types)s"), - {'segment': segment, 'mappings': mappings, - 'tunnel_types': tunnel_types}) - network_type = segment[api.NETWORK_TYPE] - return ( - network_type == p_const.TYPE_LOCAL or - network_type in tunnel_types or - (network_type in [p_const.TYPE_FLAT, p_const.TYPE_VLAN] and - segment[api.PHYSICAL_NETWORK] in mappings) - ) diff --git a/neutron/plugins/ml2/drivers/mech_openvswitch.py b/neutron/plugins/ml2/drivers/mech_openvswitch.py deleted file mode 100644 index 0565b9730..000000000 --- a/neutron/plugins/ml2/drivers/mech_openvswitch.py +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright (c) 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from neutron.common import constants -from neutron.extensions import portbindings -from neutron.openstack.common import log -from neutron.plugins.ml2 import driver_api as api -from neutron.plugins.ml2.drivers import mech_agent - -LOG = log.getLogger(__name__) - - -class OpenvswitchMechanismDriver(mech_agent.SimpleAgentMechanismDriverBase): - """Attach to networks using openvswitch L2 agent. - - The OpenvswitchMechanismDriver integrates the ml2 plugin with the - openvswitch L2 agent. Port binding with this driver requires the - openvswitch agent to be running on the port's host, and that agent - to have connectivity to at least one segment of the port's - network. 
- """ - - def __init__(self): - super(OpenvswitchMechanismDriver, self).__init__( - constants.AGENT_TYPE_OVS, - portbindings.VIF_TYPE_OVS, - {portbindings.CAP_PORT_FILTER: True, - portbindings.OVS_HYBRID_PLUG: True}) - - def check_segment_for_agent(self, segment, agent): - mappings = agent['configurations'].get('bridge_mappings', {}) - tunnel_types = agent['configurations'].get('tunnel_types', []) - LOG.debug(_("Checking segment: %(segment)s " - "for mappings: %(mappings)s " - "with tunnel_types: %(tunnel_types)s"), - {'segment': segment, 'mappings': mappings, - 'tunnel_types': tunnel_types}) - network_type = segment[api.NETWORK_TYPE] - if network_type == 'local': - return True - elif network_type in tunnel_types: - return True - elif network_type in ['flat', 'vlan']: - return segment[api.PHYSICAL_NETWORK] in mappings - else: - return False diff --git a/neutron/plugins/ml2/drivers/mechanism_fslsdn.py b/neutron/plugins/ml2/drivers/mechanism_fslsdn.py deleted file mode 100755 index 514fd9b86..000000000 --- a/neutron/plugins/ml2/drivers/mechanism_fslsdn.py +++ /dev/null @@ -1,288 +0,0 @@ -# Copyright (c) 2014 Freescale Semiconductor -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -# @author: Trinath Somanchi, Freescale, Inc - - -from neutronclient.v2_0 import client -from oslo.config import cfg - -from neutron.common import constants as n_const -from neutron.common import log -from neutron.extensions import portbindings -from neutron.openstack.common import log as logging -from neutron.plugins.common import constants -from neutron.plugins.ml2 import driver_api as api - - -LOG = logging.getLogger(__name__) - -# CRD service options required for FSL SDN OS Mech Driver -ml2_fslsdn_opts = [ - cfg.StrOpt('crd_user_name', default='crd', - help=_("CRD service Username")), - cfg.StrOpt('crd_password', default='password', - secret='True', - help=_("CRD Service Password")), - cfg.StrOpt('crd_tenant_name', default='service', - help=_("CRD Tenant Name")), - cfg.StrOpt('crd_auth_url', - default='http://127.0.0.1:5000/v2.0/', - help=_("CRD Auth URL")), - cfg.StrOpt('crd_url', - default='http://127.0.0.1:9797', - help=_("URL for connecting to CRD service")), - cfg.IntOpt('crd_url_timeout', - default=30, - help=_("Timeout value for connecting to " - "CRD service in seconds")), - cfg.StrOpt('crd_region_name', - default='RegionOne', - help=_("Region name for connecting to " - "CRD Service in admin context")), - cfg.BoolOpt('crd_api_insecure', - default=False, - help=_("If set, ignore any SSL validation issues")), - cfg.StrOpt('crd_auth_strategy', - default='keystone', - help=_("Auth strategy for connecting to " - "neutron in admin context")), - cfg.StrOpt('crd_ca_certificates_file', - help=_("Location of ca certificates file to use for " - "CRD client requests.")), -] - -# Register the configuration option for crd service -# required for FSL SDN OS Mechanism driver -cfg.CONF.register_opts(ml2_fslsdn_opts, "ml2_fslsdn") - -# shortcut -FSLCONF = cfg.CONF.ml2_fslsdn - -SERVICE_TYPE = 'crd' - - -class FslsdnMechanismDriver(api.MechanismDriver): - - """Freescale SDN OS Mechanism Driver for ML2 Plugin.""" - - @log.log - def initialize(self): - """Initialize the 
Mechanism driver.""" - - self.vif_type = portbindings.VIF_TYPE_OVS - self.vif_details = {portbindings.CAP_PORT_FILTER: True} - LOG.info(_("Initializing CRD client... ")) - crd_client_params = { - 'username': FSLCONF.crd_user_name, - 'tenant_name': FSLCONF.crd_tenant_name, - 'region_name': FSLCONF.crd_region_name, - 'password': FSLCONF.crd_password, - 'auth_url': FSLCONF.crd_auth_url, - 'auth_strategy': FSLCONF.crd_auth_strategy, - 'endpoint_url': FSLCONF.crd_url, - 'timeout': FSLCONF.crd_url_timeout, - 'insecure': FSLCONF.crd_api_insecure, - 'service_type': SERVICE_TYPE, - 'ca_cert': FSLCONF.crd_ca_certificates_file, - } - self._crdclient = client.Client(**crd_client_params) - - # Network Management - @staticmethod - @log.log - def _prepare_crd_network(network, segments): - """Helper function to create 'network' data.""" - - return {'network': - {'network_id': network['id'], - 'tenant_id': network['tenant_id'], - 'name': network['name'], - 'status': network['status'], - 'admin_state_up': network['admin_state_up'], - 'segments': segments, - }} - - def create_network_postcommit(self, context): - """Send create_network data to CRD service.""" - - network = context.current - segments = context.network_segments - body = self._prepare_crd_network(network, segments) - self._crdclient.create_network(body=body) - LOG.debug("create_network update sent to CRD Server: %s", body) - - def update_network_postcommit(self, context): - """Send update_network data to CRD service.""" - - network = context.current - segments = context.network_segments - body = self._prepare_crd_network(network, segments) - self._crdclient.update_network(network['id'], body=body) - LOG.debug("update_network update sent to CRD Server: %s", body) - - def delete_network_postcommit(self, context): - """Send delete_network data to CRD service.""" - - network = context.current - self._crdclient.delete_network(network['id']) - LOG.debug( - "delete_network update sent to CRD Server: %s", - network['id']) - - # 
Port Management - @staticmethod - def _prepare_crd_port(port): - """Helper function to prepare 'port' data.""" - - crd_subnet_id = '' - crd_ipaddress = '' - crd_sec_grps = '' - # Since CRD accepts one Fixed IP, - # so handle only one fixed IP per port. - if len(port['fixed_ips']) > 1: - LOG.debug("More than one fixed IP exists - using first one.") - # check empty fixed_ips list, move on if one or more exists - if len(port['fixed_ips']) != 0: - crd_subnet_id = port['fixed_ips'][0]['subnet_id'] - crd_ipaddress = port['fixed_ips'][0]['ip_address'] - LOG.debug("Handling fixed IP {subnet_id:%(subnet)s, " - "ip_address:%(ip)s}", - {'subnet': crd_subnet_id, 'ip': crd_ipaddress}) - else: - LOG.debug("No fixed IPs found.") - if 'security_groups' in port: - crd_sec_grps = ','.join(port['security_groups']) - return {'port': - {'port_id': port['id'], - 'tenant_id': port['tenant_id'], - 'name': port['name'], - 'network_id': port['network_id'], - 'subnet_id': crd_subnet_id, - 'mac_address': port['mac_address'], - 'device_id': port['device_id'], - 'ip_address': crd_ipaddress, - 'admin_state_up': port['admin_state_up'], - 'status': port['status'], - 'device_owner': port['device_owner'], - 'security_groups': crd_sec_grps, - }} - - def create_port_postcommit(self, context): - """Send create_port data to CRD service.""" - - port = context.current - body = self._prepare_crd_port(port) - self._crdclient.create_port(body=body) - LOG.debug("create_port update sent to CRD Server: %s", body) - - def delete_port_postcommit(self, context): - """Send delete_port data to CRD service.""" - - port = context.current - self._crdclient.delete_port(port['id']) - LOG.debug("delete_port update sent to CRD Server: %s", port['id']) - - # Subnet Management - @staticmethod - @log.log - def _prepare_crd_subnet(subnet): - """Helper function to prepare 'subnet' data.""" - - crd_allocation_pools = '' - crd_dns_nameservers = '' - crd_host_routes = '' - # Handling Allocation IPs - if 'allocation_pools' in 
subnet: - a_pools = subnet['allocation_pools'] - crd_allocation_pools = ','.join(["%s-%s" % (p['start'], - p['end']) - for p in a_pools]) - # Handling Host Routes - if 'host_routes' in subnet: - crd_host_routes = ','.join(["%s-%s" % (r['destination'], - r['nexthop']) - for r in subnet['host_routes']]) - # Handling DNS Nameservers - if 'dns_nameservers' in subnet: - crd_dns_nameservers = ','.join(subnet['dns_nameservers']) - # return Subnet Data - return {'subnet': - {'subnet_id': subnet['id'], - 'tenant_id': subnet['tenant_id'], - 'name': subnet['name'], - 'network_id': subnet['network_id'], - 'ip_version': subnet['ip_version'], - 'cidr': subnet['cidr'], - 'gateway_ip': subnet['gateway_ip'], - 'dns_nameservers': crd_dns_nameservers, - 'allocation_pools': crd_allocation_pools, - 'host_routes': crd_host_routes, - }} - - def create_subnet_postcommit(self, context): - """Send create_subnet data to CRD service.""" - - subnet = context.current - body = self._prepare_crd_subnet(subnet) - self._crdclient.create_subnet(body=body) - LOG.debug("create_subnet update sent to CRD Server: %s", body) - - def update_subnet_postcommit(self, context): - """Send update_subnet data to CRD service.""" - - subnet = context.current - body = self._prepare_crd_subnet(subnet) - self._crdclient.update_subnet(subnet['id'], body=body) - LOG.debug("update_subnet update sent to CRD Server: %s", body) - - def delete_subnet_postcommit(self, context): - """Send delete_subnet data to CRD service.""" - - subnet = context.current - self._crdclient.delete_subnet(subnet['id']) - LOG.debug("delete_subnet update sent to CRD Server: %s", subnet['id']) - - def bind_port(self, context): - """Set porting binding data for use with nova.""" - - LOG.debug("Attempting to bind port %(port)s on " - "network %(network)s", - {'port': context.current['id'], - 'network': context.network.current['id']}) - # Prepared porting binding data - for segment in context.network.network_segments: - if self.check_segment(segment): 
- context.set_binding(segment[api.ID], - self.vif_type, - self.vif_details, - status=n_const.PORT_STATUS_ACTIVE) - LOG.debug("Bound using segment: %s", segment) - return - else: - LOG.debug("Refusing to bind port for segment ID %(id)s, " - "segment %(seg)s, phys net %(physnet)s, and " - "network type %(nettype)s", - {'id': segment[api.ID], - 'seg': segment[api.SEGMENTATION_ID], - 'physnet': segment[api.PHYSICAL_NETWORK], - 'nettype': segment[api.NETWORK_TYPE]}) - - @log.log - def check_segment(self, segment): - """Verify a segment is valid for the FSL SDN MechanismDriver.""" - - return segment[api.NETWORK_TYPE] in [constants.TYPE_VLAN, - constants.TYPE_VXLAN] diff --git a/neutron/plugins/ml2/drivers/mechanism_ncs.py b/neutron/plugins/ml2/drivers/mechanism_ncs.py deleted file mode 100644 index 833447731..000000000 --- a/neutron/plugins/ml2/drivers/mechanism_ncs.py +++ /dev/null @@ -1,182 +0,0 @@ -# Copyright (c) 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import re - -from oslo.config import cfg -import requests - -from neutron.openstack.common import jsonutils -from neutron.openstack.common import log -from neutron.plugins.ml2 import driver_api as api - -LOG = log.getLogger(__name__) - -ncs_opts = [ - cfg.StrOpt('url', - help=_("HTTP URL of Tail-f NCS REST interface.")), - cfg.StrOpt('username', - help=_("HTTP username for authentication")), - cfg.StrOpt('password', secret=True, - help=_("HTTP password for authentication")), - cfg.IntOpt('timeout', default=10, - help=_("HTTP timeout in seconds.")) -] - -cfg.CONF.register_opts(ncs_opts, "ml2_ncs") - - -class NCSMechanismDriver(api.MechanismDriver): - - """Mechanism Driver for Tail-f Network Control System (NCS). - - This driver makes portions of the Neutron database available for - service provisioning in NCS. For example, NCS can use this - information to provision physical switches and routers in response - to OpenStack configuration changes. - - The database is replicated from Neutron to NCS using HTTP and JSON. - - The driver has two states: out-of-sync (initially) and in-sync. - - In the out-of-sync state each driver event triggers an attempt - to synchronize the complete database. On success the driver - transitions to the in-sync state. - - In the in-sync state each driver event triggers synchronization - of one network or port. On success the driver stays in-sync and - on failure it transitions to the out-of-sync state. - """ - out_of_sync = True - - def initialize(self): - self.url = cfg.CONF.ml2_ncs.url - self.timeout = cfg.CONF.ml2_ncs.timeout - self.username = cfg.CONF.ml2_ncs.username - self.password = cfg.CONF.ml2_ncs.password - - # Postcommit hooks are used to trigger synchronization. 
- - def create_network_postcommit(self, context): - self.synchronize('create', 'network', context) - - def update_network_postcommit(self, context): - self.synchronize('update', 'network', context) - - def delete_network_postcommit(self, context): - self.synchronize('delete', 'network', context) - - def create_subnet_postcommit(self, context): - self.synchronize('create', 'subnet', context) - - def update_subnet_postcommit(self, context): - self.synchronize('update', 'subnet', context) - - def delete_subnet_postcommit(self, context): - self.synchronize('delete', 'subnet', context) - - def create_port_postcommit(self, context): - self.synchronize('create', 'port', context) - - def update_port_postcommit(self, context): - self.synchronize('update', 'port', context) - - def delete_port_postcommit(self, context): - self.synchronize('delete', 'port', context) - - def synchronize(self, operation, object_type, context): - """Synchronize NCS with Neutron following a configuration change.""" - if self.out_of_sync: - self.sync_full(context) - else: - self.sync_object(operation, object_type, context) - - def sync_full(self, context): - """Resync the entire database to NCS. - Transition to the in-sync state on success. - """ - dbcontext = context._plugin_context - networks = context._plugin.get_networks(dbcontext) - subnets = context._plugin.get_subnets(dbcontext) - ports = context._plugin.get_ports(dbcontext) - for port in ports: - self.add_security_groups(context, dbcontext, port) - json = {'openstack': {'network': networks, - 'subnet': subnets, - 'port': ports}} - self.sendjson('put', '', json) - self.out_of_sync = False - - def sync_object(self, operation, object_type, context): - """Synchronize the single modified record to NCS. - Transition to the out-of-sync state on failure. 
- """ - self.out_of_sync = True - dbcontext = context._plugin_context - id = context.current['id'] - urlpath = object_type + '/' + id - if operation == 'delete': - self.sendjson('delete', urlpath, None) - else: - assert operation == 'create' or operation == 'update' - if object_type == 'network': - network = context._plugin.get_network(dbcontext, id) - self.sendjson('put', urlpath, {'network': network}) - elif object_type == 'subnet': - subnet = context._plugin.get_subnet(dbcontext, id) - self.sendjson('put', urlpath, {'subnet': subnet}) - else: - assert object_type == 'port' - port = context._plugin.get_port(dbcontext, id) - self.add_security_groups(context, dbcontext, port) - self.sendjson('put', urlpath, {'port': port}) - self.out_of_sync = False - - def add_security_groups(self, context, dbcontext, port): - """Populate the 'security_groups' field with entire records.""" - groups = [context._plugin.get_security_group(dbcontext, sg) - for sg in port['security_groups']] - port['security_groups'] = groups - - def sendjson(self, method, urlpath, obj): - obj = self.escape_keys(obj) - headers = {'Content-Type': 'application/vnd.yang.data+json'} - if obj is None: - data = None - else: - data = jsonutils.dumps(obj, indent=2) - auth = None - if self.username and self.password: - auth = (self.username, self.password) - if self.url: - url = '/'.join([self.url, urlpath]) - r = requests.request(method, url=url, - headers=headers, data=data, - auth=auth, timeout=self.timeout) - r.raise_for_status() - - def escape_keys(self, obj): - """Escape JSON keys to be NCS compatible. - NCS does not allow period (.) or colon (:) characters. 
- """ - if isinstance(obj, dict): - obj = dict((self.escape(k), self.escape_keys(v)) - for k, v in obj.iteritems()) - if isinstance(obj, list): - obj = [self.escape_keys(x) for x in obj] - return obj - - def escape(self, string): - return re.sub('[:._]', '-', string) diff --git a/neutron/plugins/ml2/drivers/mechanism_odl.py b/neutron/plugins/ml2/drivers/mechanism_odl.py deleted file mode 100644 index 416e870d1..000000000 --- a/neutron/plugins/ml2/drivers/mechanism_odl.py +++ /dev/null @@ -1,374 +0,0 @@ -# Copyright (c) 2013-2014 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# @author: Kyle Mestery, Cisco Systems, Inc. -# @author: Dave Tucker, Hewlett-Packard Development Company L.P. 
- -import time - -from oslo.config import cfg -import requests - -from neutron.common import constants as n_const -from neutron.common import exceptions as n_exc -from neutron.common import utils -from neutron.extensions import portbindings -from neutron.openstack.common import excutils -from neutron.openstack.common import jsonutils -from neutron.openstack.common import log -from neutron.plugins.common import constants -from neutron.plugins.ml2 import driver_api as api - -LOG = log.getLogger(__name__) - -ODL_NETWORK = 'network' -ODL_NETWORKS = 'networks' -ODL_SUBNET = 'subnet' -ODL_SUBNETS = 'subnets' -ODL_PORT = 'port' -ODL_PORTS = 'ports' - -not_found_exception_map = {ODL_NETWORKS: n_exc.NetworkNotFound, - ODL_SUBNETS: n_exc.SubnetNotFound, - ODL_PORTS: n_exc.PortNotFound} - -odl_opts = [ - cfg.StrOpt('url', - help=_("HTTP URL of OpenDaylight REST interface.")), - cfg.StrOpt('username', - help=_("HTTP username for authentication")), - cfg.StrOpt('password', secret=True, - help=_("HTTP password for authentication")), - cfg.IntOpt('timeout', default=10, - help=_("HTTP timeout in seconds.")), - cfg.IntOpt('session_timeout', default=30, - help=_("Tomcat session timeout in minutes.")), -] - -cfg.CONF.register_opts(odl_opts, "ml2_odl") - - -def try_del(d, keys): - """Ignore key errors when deleting from a dictionary.""" - for key in keys: - try: - del d[key] - except KeyError: - pass - - -class OpendaylightAuthError(n_exc.NeutronException): - message = '%(msg)s' - - -class JsessionId(requests.auth.AuthBase): - - """Attaches the JSESSIONID and JSESSIONIDSSO cookies to an HTTP Request. - - If the cookies are not available or when the session expires, a new - set of cookies are obtained. - """ - - def __init__(self, url, username, password): - """Initialization function for JsessionId.""" - - # NOTE(kmestery) The 'limit' paramater is intended to limit how much - # data is returned from ODL. 
This is not implemented in the Hydrogen - # release of OpenDaylight, but will be implemented in the Helium - # timeframe. Hydrogen will silently ignore this value. - self.url = str(url) + '/' + ODL_NETWORKS + '?limit=1' - self.username = username - self.password = password - self.auth_cookies = None - self.last_request = None - self.expired = None - self.session_timeout = cfg.CONF.ml2_odl.session_timeout * 60 - self.session_deadline = 0 - - def obtain_auth_cookies(self): - """Make a REST call to obtain cookies for ODL authenticiation.""" - - try: - r = requests.get(self.url, auth=(self.username, self.password)) - r.raise_for_status() - except requests.exceptions.HTTPError as e: - raise OpendaylightAuthError(msg=_("Failed to authenticate with " - "OpenDaylight: %s") % e) - except requests.exceptions.Timeout as e: - raise OpendaylightAuthError(msg=_("Authentication Timed" - " Out: %s") % e) - - jsessionid = r.cookies.get('JSESSIONID') - jsessionidsso = r.cookies.get('JSESSIONIDSSO') - if jsessionid and jsessionidsso: - self.auth_cookies = dict(JSESSIONID=jsessionid, - JSESSIONIDSSO=jsessionidsso) - - def __call__(self, r): - """Verify timestamp for Tomcat session timeout.""" - - if time.time() > self.session_deadline: - self.obtain_auth_cookies() - self.session_deadline = time.time() + self.session_timeout - r.prepare_cookies(self.auth_cookies) - return r - - -class OpenDaylightMechanismDriver(api.MechanismDriver): - - """Mechanism Driver for OpenDaylight. - - This driver was a port from the Tail-F NCS MechanismDriver. The API - exposed by ODL is slightly different from the API exposed by NCS, - but the general concepts are the same. 
- """ - auth = None - out_of_sync = True - - def initialize(self): - self.url = cfg.CONF.ml2_odl.url - self.timeout = cfg.CONF.ml2_odl.timeout - self.username = cfg.CONF.ml2_odl.username - self.password = cfg.CONF.ml2_odl.password - required_opts = ('url', 'username', 'password') - for opt in required_opts: - if not getattr(self, opt): - raise cfg.RequiredOptError(opt, 'ml2_odl') - self.auth = JsessionId(self.url, self.username, self.password) - self.vif_type = portbindings.VIF_TYPE_OVS - self.vif_details = {portbindings.CAP_PORT_FILTER: True} - - # Postcommit hooks are used to trigger synchronization. - - def create_network_postcommit(self, context): - self.synchronize('create', ODL_NETWORKS, context) - - def update_network_postcommit(self, context): - self.synchronize('update', ODL_NETWORKS, context) - - def delete_network_postcommit(self, context): - self.synchronize('delete', ODL_NETWORKS, context) - - def create_subnet_postcommit(self, context): - self.synchronize('create', ODL_SUBNETS, context) - - def update_subnet_postcommit(self, context): - self.synchronize('update', ODL_SUBNETS, context) - - def delete_subnet_postcommit(self, context): - self.synchronize('delete', ODL_SUBNETS, context) - - def create_port_postcommit(self, context): - self.synchronize('create', ODL_PORTS, context) - - def update_port_postcommit(self, context): - self.synchronize('update', ODL_PORTS, context) - - def delete_port_postcommit(self, context): - self.synchronize('delete', ODL_PORTS, context) - - def synchronize(self, operation, object_type, context): - """Synchronize ODL with Neutron following a configuration change.""" - if self.out_of_sync: - self.sync_full(context) - else: - self.sync_object(operation, object_type, context) - - def filter_create_network_attributes(self, network, context, dbcontext): - """Filter out network attributes not required for a create.""" - try_del(network, ['status', 'subnets']) - - def filter_create_subnet_attributes(self, subnet, context, 
dbcontext): - """Filter out subnet attributes not required for a create.""" - pass - - def filter_create_port_attributes(self, port, context, dbcontext): - """Filter out port attributes not required for a create.""" - self.add_security_groups(context, dbcontext, port) - # TODO(kmestery): Converting to uppercase due to ODL bug - # https://bugs.opendaylight.org/show_bug.cgi?id=477 - port['mac_address'] = port['mac_address'].upper() - try_del(port, ['status']) - - def sync_resources(self, resource_name, collection_name, resources, - context, dbcontext, attr_filter): - """Sync objects from Neutron over to OpenDaylight. - - This will handle syncing networks, subnets, and ports from Neutron to - OpenDaylight. It also filters out the requisite items which are not - valid for create API operations. - """ - to_be_synced = [] - for resource in resources: - try: - urlpath = collection_name + '/' + resource['id'] - self.sendjson('get', urlpath, None) - except requests.exceptions.HTTPError as e: - with excutils.save_and_reraise_exception() as ctx: - if e.response.status_code == 404: - attr_filter(resource, context, dbcontext) - to_be_synced.append(resource) - ctx.reraise = False - - key = resource_name if len(to_be_synced) == 1 else collection_name - - # 400 errors are returned if an object exists, which we ignore. - self.sendjson('post', collection_name, {key: to_be_synced}, [400]) - - @utils.synchronized('odl-sync-full') - def sync_full(self, context): - """Resync the entire database to ODL. - - Transition to the in-sync state on success. - Note: we only allow a single thead in here at a time. 
- """ - if not self.out_of_sync: - return - dbcontext = context._plugin_context - networks = context._plugin.get_networks(dbcontext) - subnets = context._plugin.get_subnets(dbcontext) - ports = context._plugin.get_ports(dbcontext) - - self.sync_resources(ODL_NETWORK, ODL_NETWORKS, networks, - context, dbcontext, - self.filter_create_network_attributes) - self.sync_resources(ODL_SUBNET, ODL_SUBNETS, subnets, - context, dbcontext, - self.filter_create_subnet_attributes) - self.sync_resources(ODL_PORT, ODL_PORTS, ports, - context, dbcontext, - self.filter_create_port_attributes) - self.out_of_sync = False - - def filter_update_network_attributes(self, network, context, dbcontext): - """Filter out network attributes for an update operation.""" - try_del(network, ['id', 'status', 'subnets', 'tenant_id']) - - def filter_update_subnet_attributes(self, subnet, context, dbcontext): - """Filter out subnet attributes for an update operation.""" - try_del(subnet, ['id', 'network_id', 'ip_version', 'cidr', - 'allocation_pools', 'tenant_id']) - - def filter_update_port_attributes(self, port, context, dbcontext): - """Filter out port attributes for an update operation.""" - self.add_security_groups(context, dbcontext, port) - try_del(port, ['network_id', 'id', 'status', 'mac_address', - 'tenant_id', 'fixed_ips']) - - create_object_map = {ODL_NETWORKS: filter_create_network_attributes, - ODL_SUBNETS: filter_create_subnet_attributes, - ODL_PORTS: filter_create_port_attributes} - - update_object_map = {ODL_NETWORKS: filter_update_network_attributes, - ODL_SUBNETS: filter_update_subnet_attributes, - ODL_PORTS: filter_update_port_attributes} - - def sync_single_resource(self, operation, object_type, obj_id, - context, attr_filter_create, attr_filter_update): - """Sync over a single resource from Neutron to OpenDaylight. 
- - Handle syncing a single operation over to OpenDaylight, and correctly - filter attributes out which are not required for the requisite - operation (create or update) being handled. - """ - dbcontext = context._plugin_context - if operation == 'create': - urlpath = object_type - method = 'post' - else: - urlpath = object_type + '/' + obj_id - method = 'put' - - try: - obj_getter = getattr(context._plugin, 'get_%s' % object_type[:-1]) - resource = obj_getter(dbcontext, obj_id) - except not_found_exception_map[object_type]: - LOG.debug(_('%(object_type)s not found (%(obj_id)s)'), - {'object_type': object_type.capitalize(), - 'obj_id': obj_id}) - else: - if operation == 'create': - attr_filter_create(self, resource, context, dbcontext) - elif operation == 'update': - attr_filter_update(self, resource, context, dbcontext) - try: - # 400 errors are returned if an object exists, which we ignore. - self.sendjson(method, urlpath, {object_type[:-1]: resource}, - [400]) - except Exception: - with excutils.save_and_reraise_exception(): - self.out_of_sync = True - - def sync_object(self, operation, object_type, context): - """Synchronize the single modified record to ODL.""" - obj_id = context.current['id'] - - self.sync_single_resource(operation, object_type, obj_id, context, - self.create_object_map[object_type], - self.update_object_map[object_type]) - - def add_security_groups(self, context, dbcontext, port): - """Populate the 'security_groups' field with entire records.""" - groups = [context._plugin.get_security_group(dbcontext, sg) - for sg in port['security_groups']] - port['security_groups'] = groups - - def sendjson(self, method, urlpath, obj, ignorecodes=[]): - """Send json to the OpenDaylight controller.""" - - headers = {'Content-Type': 'application/json'} - data = jsonutils.dumps(obj, indent=2) if obj else None - url = '/'.join([self.url, urlpath]) - LOG.debug(_('ODL-----> sending URL (%s) <-----ODL') % url) - LOG.debug(_('ODL-----> sending JSON (%s) 
<-----ODL') % obj) - r = requests.request(method, url=url, - headers=headers, data=data, - auth=self.auth, timeout=self.timeout) - - # ignorecodes contains a list of HTTP error codes to ignore. - if r.status_code in ignorecodes: - return - r.raise_for_status() - - def bind_port(self, context): - LOG.debug(_("Attempting to bind port %(port)s on " - "network %(network)s"), - {'port': context.current['id'], - 'network': context.network.current['id']}) - for segment in context.network.network_segments: - if self.check_segment(segment): - context.set_binding(segment[api.ID], - self.vif_type, - self.vif_details, - status=n_const.PORT_STATUS_ACTIVE) - LOG.debug(_("Bound using segment: %s"), segment) - return - else: - LOG.debug(_("Refusing to bind port for segment ID %(id)s, " - "segment %(seg)s, phys net %(physnet)s, and " - "network type %(nettype)s"), - {'id': segment[api.ID], - 'seg': segment[api.SEGMENTATION_ID], - 'physnet': segment[api.PHYSICAL_NETWORK], - 'nettype': segment[api.NETWORK_TYPE]}) - - def check_segment(self, segment): - """Verify a segment is valid for the OpenDaylight MechanismDriver. - - Verify the requested segment is supported by ODL and return True or - False to indicate this to callers. - """ - network_type = segment[api.NETWORK_TYPE] - return network_type in [constants.TYPE_LOCAL, constants.TYPE_GRE, - constants.TYPE_VXLAN, constants.TYPE_VLAN] diff --git a/neutron/plugins/ml2/drivers/mlnx/__init__.py b/neutron/plugins/ml2/drivers/mlnx/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/neutron/plugins/ml2/drivers/mlnx/config.py b/neutron/plugins/ml2/drivers/mlnx/config.py deleted file mode 100644 index c9641d53c..000000000 --- a/neutron/plugins/ml2/drivers/mlnx/config.py +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright (c) 2014 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo.config import cfg - -from neutron.extensions import portbindings - -eswitch_opts = [ - cfg.StrOpt('vnic_type', - default=portbindings.VIF_TYPE_MLNX_DIRECT, - help=_("Type of VM network interface: mlnx_direct or " - "hostdev")), - cfg.BoolOpt('apply_profile_patch', - default=False, - help=_("Enable server compatibility with old nova")), -] - - -cfg.CONF.register_opts(eswitch_opts, "ESWITCH") diff --git a/neutron/plugins/ml2/drivers/mlnx/mech_mlnx.py b/neutron/plugins/ml2/drivers/mlnx/mech_mlnx.py deleted file mode 100644 index 97eb03a4c..000000000 --- a/neutron/plugins/ml2/drivers/mlnx/mech_mlnx.py +++ /dev/null @@ -1,91 +0,0 @@ -# Copyright (c) 2014 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from oslo.config import cfg - -from neutron.common import constants -from neutron.extensions import portbindings -from neutron.openstack.common import jsonutils -from neutron.openstack.common import log -from neutron.plugins.ml2 import driver_api as api -from neutron.plugins.ml2.drivers import mech_agent -from neutron.plugins.ml2.drivers.mlnx import config # noqa - -LOG = log.getLogger(__name__) - - -class MlnxMechanismDriver(mech_agent.SimpleAgentMechanismDriverBase): - """Attach to networks using Mellanox eSwitch L2 agent. - - The MellanoxMechanismDriver integrates the ml2 plugin with the - Mellanox eswitch L2 agent. Port binding with this driver requires the - Mellanox eswitch agent to be running on the port's host, and that agent - to have connectivity to at least one segment of the port's - network. - """ - - def __init__(self): - # REVISIT(irenab): update supported_vnic_types to contain - # only VNIC_DIRECT and VNIC_MACVTAP once its possible to specify - # vnic_type via nova API/GUI. Currently VNIC_NORMAL is included - # to enable VM creation via GUI. It should be noted, that if - # several MDs are capable to bing bind port on chosen host, the - # first listed MD will bind the port for VNIC_NORMAL. 
- super(MlnxMechanismDriver, self).__init__( - constants.AGENT_TYPE_MLNX, - cfg.CONF.ESWITCH.vnic_type, - {portbindings.CAP_PORT_FILTER: False}, - portbindings.VNIC_TYPES) - self.update_profile = cfg.CONF.ESWITCH.apply_profile_patch - - def check_segment_for_agent(self, segment, agent): - mappings = agent['configurations'].get('interface_mappings', {}) - LOG.debug(_("Checking segment: %(segment)s " - "for mappings: %(mappings)s "), - {'segment': segment, 'mappings': mappings}) - - network_type = segment[api.NETWORK_TYPE] - if network_type == 'local': - return True - elif network_type in ['flat', 'vlan']: - return segment[api.PHYSICAL_NETWORK] in mappings - else: - return False - - def try_to_bind_segment_for_agent(self, context, segment, agent): - if self.check_segment_for_agent(segment, agent): - vif_type = self._get_vif_type( - context.current[portbindings.VNIC_TYPE]) - if segment[api.NETWORK_TYPE] in ['flat', 'vlan']: - self.vif_details['physical_network'] = segment[ - 'physical_network'] - context.set_binding(segment[api.ID], - vif_type, - self.vif_details) - # REVISIT(irenab): Temporary solution till nova support - # will be merged for physical_network propagation - # via VIF object to VIFDriver (required by mlnx vif plugging). - if self.update_profile: - profile = {'physical_network': - segment['physical_network']} - context._binding.profile = jsonutils.dumps(profile) - - def _get_vif_type(self, requested_vnic_type): - if requested_vnic_type == portbindings.VNIC_MACVTAP: - return portbindings.VIF_TYPE_MLNX_DIRECT - elif requested_vnic_type == portbindings.VNIC_DIRECT: - return portbindings.VIF_TYPE_MLNX_HOSTDEV - return self.vif_type diff --git a/neutron/plugins/ml2/drivers/type_flat.py b/neutron/plugins/ml2/drivers/type_flat.py deleted file mode 100644 index 3e736eabc..000000000 --- a/neutron/plugins/ml2/drivers/type_flat.py +++ /dev/null @@ -1,131 +0,0 @@ -# Copyright (c) 2013 OpenStack Foundation -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo.config import cfg -import sqlalchemy as sa - -from neutron.common import exceptions as exc -from neutron.db import model_base -from neutron.openstack.common import log -from neutron.plugins.common import constants as p_const -from neutron.plugins.ml2 import driver_api as api - -LOG = log.getLogger(__name__) - -flat_opts = [ - cfg.ListOpt('flat_networks', - default=[], - help=_("List of physical_network names with which flat " - "networks can be created. Use * to allow flat " - "networks with arbitrary physical_network names.")) -] - -cfg.CONF.register_opts(flat_opts, "ml2_type_flat") - - -class FlatAllocation(model_base.BASEV2): - """Represent persistent allocation state of a physical network. - - If a record exists for a physical network, then that physical - network has been allocated as a flat network. - """ - - __tablename__ = 'ml2_flat_allocations' - - physical_network = sa.Column(sa.String(64), nullable=False, - primary_key=True) - - -class FlatTypeDriver(api.TypeDriver): - """Manage state for flat networks with ML2. - - The FlatTypeDriver implements the 'flat' network_type. Flat - network segments provide connectivity between VMs and other - devices using any connected IEEE 802.1D conformant - physical_network, without the use of VLAN tags, tunneling, or - other segmentation mechanisms. Therefore at most one flat network - segment can exist on each available physical_network. 
- """ - - def __init__(self): - self._parse_networks(cfg.CONF.ml2_type_flat.flat_networks) - - def _parse_networks(self, entries): - self.flat_networks = entries - if '*' in self.flat_networks: - LOG.info(_("Arbitrary flat physical_network names allowed")) - self.flat_networks = None - else: - # TODO(rkukura): Validate that each physical_network name - # is neither empty nor too long. - LOG.info(_("Allowable flat physical_network names: %s"), - self.flat_networks) - - def get_type(self): - return p_const.TYPE_FLAT - - def initialize(self): - LOG.info(_("ML2 FlatTypeDriver initialization complete")) - - def validate_provider_segment(self, segment): - physical_network = segment.get(api.PHYSICAL_NETWORK) - if not physical_network: - msg = _("physical_network required for flat provider network") - raise exc.InvalidInput(error_message=msg) - if self.flat_networks and physical_network not in self.flat_networks: - msg = (_("physical_network '%s' unknown for flat provider network") - % physical_network) - raise exc.InvalidInput(error_message=msg) - - for key, value in segment.iteritems(): - if value and key not in [api.NETWORK_TYPE, - api.PHYSICAL_NETWORK]: - msg = _("%s prohibited for flat provider network") % key - raise exc.InvalidInput(error_message=msg) - - def reserve_provider_segment(self, session, segment): - physical_network = segment[api.PHYSICAL_NETWORK] - with session.begin(subtransactions=True): - try: - alloc = (session.query(FlatAllocation). - filter_by(physical_network=physical_network). - with_lockmode('update'). - one()) - raise exc.FlatNetworkInUse( - physical_network=physical_network) - except sa.orm.exc.NoResultFound: - LOG.debug(_("Reserving flat network on physical " - "network %s"), physical_network) - alloc = FlatAllocation(physical_network=physical_network) - session.add(alloc) - - def allocate_tenant_segment(self, session): - # Tenant flat networks are not supported. 
- return - - def release_segment(self, session, segment): - physical_network = segment[api.PHYSICAL_NETWORK] - with session.begin(subtransactions=True): - try: - alloc = (session.query(FlatAllocation). - filter_by(physical_network=physical_network). - with_lockmode('update'). - one()) - session.delete(alloc) - LOG.debug(_("Releasing flat network on physical " - "network %s"), physical_network) - except sa.orm.exc.NoResultFound: - LOG.warning(_("No flat network found on physical network %s"), - physical_network) diff --git a/neutron/plugins/ml2/drivers/type_gre.py b/neutron/plugins/ml2/drivers/type_gre.py deleted file mode 100644 index abd894bfe..000000000 --- a/neutron/plugins/ml2/drivers/type_gre.py +++ /dev/null @@ -1,190 +0,0 @@ -# Copyright (c) 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo.config import cfg -from six import moves -import sqlalchemy as sa -from sqlalchemy.orm import exc as sa_exc - -from neutron.common import exceptions as exc -from neutron.db import api as db_api -from neutron.db import model_base -from neutron.openstack.common import log -from neutron.plugins.common import constants as p_const -from neutron.plugins.ml2 import driver_api as api -from neutron.plugins.ml2.drivers import type_tunnel - -LOG = log.getLogger(__name__) - -gre_opts = [ - cfg.ListOpt('tunnel_id_ranges', - default=[], - help=_("Comma-separated list of : tuples " - "enumerating ranges of GRE tunnel IDs that are " - "available for tenant network allocation")) -] - -cfg.CONF.register_opts(gre_opts, "ml2_type_gre") - - -class GreAllocation(model_base.BASEV2): - - __tablename__ = 'ml2_gre_allocations' - - gre_id = sa.Column(sa.Integer, nullable=False, primary_key=True, - autoincrement=False) - allocated = sa.Column(sa.Boolean, nullable=False, default=False) - - -class GreEndpoints(model_base.BASEV2): - """Represents tunnel endpoint in RPC mode.""" - __tablename__ = 'ml2_gre_endpoints' - - ip_address = sa.Column(sa.String(64), primary_key=True) - - def __repr__(self): - return "" % self.ip_address - - -class GreTypeDriver(type_tunnel.TunnelTypeDriver): - - def get_type(self): - return p_const.TYPE_GRE - - def initialize(self): - self.gre_id_ranges = [] - self._parse_tunnel_ranges( - cfg.CONF.ml2_type_gre.tunnel_id_ranges, - self.gre_id_ranges, - p_const.TYPE_GRE - ) - self._sync_gre_allocations() - - def reserve_provider_segment(self, session, segment): - segmentation_id = segment.get(api.SEGMENTATION_ID) - with session.begin(subtransactions=True): - try: - alloc = (session.query(GreAllocation). - filter_by(gre_id=segmentation_id). - with_lockmode('update'). 
- one()) - if alloc.allocated: - raise exc.TunnelIdInUse(tunnel_id=segmentation_id) - LOG.debug(_("Reserving specific gre tunnel %s from pool"), - segmentation_id) - alloc.allocated = True - except sa_exc.NoResultFound: - LOG.debug(_("Reserving specific gre tunnel %s outside pool"), - segmentation_id) - alloc = GreAllocation(gre_id=segmentation_id) - alloc.allocated = True - session.add(alloc) - - def allocate_tenant_segment(self, session): - with session.begin(subtransactions=True): - alloc = (session.query(GreAllocation). - filter_by(allocated=False). - with_lockmode('update'). - first()) - if alloc: - LOG.debug(_("Allocating gre tunnel id %(gre_id)s"), - {'gre_id': alloc.gre_id}) - alloc.allocated = True - return {api.NETWORK_TYPE: p_const.TYPE_GRE, - api.PHYSICAL_NETWORK: None, - api.SEGMENTATION_ID: alloc.gre_id} - - def release_segment(self, session, segment): - gre_id = segment[api.SEGMENTATION_ID] - with session.begin(subtransactions=True): - try: - alloc = (session.query(GreAllocation). - filter_by(gre_id=gre_id). - with_lockmode('update'). 
- one()) - alloc.allocated = False - for lo, hi in self.gre_id_ranges: - if lo <= gre_id <= hi: - LOG.debug(_("Releasing gre tunnel %s to pool"), - gre_id) - break - else: - session.delete(alloc) - LOG.debug(_("Releasing gre tunnel %s outside pool"), - gre_id) - except sa_exc.NoResultFound: - LOG.warning(_("gre_id %s not found"), gre_id) - - def _sync_gre_allocations(self): - """Synchronize gre_allocations table with configured tunnel ranges.""" - - # determine current configured allocatable gres - gre_ids = set() - for gre_id_range in self.gre_id_ranges: - tun_min, tun_max = gre_id_range - if tun_max + 1 - tun_min > 1000000: - LOG.error(_("Skipping unreasonable gre ID range " - "%(tun_min)s:%(tun_max)s"), - {'tun_min': tun_min, 'tun_max': tun_max}) - else: - gre_ids |= set(moves.xrange(tun_min, tun_max + 1)) - - session = db_api.get_session() - with session.begin(subtransactions=True): - # remove from table unallocated tunnels not currently allocatable - allocs = (session.query(GreAllocation).all()) - for alloc in allocs: - try: - # see if tunnel is allocatable - gre_ids.remove(alloc.gre_id) - except KeyError: - # it's not allocatable, so check if its allocated - if not alloc.allocated: - # it's not, so remove it from table - LOG.debug(_("Removing tunnel %s from pool"), - alloc.gre_id) - session.delete(alloc) - - # add missing allocatable tunnels to table - for gre_id in sorted(gre_ids): - alloc = GreAllocation(gre_id=gre_id) - session.add(alloc) - - def get_gre_allocation(self, session, gre_id): - return session.query(GreAllocation).filter_by(gre_id=gre_id).first() - - def get_endpoints(self): - """Get every gre endpoints from database.""" - - LOG.debug(_("get_gre_endpoints() called")) - session = db_api.get_session() - - with session.begin(subtransactions=True): - gre_endpoints = session.query(GreEndpoints) - return [{'ip_address': gre_endpoint.ip_address} - for gre_endpoint in gre_endpoints] - - def add_endpoint(self, ip): - LOG.debug(_("add_gre_endpoint() 
called for ip %s"), ip) - session = db_api.get_session() - with session.begin(subtransactions=True): - try: - gre_endpoint = (session.query(GreEndpoints). - filter_by(ip_address=ip).one()) - LOG.warning(_("Gre endpoint with ip %s already exists"), ip) - except sa_exc.NoResultFound: - gre_endpoint = GreEndpoints(ip_address=ip) - session.add(gre_endpoint) - return gre_endpoint diff --git a/neutron/plugins/ml2/drivers/type_local.py b/neutron/plugins/ml2/drivers/type_local.py deleted file mode 100644 index e0281a245..000000000 --- a/neutron/plugins/ml2/drivers/type_local.py +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright (c) 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from neutron.common import exceptions as exc -from neutron.openstack.common import log -from neutron.plugins.common import constants as p_const -from neutron.plugins.ml2 import driver_api as api - -LOG = log.getLogger(__name__) - - -class LocalTypeDriver(api.TypeDriver): - """Manage state for local networks with ML2. - - The LocalTypeDriver implements the 'local' network_type. Local - network segments provide connectivity between VMs and other - devices running on the same node, provided that a common local - network bridging technology is available to those devices. Local - network segments do not provide any connectivity between nodes. 
- """ - - def __init__(self): - LOG.info(_("ML2 LocalTypeDriver initialization complete")) - - def get_type(self): - return p_const.TYPE_LOCAL - - def initialize(self): - pass - - def validate_provider_segment(self, segment): - for key, value in segment.iteritems(): - if value and key not in [api.NETWORK_TYPE]: - msg = _("%s prohibited for local provider network") % key - raise exc.InvalidInput(error_message=msg) - - def reserve_provider_segment(self, session, segment): - # No resources to reserve - pass - - def allocate_tenant_segment(self, session): - # No resources to allocate - return {api.NETWORK_TYPE: p_const.TYPE_LOCAL} - - def release_segment(self, session, segment): - # No resources to release - pass diff --git a/neutron/plugins/ml2/drivers/type_tunnel.py b/neutron/plugins/ml2/drivers/type_tunnel.py deleted file mode 100644 index e209029b9..000000000 --- a/neutron/plugins/ml2/drivers/type_tunnel.py +++ /dev/null @@ -1,132 +0,0 @@ -# Copyright (c) 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import abc -import six - -from neutron.common import exceptions as exc -from neutron.common import topics -from neutron.openstack.common import log -from neutron.plugins.ml2 import driver_api as api - -LOG = log.getLogger(__name__) - -TUNNEL = 'tunnel' - - -@six.add_metaclass(abc.ABCMeta) -class TunnelTypeDriver(api.TypeDriver): - """Define stable abstract interface for ML2 type drivers. 
- - tunnel type networks rely on tunnel endpoints. This class defines abstract - methods to manage these endpoints. - """ - - @abc.abstractmethod - def add_endpoint(self, ip): - """Register the endpoint in the type_driver database. - - param ip: the ip of the endpoint - """ - pass - - @abc.abstractmethod - def get_endpoints(self): - """Get every endpoint managed by the type_driver - - :returns a list of dict [{id:endpoint_id, ip_address:endpoint_ip},..] - """ - pass - - def _parse_tunnel_ranges(self, tunnel_ranges, current_range, tunnel_type): - for entry in tunnel_ranges: - entry = entry.strip() - try: - tun_min, tun_max = entry.split(':') - tun_min = tun_min.strip() - tun_max = tun_max.strip() - current_range.append((int(tun_min), int(tun_max))) - except ValueError as ex: - LOG.error(_("Invalid tunnel ID range: '%(range)s' - %(e)s. " - "Agent terminated!"), - {'range': tunnel_ranges, 'e': ex}) - LOG.info(_("%(type)s ID ranges: %(range)s"), - {'type': tunnel_type, 'range': current_range}) - - def validate_provider_segment(self, segment): - physical_network = segment.get(api.PHYSICAL_NETWORK) - if physical_network: - msg = _("provider:physical_network specified for %s " - "network") % segment.get(api.NETWORK_TYPE) - raise exc.InvalidInput(error_message=msg) - - segmentation_id = segment.get(api.SEGMENTATION_ID) - if not segmentation_id: - msg = _("segmentation_id required for %s provider " - "network") % segment.get(api.NETWORK_TYPE) - raise exc.InvalidInput(error_message=msg) - - for key, value in segment.items(): - if value and key not in [api.NETWORK_TYPE, - api.SEGMENTATION_ID]: - msg = (_("%(key)s prohibited for %(tunnel)s provider network"), - {'key': key, 'tunnel': segment.get(api.NETWORK_TYPE)}) - raise exc.InvalidInput(error_message=msg) - - -class TunnelRpcCallbackMixin(object): - - def __init__(self, notifier, type_manager): - self.notifier = notifier - self.type_manager = type_manager - - def tunnel_sync(self, rpc_context, **kwargs): - """Update new 
tunnel. - - Updates the database with the tunnel IP. All listening agents will also - be notified about the new tunnel IP. - """ - tunnel_ip = kwargs.get('tunnel_ip') - tunnel_type = kwargs.get('tunnel_type') - if not tunnel_type: - msg = _("Network_type value needed by the ML2 plugin") - raise exc.InvalidInput(error_message=msg) - driver = self.type_manager.drivers.get(tunnel_type) - if driver: - tunnel = driver.obj.add_endpoint(tunnel_ip) - tunnels = driver.obj.get_endpoints() - entry = {'tunnels': tunnels} - # Notify all other listening agents - self.notifier.tunnel_update(rpc_context, tunnel.ip_address, - tunnel_type) - # Return the list of tunnels IP's to the agent - return entry - else: - msg = _("network_type value '%s' not supported") % tunnel_type - raise exc.InvalidInput(error_message=msg) - - -class TunnelAgentRpcApiMixin(object): - - def _get_tunnel_update_topic(self): - return topics.get_topic_name(self.topic, - TUNNEL, - topics.UPDATE) - - def tunnel_update(self, context, tunnel_ip, tunnel_type): - self.fanout_cast(context, - self.make_msg('tunnel_update', - tunnel_ip=tunnel_ip, - tunnel_type=tunnel_type), - topic=self._get_tunnel_update_topic()) diff --git a/neutron/plugins/ml2/drivers/type_vlan.py b/neutron/plugins/ml2/drivers/type_vlan.py deleted file mode 100644 index 0159d5713..000000000 --- a/neutron/plugins/ml2/drivers/type_vlan.py +++ /dev/null @@ -1,267 +0,0 @@ -# Copyright (c) 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -import sys - -from oslo.config import cfg -from six import moves -import sqlalchemy as sa - -from neutron.common import constants as q_const -from neutron.common import exceptions as exc -from neutron.common import utils -from neutron.db import api as db_api -from neutron.db import model_base -from neutron.openstack.common import log -from neutron.plugins.common import constants as p_const -from neutron.plugins.common import utils as plugin_utils -from neutron.plugins.ml2 import driver_api as api - -LOG = log.getLogger(__name__) - -vlan_opts = [ - cfg.ListOpt('network_vlan_ranges', - default=[], - help=_("List of :: or " - " specifying physical_network names " - "usable for VLAN provider and tenant networks, as " - "well as ranges of VLAN tags on each available for " - "allocation to tenant networks.")) -] - -cfg.CONF.register_opts(vlan_opts, "ml2_type_vlan") - - -class VlanAllocation(model_base.BASEV2): - """Represent allocation state of a vlan_id on a physical network. - - If allocated is False, the vlan_id on the physical_network is - available for allocation to a tenant network. If allocated is - True, the vlan_id on the physical_network is in use, either as a - tenant or provider network. - - When an allocation is released, if the vlan_id for the - physical_network is inside the pool described by - VlanTypeDriver.network_vlan_ranges, then allocated is set to - False. If it is outside the pool, the record is deleted. - """ - - __tablename__ = 'ml2_vlan_allocations' - - physical_network = sa.Column(sa.String(64), nullable=False, - primary_key=True) - vlan_id = sa.Column(sa.Integer, nullable=False, primary_key=True, - autoincrement=False) - allocated = sa.Column(sa.Boolean, nullable=False) - - -class VlanTypeDriver(api.TypeDriver): - """Manage state for VLAN networks with ML2. - - The VlanTypeDriver implements the 'vlan' network_type. 
VLAN - network segments provide connectivity between VMs and other - devices using any connected IEEE 802.1Q conformant - physical_network segmented into virtual networks via IEEE 802.1Q - headers. Up to 4094 VLAN network segments can exist on each - available physical_network. - """ - - def __init__(self): - self._parse_network_vlan_ranges() - - def _parse_network_vlan_ranges(self): - try: - self.network_vlan_ranges = plugin_utils.parse_network_vlan_ranges( - cfg.CONF.ml2_type_vlan.network_vlan_ranges) - # TODO(rkukura): Validate that each physical_network name - # is neither empty nor too long. - except Exception: - LOG.exception(_("Failed to parse network_vlan_ranges. " - "Service terminated!")) - sys.exit(1) - LOG.info(_("Network VLAN ranges: %s"), self.network_vlan_ranges) - - def _sync_vlan_allocations(self): - session = db_api.get_session() - with session.begin(subtransactions=True): - # get existing allocations for all physical networks - allocations = dict() - allocs = (session.query(VlanAllocation). 
- with_lockmode('update')) - for alloc in allocs: - if alloc.physical_network not in allocations: - allocations[alloc.physical_network] = set() - allocations[alloc.physical_network].add(alloc) - - # process vlan ranges for each configured physical network - for (physical_network, - vlan_ranges) in self.network_vlan_ranges.items(): - # determine current configured allocatable vlans for - # this physical network - vlan_ids = set() - for vlan_min, vlan_max in vlan_ranges: - vlan_ids |= set(moves.xrange(vlan_min, vlan_max + 1)) - - # remove from table unallocated vlans not currently - # allocatable - if physical_network in allocations: - for alloc in allocations[physical_network]: - try: - # see if vlan is allocatable - vlan_ids.remove(alloc.vlan_id) - except KeyError: - # it's not allocatable, so check if its allocated - if not alloc.allocated: - # it's not, so remove it from table - LOG.debug(_("Removing vlan %(vlan_id)s on " - "physical network " - "%(physical_network)s from pool"), - {'vlan_id': alloc.vlan_id, - 'physical_network': - physical_network}) - session.delete(alloc) - del allocations[physical_network] - - # add missing allocatable vlans to table - for vlan_id in sorted(vlan_ids): - alloc = VlanAllocation(physical_network=physical_network, - vlan_id=vlan_id, - allocated=False) - session.add(alloc) - - # remove from table unallocated vlans for any unconfigured - # physical networks - for allocs in allocations.itervalues(): - for alloc in allocs: - if not alloc.allocated: - LOG.debug(_("Removing vlan %(vlan_id)s on physical " - "network %(physical_network)s from pool"), - {'vlan_id': alloc.vlan_id, - 'physical_network': - alloc.physical_network}) - session.delete(alloc) - - def get_type(self): - return p_const.TYPE_VLAN - - def initialize(self): - self._sync_vlan_allocations() - LOG.info(_("VlanTypeDriver initialization complete")) - - def validate_provider_segment(self, segment): - physical_network = segment.get(api.PHYSICAL_NETWORK) - if not 
physical_network: - msg = _("physical_network required for VLAN provider network") - raise exc.InvalidInput(error_message=msg) - if physical_network not in self.network_vlan_ranges: - msg = (_("physical_network '%s' unknown for VLAN provider network") - % physical_network) - raise exc.InvalidInput(error_message=msg) - - segmentation_id = segment.get(api.SEGMENTATION_ID) - if segmentation_id is None: - msg = _("segmentation_id required for VLAN provider network") - raise exc.InvalidInput(error_message=msg) - if not utils.is_valid_vlan_tag(segmentation_id): - msg = (_("segmentation_id out of range (%(min)s through " - "%(max)s)") % - {'min': q_const.MIN_VLAN_TAG, - 'max': q_const.MAX_VLAN_TAG}) - raise exc.InvalidInput(error_message=msg) - - for key, value in segment.items(): - if value and key not in [api.NETWORK_TYPE, - api.PHYSICAL_NETWORK, - api.SEGMENTATION_ID]: - msg = _("%s prohibited for VLAN provider network") % key - raise exc.InvalidInput(error_message=msg) - - def reserve_provider_segment(self, session, segment): - physical_network = segment[api.PHYSICAL_NETWORK] - vlan_id = segment[api.SEGMENTATION_ID] - with session.begin(subtransactions=True): - try: - alloc = (session.query(VlanAllocation). - filter_by(physical_network=physical_network, - vlan_id=vlan_id). - with_lockmode('update'). 
- one()) - if alloc.allocated: - raise exc.VlanIdInUse(vlan_id=vlan_id, - physical_network=physical_network) - LOG.debug(_("Reserving specific vlan %(vlan_id)s on physical " - "network %(physical_network)s from pool"), - {'vlan_id': vlan_id, - 'physical_network': physical_network}) - alloc.allocated = True - except sa.orm.exc.NoResultFound: - LOG.debug(_("Reserving specific vlan %(vlan_id)s on physical " - "network %(physical_network)s outside pool"), - {'vlan_id': vlan_id, - 'physical_network': physical_network}) - alloc = VlanAllocation(physical_network=physical_network, - vlan_id=vlan_id, - allocated=True) - session.add(alloc) - - def allocate_tenant_segment(self, session): - with session.begin(subtransactions=True): - alloc = (session.query(VlanAllocation). - filter_by(allocated=False). - with_lockmode('update'). - first()) - if alloc: - LOG.debug(_("Allocating vlan %(vlan_id)s on physical network " - "%(physical_network)s from pool"), - {'vlan_id': alloc.vlan_id, - 'physical_network': alloc.physical_network}) - alloc.allocated = True - return {api.NETWORK_TYPE: p_const.TYPE_VLAN, - api.PHYSICAL_NETWORK: alloc.physical_network, - api.SEGMENTATION_ID: alloc.vlan_id} - - def release_segment(self, session, segment): - physical_network = segment[api.PHYSICAL_NETWORK] - vlan_id = segment[api.SEGMENTATION_ID] - with session.begin(subtransactions=True): - try: - alloc = (session.query(VlanAllocation). - filter_by(physical_network=physical_network, - vlan_id=vlan_id). - with_lockmode('update'). 
- one()) - alloc.allocated = False - inside = False - for vlan_min, vlan_max in self.network_vlan_ranges.get( - physical_network, []): - if vlan_min <= vlan_id <= vlan_max: - inside = True - break - if not inside: - session.delete(alloc) - LOG.debug(_("Releasing vlan %(vlan_id)s on physical " - "network %(physical_network)s outside pool"), - {'vlan_id': vlan_id, - 'physical_network': physical_network}) - else: - LOG.debug(_("Releasing vlan %(vlan_id)s on physical " - "network %(physical_network)s to pool"), - {'vlan_id': vlan_id, - 'physical_network': physical_network}) - except sa.orm.exc.NoResultFound: - LOG.warning(_("No vlan_id %(vlan_id)s found on physical " - "network %(physical_network)s"), - {'vlan_id': vlan_id, - 'physical_network': physical_network}) diff --git a/neutron/plugins/ml2/drivers/type_vxlan.py b/neutron/plugins/ml2/drivers/type_vxlan.py deleted file mode 100644 index 3e5d47567..000000000 --- a/neutron/plugins/ml2/drivers/type_vxlan.py +++ /dev/null @@ -1,203 +0,0 @@ -# Copyright (c) 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# @author: Kyle Mestery, Cisco Systems, Inc. 
- -from oslo.config import cfg -import sqlalchemy as sa -from sqlalchemy.orm import exc as sa_exc - -from neutron.common import exceptions as exc -from neutron.db import api as db_api -from neutron.db import model_base -from neutron.openstack.common import log -from neutron.plugins.common import constants as p_const -from neutron.plugins.ml2 import driver_api as api -from neutron.plugins.ml2.drivers import type_tunnel - -LOG = log.getLogger(__name__) - -VXLAN_UDP_PORT = 4789 -MAX_VXLAN_VNI = 16777215 - -vxlan_opts = [ - cfg.ListOpt('vni_ranges', - default=[], - help=_("Comma-separated list of : tuples " - "enumerating ranges of VXLAN VNI IDs that are " - "available for tenant network allocation")), - cfg.StrOpt('vxlan_group', - help=_("Multicast group for VXLAN. If unset, disables VXLAN " - "multicast mode.")), -] - -cfg.CONF.register_opts(vxlan_opts, "ml2_type_vxlan") - - -class VxlanAllocation(model_base.BASEV2): - - __tablename__ = 'ml2_vxlan_allocations' - - vxlan_vni = sa.Column(sa.Integer, nullable=False, primary_key=True, - autoincrement=False) - allocated = sa.Column(sa.Boolean, nullable=False, default=False) - - -class VxlanEndpoints(model_base.BASEV2): - """Represents tunnel endpoint in RPC mode.""" - __tablename__ = 'ml2_vxlan_endpoints' - - ip_address = sa.Column(sa.String(64), primary_key=True) - udp_port = sa.Column(sa.Integer, primary_key=True, nullable=False, - autoincrement=False) - - def __repr__(self): - return "" % self.ip_address - - -class VxlanTypeDriver(type_tunnel.TunnelTypeDriver): - - def get_type(self): - return p_const.TYPE_VXLAN - - def initialize(self): - self.vxlan_vni_ranges = [] - self._parse_tunnel_ranges( - cfg.CONF.ml2_type_vxlan.vni_ranges, - self.vxlan_vni_ranges, - p_const.TYPE_VXLAN - ) - self._sync_vxlan_allocations() - - def reserve_provider_segment(self, session, segment): - segmentation_id = segment.get(api.SEGMENTATION_ID) - with session.begin(subtransactions=True): - try: - alloc = (session.query(VxlanAllocation). 
- filter_by(vxlan_vni=segmentation_id). - with_lockmode('update'). - one()) - if alloc.allocated: - raise exc.TunnelIdInUse(tunnel_id=segmentation_id) - LOG.debug(_("Reserving specific vxlan tunnel %s from pool"), - segmentation_id) - alloc.allocated = True - except sa_exc.NoResultFound: - LOG.debug(_("Reserving specific vxlan tunnel %s outside pool"), - segmentation_id) - alloc = VxlanAllocation(vxlan_vni=segmentation_id) - alloc.allocated = True - session.add(alloc) - - def allocate_tenant_segment(self, session): - with session.begin(subtransactions=True): - alloc = (session.query(VxlanAllocation). - filter_by(allocated=False). - with_lockmode('update'). - first()) - if alloc: - LOG.debug(_("Allocating vxlan tunnel vni %(vxlan_vni)s"), - {'vxlan_vni': alloc.vxlan_vni}) - alloc.allocated = True - return {api.NETWORK_TYPE: p_const.TYPE_VXLAN, - api.PHYSICAL_NETWORK: None, - api.SEGMENTATION_ID: alloc.vxlan_vni} - - def release_segment(self, session, segment): - vxlan_vni = segment[api.SEGMENTATION_ID] - with session.begin(subtransactions=True): - try: - alloc = (session.query(VxlanAllocation). - filter_by(vxlan_vni=vxlan_vni). - with_lockmode('update'). - one()) - alloc.allocated = False - for low, high in self.vxlan_vni_ranges: - if low <= vxlan_vni <= high: - LOG.debug(_("Releasing vxlan tunnel %s to pool"), - vxlan_vni) - break - else: - session.delete(alloc) - LOG.debug(_("Releasing vxlan tunnel %s outside pool"), - vxlan_vni) - except sa_exc.NoResultFound: - LOG.warning(_("vxlan_vni %s not found"), vxlan_vni) - - def _sync_vxlan_allocations(self): - """ - Synchronize vxlan_allocations table with configured tunnel ranges. 
- """ - - # determine current configured allocatable vnis - vxlan_vnis = set() - for tun_min, tun_max in self.vxlan_vni_ranges: - if tun_max + 1 - tun_min > MAX_VXLAN_VNI: - LOG.error(_("Skipping unreasonable VXLAN VNI range " - "%(tun_min)s:%(tun_max)s"), - {'tun_min': tun_min, 'tun_max': tun_max}) - else: - vxlan_vnis |= set(xrange(tun_min, tun_max + 1)) - - session = db_api.get_session() - with session.begin(subtransactions=True): - # remove from table unallocated tunnels not currently allocatable - allocs = session.query(VxlanAllocation).with_lockmode("update") - for alloc in allocs: - try: - # see if tunnel is allocatable - vxlan_vnis.remove(alloc.vxlan_vni) - except KeyError: - # it's not allocatable, so check if its allocated - if not alloc.allocated: - # it's not, so remove it from table - LOG.debug(_("Removing tunnel %s from pool"), - alloc.vxlan_vni) - session.delete(alloc) - - # add missing allocatable tunnels to table - for vxlan_vni in sorted(vxlan_vnis): - alloc = VxlanAllocation(vxlan_vni=vxlan_vni) - session.add(alloc) - - def get_vxlan_allocation(self, session, vxlan_vni): - with session.begin(subtransactions=True): - return session.query(VxlanAllocation).filter_by( - vxlan_vni=vxlan_vni).first() - - def get_endpoints(self): - """Get every vxlan endpoints from database.""" - - LOG.debug(_("get_vxlan_endpoints() called")) - session = db_api.get_session() - - with session.begin(subtransactions=True): - vxlan_endpoints = session.query(VxlanEndpoints) - return [{'ip_address': vxlan_endpoint.ip_address, - 'udp_port': vxlan_endpoint.udp_port} - for vxlan_endpoint in vxlan_endpoints] - - def add_endpoint(self, ip, udp_port=VXLAN_UDP_PORT): - LOG.debug(_("add_vxlan_endpoint() called for ip %s"), ip) - session = db_api.get_session() - with session.begin(subtransactions=True): - try: - vxlan_endpoint = (session.query(VxlanEndpoints). - filter_by(ip_address=ip). 
- with_lockmode('update').one()) - except sa_exc.NoResultFound: - vxlan_endpoint = VxlanEndpoints(ip_address=ip, - udp_port=udp_port) - session.add(vxlan_endpoint) - return vxlan_endpoint diff --git a/neutron/plugins/ml2/managers.py b/neutron/plugins/ml2/managers.py deleted file mode 100644 index 13df6732e..000000000 --- a/neutron/plugins/ml2/managers.py +++ /dev/null @@ -1,480 +0,0 @@ -# Copyright (c) 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo.config import cfg -import stevedore - -from neutron.common import exceptions as exc -from neutron.extensions import portbindings -from neutron.openstack.common import log -from neutron.plugins.ml2.common import exceptions as ml2_exc -from neutron.plugins.ml2 import driver_api as api - - -LOG = log.getLogger(__name__) - - -class TypeManager(stevedore.named.NamedExtensionManager): - """Manage network segment types using drivers.""" - - def __init__(self): - # Mapping from type name to DriverManager - self.drivers = {} - - LOG.info(_("Configured type driver names: %s"), - cfg.CONF.ml2.type_drivers) - super(TypeManager, self).__init__('neutron.ml2.type_drivers', - cfg.CONF.ml2.type_drivers, - invoke_on_load=True) - LOG.info(_("Loaded type driver names: %s"), self.names()) - self._register_types() - self._check_tenant_network_types(cfg.CONF.ml2.tenant_network_types) - - def _register_types(self): - for ext in self: - network_type = ext.obj.get_type() - if network_type in self.drivers: - LOG.error(_("Type driver '%(new_driver)s' ignored because type" - " driver '%(old_driver)s' is already registered" - " for type '%(type)s'"), - {'new_driver': ext.name, - 'old_driver': self.drivers[network_type].name, - 'type': network_type}) - else: - self.drivers[network_type] = ext - LOG.info(_("Registered types: %s"), self.drivers.keys()) - - def _check_tenant_network_types(self, types): - self.tenant_network_types = [] - for network_type in types: - if network_type in self.drivers: - self.tenant_network_types.append(network_type) - else: - msg = _("No type driver for tenant network_type: %s. 
" - "Service terminated!") % network_type - LOG.error(msg) - raise SystemExit(1) - LOG.info(_("Tenant network_types: %s"), self.tenant_network_types) - - def initialize(self): - for network_type, driver in self.drivers.iteritems(): - LOG.info(_("Initializing driver for type '%s'"), network_type) - driver.obj.initialize() - - def validate_provider_segment(self, segment): - network_type = segment[api.NETWORK_TYPE] - driver = self.drivers.get(network_type) - if driver: - driver.obj.validate_provider_segment(segment) - else: - msg = _("network_type value '%s' not supported") % network_type - raise exc.InvalidInput(error_message=msg) - - def reserve_provider_segment(self, session, segment): - network_type = segment.get(api.NETWORK_TYPE) - driver = self.drivers.get(network_type) - driver.obj.reserve_provider_segment(session, segment) - - def allocate_tenant_segment(self, session): - for network_type in self.tenant_network_types: - driver = self.drivers.get(network_type) - segment = driver.obj.allocate_tenant_segment(session) - if segment: - return segment - raise exc.NoNetworkAvailable() - - def release_segment(self, session, segment): - network_type = segment.get(api.NETWORK_TYPE) - driver = self.drivers.get(network_type) - # ML2 may have been reconfigured since the segment was created, - # so a driver may no longer exist for this network_type. - # REVISIT: network_type-specific db entries may become orphaned - # if a network is deleted and the driver isn't available to release - # the segment. This may be fixed with explicit foreign-key references - # or consistency checks on driver initialization. - if not driver: - LOG.error(_("Failed to release segment '%s' because " - "network type is not supported."), segment) - return - driver.obj.release_segment(session, segment) - - -class MechanismManager(stevedore.named.NamedExtensionManager): - """Manage networking mechanisms using drivers.""" - - def __init__(self): - # Registered mechanism drivers, keyed by name. 
- self.mech_drivers = {} - # Ordered list of mechanism drivers, defining - # the order in which the drivers are called. - self.ordered_mech_drivers = [] - - LOG.info(_("Configured mechanism driver names: %s"), - cfg.CONF.ml2.mechanism_drivers) - super(MechanismManager, self).__init__('neutron.ml2.mechanism_drivers', - cfg.CONF.ml2.mechanism_drivers, - invoke_on_load=True, - name_order=True) - LOG.info(_("Loaded mechanism driver names: %s"), self.names()) - self._register_mechanisms() - - def _register_mechanisms(self): - """Register all mechanism drivers. - - This method should only be called once in the MechanismManager - constructor. - """ - for ext in self: - self.mech_drivers[ext.name] = ext - self.ordered_mech_drivers.append(ext) - LOG.info(_("Registered mechanism drivers: %s"), - [driver.name for driver in self.ordered_mech_drivers]) - - def initialize(self): - # For ML2 to support bulk operations, each driver must support them - self.native_bulk_support = True - for driver in self.ordered_mech_drivers: - LOG.info(_("Initializing mechanism driver '%s'"), driver.name) - driver.obj.initialize() - self.native_bulk_support &= getattr(driver.obj, - 'native_bulk_support', True) - - def _call_on_drivers(self, method_name, context, - continue_on_failure=False): - """Helper method for calling a method across all mechanism drivers. - - :param method_name: name of the method to call - :param context: context parameter to pass to each method call - :param continue_on_failure: whether or not to continue to call - all mechanism drivers once one has raised an exception - :raises: neutron.plugins.ml2.common.MechanismDriverError - if any mechanism driver call fails. 
- """ - error = False - for driver in self.ordered_mech_drivers: - try: - getattr(driver.obj, method_name)(context) - except Exception: - LOG.exception( - _("Mechanism driver '%(name)s' failed in %(method)s"), - {'name': driver.name, 'method': method_name} - ) - error = True - if not continue_on_failure: - break - if error: - raise ml2_exc.MechanismDriverError( - method=method_name - ) - - def create_network_precommit(self, context): - """Notify all mechanism drivers during network creation. - - :raises: neutron.plugins.ml2.common.MechanismDriverError - if any mechanism driver create_network_precommit call fails. - - Called within the database transaction. If a mechanism driver - raises an exception, then a MechanismDriverError is propogated - to the caller, triggering a rollback. There is no guarantee - that all mechanism drivers are called in this case. - """ - self._call_on_drivers("create_network_precommit", context) - - def create_network_postcommit(self, context): - """Notify all mechanism drivers after network creation. - - :raises: neutron.plugins.ml2.common.MechanismDriverError - if any mechanism driver create_network_postcommit call fails. - - Called after the database transaction. If a mechanism driver - raises an exception, then a MechanismDriverError is propagated - to the caller, where the network will be deleted, triggering - any required cleanup. There is no guarantee that all mechanism - drivers are called in this case. - """ - self._call_on_drivers("create_network_postcommit", context) - - def update_network_precommit(self, context): - """Notify all mechanism drivers during network update. - - :raises: neutron.plugins.ml2.common.MechanismDriverError - if any mechanism driver update_network_precommit call fails. - - Called within the database transaction. If a mechanism driver - raises an exception, then a MechanismDriverError is propogated - to the caller, triggering a rollback. 
There is no guarantee - that all mechanism drivers are called in this case. - """ - self._call_on_drivers("update_network_precommit", context) - - def update_network_postcommit(self, context): - """Notify all mechanism drivers after network update. - - :raises: neutron.plugins.ml2.common.MechanismDriverError - if any mechanism driver update_network_postcommit call fails. - - Called after the database transaction. If any mechanism driver - raises an error, then the error is logged but we continue to - call every other mechanism driver. A MechanismDriverError is - then reraised at the end to notify the caller of a failure. - """ - self._call_on_drivers("update_network_postcommit", context, - continue_on_failure=True) - - def delete_network_precommit(self, context): - """Notify all mechanism drivers during network deletion. - - :raises: neutron.plugins.ml2.common.MechanismDriverError - if any mechanism driver delete_network_precommit call fails. - - Called within the database transaction. If a mechanism driver - raises an exception, then a MechanismDriverError is propogated - to the caller, triggering a rollback. There is no guarantee - that all mechanism drivers are called in this case. - """ - self._call_on_drivers("delete_network_precommit", context) - - def delete_network_postcommit(self, context): - """Notify all mechanism drivers after network deletion. - - :raises: neutron.plugins.ml2.common.MechanismDriverError - if any mechanism driver delete_network_postcommit call fails. - - Called after the database transaction. If any mechanism driver - raises an error, then the error is logged but we continue to - call every other mechanism driver. A MechanismDriverError is - then reraised at the end to notify the caller of a failure. In - general we expect the caller to ignore the error, as the - network resource has already been deleted from the database - and it doesn't make sense to undo the action by recreating the - network. 
- """ - self._call_on_drivers("delete_network_postcommit", context, - continue_on_failure=True) - - def create_subnet_precommit(self, context): - """Notify all mechanism drivers during subnet creation. - - :raises: neutron.plugins.ml2.common.MechanismDriverError - if any mechanism driver create_subnet_precommit call fails. - - Called within the database transaction. If a mechanism driver - raises an exception, then a MechanismDriverError is propogated - to the caller, triggering a rollback. There is no guarantee - that all mechanism drivers are called in this case. - """ - self._call_on_drivers("create_subnet_precommit", context) - - def create_subnet_postcommit(self, context): - """Notify all mechanism drivers after subnet creation. - - :raises: neutron.plugins.ml2.common.MechanismDriverError - if any mechanism driver create_subnet_postcommit call fails. - - Called after the database transaction. If a mechanism driver - raises an exception, then a MechanismDriverError is propagated - to the caller, where the subnet will be deleted, triggering - any required cleanup. There is no guarantee that all mechanism - drivers are called in this case. - """ - self._call_on_drivers("create_subnet_postcommit", context) - - def update_subnet_precommit(self, context): - """Notify all mechanism drivers during subnet update. - - :raises: neutron.plugins.ml2.common.MechanismDriverError - if any mechanism driver update_subnet_precommit call fails. - - Called within the database transaction. If a mechanism driver - raises an exception, then a MechanismDriverError is propogated - to the caller, triggering a rollback. There is no guarantee - that all mechanism drivers are called in this case. - """ - self._call_on_drivers("update_subnet_precommit", context) - - def update_subnet_postcommit(self, context): - """Notify all mechanism drivers after subnet update. - - :raises: neutron.plugins.ml2.common.MechanismDriverError - if any mechanism driver update_subnet_postcommit call fails. 
- - Called after the database transaction. If any mechanism driver - raises an error, then the error is logged but we continue to - call every other mechanism driver. A MechanismDriverError is - then reraised at the end to notify the caller of a failure. - """ - self._call_on_drivers("update_subnet_postcommit", context, - continue_on_failure=True) - - def delete_subnet_precommit(self, context): - """Notify all mechanism drivers during subnet deletion. - - :raises: neutron.plugins.ml2.common.MechanismDriverError - if any mechanism driver delete_subnet_precommit call fails. - - Called within the database transaction. If a mechanism driver - raises an exception, then a MechanismDriverError is propogated - to the caller, triggering a rollback. There is no guarantee - that all mechanism drivers are called in this case. - """ - self._call_on_drivers("delete_subnet_precommit", context) - - def delete_subnet_postcommit(self, context): - """Notify all mechanism drivers after subnet deletion. - - :raises: neutron.plugins.ml2.common.MechanismDriverError - if any mechanism driver delete_subnet_postcommit call fails. - - Called after the database transaction. If any mechanism driver - raises an error, then the error is logged but we continue to - call every other mechanism driver. A MechanismDriverError is - then reraised at the end to notify the caller of a failure. In - general we expect the caller to ignore the error, as the - subnet resource has already been deleted from the database - and it doesn't make sense to undo the action by recreating the - subnet. - """ - self._call_on_drivers("delete_subnet_postcommit", context, - continue_on_failure=True) - - def create_port_precommit(self, context): - """Notify all mechanism drivers during port creation. - - :raises: neutron.plugins.ml2.common.MechanismDriverError - if any mechanism driver create_port_precommit call fails. - - Called within the database transaction. 
If a mechanism driver - raises an exception, then a MechanismDriverError is propogated - to the caller, triggering a rollback. There is no guarantee - that all mechanism drivers are called in this case. - """ - self._call_on_drivers("create_port_precommit", context) - - def create_port_postcommit(self, context): - """Notify all mechanism drivers of port creation. - - :raises: neutron.plugins.ml2.common.MechanismDriverError - if any mechanism driver create_port_postcommit call fails. - - Called after the database transaction. Errors raised by - mechanism drivers are left to propagate to the caller, where - the port will be deleted, triggering any required - cleanup. There is no guarantee that all mechanism drivers are - called in this case. - """ - self._call_on_drivers("create_port_postcommit", context) - - def update_port_precommit(self, context): - """Notify all mechanism drivers during port update. - - :raises: neutron.plugins.ml2.common.MechanismDriverError - if any mechanism driver update_port_precommit call fails. - - Called within the database transaction. If a mechanism driver - raises an exception, then a MechanismDriverError is propogated - to the caller, triggering a rollback. There is no guarantee - that all mechanism drivers are called in this case. - """ - self._call_on_drivers("update_port_precommit", context) - - def update_port_postcommit(self, context): - """Notify all mechanism drivers after port update. - - :raises: neutron.plugins.ml2.common.MechanismDriverError - if any mechanism driver update_port_postcommit call fails. - - Called after the database transaction. If any mechanism driver - raises an error, then the error is logged but we continue to - call every other mechanism driver. A MechanismDriverError is - then reraised at the end to notify the caller of a failure. 
- """ - self._call_on_drivers("update_port_postcommit", context, - continue_on_failure=True) - - def delete_port_precommit(self, context): - """Notify all mechanism drivers during port deletion. - - :raises: neutron.plugins.ml2.common.MechanismDriverError - if any mechanism driver delete_port_precommit call fails. - - Called within the database transaction. If a mechanism driver - raises an exception, then a MechanismDriverError is propogated - to the caller, triggering a rollback. There is no guarantee - that all mechanism drivers are called in this case. - """ - self._call_on_drivers("delete_port_precommit", context) - - def delete_port_postcommit(self, context): - """Notify all mechanism drivers after port deletion. - - :raises: neutron.plugins.ml2.common.MechanismDriverError - if any mechanism driver delete_port_postcommit call fails. - - Called after the database transaction. If any mechanism driver - raises an error, then the error is logged but we continue to - call every other mechanism driver. A MechanismDriverError is - then reraised at the end to notify the caller of a failure. In - general we expect the caller to ignore the error, as the - port resource has already been deleted from the database - and it doesn't make sense to undo the action by recreating the - port. - """ - self._call_on_drivers("delete_port_postcommit", context, - continue_on_failure=True) - - def bind_port(self, context): - """Attempt to bind a port using registered mechanism drivers. - - :param context: PortContext instance describing the port - - Called inside transaction context on session, prior to - create_port_precommit or update_port_precommit, to - attempt to establish a port binding. 
- """ - binding = context._binding - LOG.debug(_("Attempting to bind port %(port)s on host %(host)s " - "for vnic_type %(vnic_type)s with profile %(profile)s"), - {'port': context._port['id'], - 'host': binding.host, - 'vnic_type': binding.vnic_type, - 'profile': binding.profile}) - for driver in self.ordered_mech_drivers: - try: - driver.obj.bind_port(context) - if binding.segment: - binding.driver = driver.name - LOG.debug(_("Bound port: %(port)s, host: %(host)s, " - "vnic_type: %(vnic_type)s, " - "profile: %(profile)s" - "driver: %(driver)s, vif_type: %(vif_type)s, " - "vif_details: %(vif_details)s, " - "segment: %(segment)s"), - {'port': context._port['id'], - 'host': binding.host, - 'vnic_type': binding.vnic_type, - 'profile': binding.profile, - 'driver': binding.driver, - 'vif_type': binding.vif_type, - 'vif_details': binding.vif_details, - 'segment': binding.segment}) - return - except Exception: - LOG.exception(_("Mechanism driver %s failed in " - "bind_port"), - driver.name) - binding.vif_type = portbindings.VIF_TYPE_BINDING_FAILED - LOG.warning(_("Failed to bind port %(port)s on host %(host)s"), - {'port': context._port['id'], - 'host': binding.host}) diff --git a/neutron/plugins/ml2/models.py b/neutron/plugins/ml2/models.py deleted file mode 100644 index 0ab805f1c..000000000 --- a/neutron/plugins/ml2/models.py +++ /dev/null @@ -1,76 +0,0 @@ -# Copyright (c) 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import sqlalchemy as sa -from sqlalchemy import orm - -from neutron.db import model_base -from neutron.db import models_v2 -from neutron.extensions import portbindings - -BINDING_PROFILE_LEN = 4095 - - -class NetworkSegment(model_base.BASEV2, models_v2.HasId): - """Represent persistent state of a network segment. - - A network segment is a portion of a neutron network with a - specific physical realization. A neutron network can consist of - one or more segments. - """ - - __tablename__ = 'ml2_network_segments' - - network_id = sa.Column(sa.String(36), - sa.ForeignKey('networks.id', ondelete="CASCADE"), - nullable=False) - network_type = sa.Column(sa.String(32), nullable=False) - physical_network = sa.Column(sa.String(64)) - segmentation_id = sa.Column(sa.Integer) - - -class PortBinding(model_base.BASEV2): - """Represent binding-related state of a port. - - A port binding stores the port attributes required for the - portbindings extension, as well as internal ml2 state such as - which MechanismDriver and which segment are used by the port - binding. 
- """ - - __tablename__ = 'ml2_port_bindings' - - port_id = sa.Column(sa.String(36), - sa.ForeignKey('ports.id', ondelete="CASCADE"), - primary_key=True) - host = sa.Column(sa.String(255), nullable=False, default='') - vnic_type = sa.Column(sa.String(64), nullable=False, - default=portbindings.VNIC_NORMAL) - profile = sa.Column(sa.String(BINDING_PROFILE_LEN), nullable=False, - default='') - vif_type = sa.Column(sa.String(64), nullable=False) - vif_details = sa.Column(sa.String(4095), nullable=False, default='') - driver = sa.Column(sa.String(64)) - segment = sa.Column(sa.String(36), - sa.ForeignKey('ml2_network_segments.id', - ondelete="SET NULL")) - - # Add a relationship to the Port model in order to instruct SQLAlchemy to - # eagerly load port bindings - port = orm.relationship( - models_v2.Port, - backref=orm.backref("port_binding", - lazy='joined', uselist=False, - cascade='delete')) diff --git a/neutron/plugins/ml2/plugin.py b/neutron/plugins/ml2/plugin.py deleted file mode 100644 index a324637c5..000000000 --- a/neutron/plugins/ml2/plugin.py +++ /dev/null @@ -1,791 +0,0 @@ -# Copyright (c) 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-import contextlib - -from oslo.config import cfg -from sqlalchemy import exc as sql_exc -from sqlalchemy.orm import exc as sa_exc - -from neutron.agent import securitygroups_rpc as sg_rpc -from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api -from neutron.api.v2 import attributes -from neutron.common import constants as const -from neutron.common import exceptions as exc -from neutron.common import rpc_compat -from neutron.common import topics -from neutron.db import agents_db -from neutron.db import agentschedulers_db -from neutron.db import allowedaddresspairs_db as addr_pair_db -from neutron.db import db_base_plugin_v2 -from neutron.db import external_net_db -from neutron.db import extradhcpopt_db -from neutron.db import models_v2 -from neutron.db import quota_db # noqa -from neutron.db import securitygroups_rpc_base as sg_db_rpc -from neutron.extensions import allowedaddresspairs as addr_pair -from neutron.extensions import extra_dhcp_opt as edo_ext -from neutron.extensions import multiprovidernet as mpnet -from neutron.extensions import portbindings -from neutron.extensions import providernet as provider -from neutron import manager -from neutron.openstack.common import db as os_db -from neutron.openstack.common import excutils -from neutron.openstack.common import importutils -from neutron.openstack.common import jsonutils -from neutron.openstack.common import lockutils -from neutron.openstack.common import log -from neutron.plugins.common import constants as service_constants -from neutron.plugins.ml2.common import exceptions as ml2_exc -from neutron.plugins.ml2 import config # noqa -from neutron.plugins.ml2 import db -from neutron.plugins.ml2 import driver_api as api -from neutron.plugins.ml2 import driver_context -from neutron.plugins.ml2 import managers -from neutron.plugins.ml2 import models -from neutron.plugins.ml2 import rpc - -LOG = log.getLogger(__name__) - -# REVISIT(rkukura): Move this and other network_type constants to -# 
providernet.py? -TYPE_MULTI_SEGMENT = 'multi-segment' - - -class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, - external_net_db.External_net_db_mixin, - sg_db_rpc.SecurityGroupServerRpcMixin, - agentschedulers_db.DhcpAgentSchedulerDbMixin, - addr_pair_db.AllowedAddressPairsMixin, - extradhcpopt_db.ExtraDhcpOptMixin): - - """Implement the Neutron L2 abstractions using modules. - - Ml2Plugin is a Neutron plugin based on separately extensible sets - of network types and mechanisms for connecting to networks of - those types. The network types and mechanisms are implemented as - drivers loaded via Python entry points. Networks can be made up of - multiple segments (not yet fully implemented). - """ - - # This attribute specifies whether the plugin supports or not - # bulk/pagination/sorting operations. Name mangling is used in - # order to ensure it is qualified by class - __native_bulk_support = True - __native_pagination_support = True - __native_sorting_support = True - - # List of supported extensions - _supported_extension_aliases = ["provider", "external-net", "binding", - "quotas", "security-group", "agent", - "dhcp_agent_scheduler", - "multi-provider", "allowed-address-pairs", - "extra_dhcp_opt"] - - @property - def supported_extension_aliases(self): - if not hasattr(self, '_aliases'): - aliases = self._supported_extension_aliases[:] - sg_rpc.disable_security_group_extension_by_config(aliases) - self._aliases = aliases - return self._aliases - - def __init__(self): - # First load drivers, then initialize DB, then initialize drivers - self.type_manager = managers.TypeManager() - self.mechanism_manager = managers.MechanismManager() - super(Ml2Plugin, self).__init__() - self.type_manager.initialize() - self.mechanism_manager.initialize() - # bulk support depends on the underlying drivers - self.__native_bulk_support = self.mechanism_manager.native_bulk_support - - self._setup_rpc() - - # REVISIT(rkukura): Use stevedore for these? 
- self.network_scheduler = importutils.import_object( - cfg.CONF.network_scheduler_driver - ) - - LOG.info(_("Modular L2 Plugin initialization complete")) - - def _setup_rpc(self): - self.notifier = rpc.AgentNotifierApi(topics.AGENT) - self.agent_notifiers[const.AGENT_TYPE_DHCP] = ( - dhcp_rpc_agent_api.DhcpAgentNotifyAPI() - ) - - def start_rpc_listeners(self): - self.endpoints = [rpc.RpcCallbacks(self.notifier, self.type_manager), - agents_db.AgentExtRpcCallback()] - self.topic = topics.PLUGIN - self.conn = rpc_compat.create_connection(new=True) - self.conn.create_consumer(self.topic, self.endpoints, - fanout=False) - return self.conn.consume_in_threads() - - def _process_provider_segment(self, segment): - network_type = self._get_attribute(segment, provider.NETWORK_TYPE) - physical_network = self._get_attribute(segment, - provider.PHYSICAL_NETWORK) - segmentation_id = self._get_attribute(segment, - provider.SEGMENTATION_ID) - - if attributes.is_attr_set(network_type): - segment = {api.NETWORK_TYPE: network_type, - api.PHYSICAL_NETWORK: physical_network, - api.SEGMENTATION_ID: segmentation_id} - self.type_manager.validate_provider_segment(segment) - return segment - - msg = _("network_type required") - raise exc.InvalidInput(error_message=msg) - - def _process_provider_create(self, network): - segments = [] - - if any(attributes.is_attr_set(network.get(f)) - for f in (provider.NETWORK_TYPE, provider.PHYSICAL_NETWORK, - provider.SEGMENTATION_ID)): - # Verify that multiprovider and provider attributes are not set - # at the same time. 
- if attributes.is_attr_set(network.get(mpnet.SEGMENTS)): - raise mpnet.SegmentsSetInConjunctionWithProviders() - - network_type = self._get_attribute(network, provider.NETWORK_TYPE) - physical_network = self._get_attribute(network, - provider.PHYSICAL_NETWORK) - segmentation_id = self._get_attribute(network, - provider.SEGMENTATION_ID) - segments = [{provider.NETWORK_TYPE: network_type, - provider.PHYSICAL_NETWORK: physical_network, - provider.SEGMENTATION_ID: segmentation_id}] - elif attributes.is_attr_set(network.get(mpnet.SEGMENTS)): - segments = network[mpnet.SEGMENTS] - else: - return - - return [self._process_provider_segment(s) for s in segments] - - def _get_attribute(self, attrs, key): - value = attrs.get(key) - if value is attributes.ATTR_NOT_SPECIFIED: - value = None - return value - - def _extend_network_dict_provider(self, context, network): - id = network['id'] - segments = db.get_network_segments(context.session, id) - if not segments: - LOG.error(_("Network %s has no segments"), id) - network[provider.NETWORK_TYPE] = None - network[provider.PHYSICAL_NETWORK] = None - network[provider.SEGMENTATION_ID] = None - elif len(segments) > 1: - network[mpnet.SEGMENTS] = [ - {provider.NETWORK_TYPE: segment[api.NETWORK_TYPE], - provider.PHYSICAL_NETWORK: segment[api.PHYSICAL_NETWORK], - provider.SEGMENTATION_ID: segment[api.SEGMENTATION_ID]} - for segment in segments] - else: - segment = segments[0] - network[provider.NETWORK_TYPE] = segment[api.NETWORK_TYPE] - network[provider.PHYSICAL_NETWORK] = segment[api.PHYSICAL_NETWORK] - network[provider.SEGMENTATION_ID] = segment[api.SEGMENTATION_ID] - - def _filter_nets_provider(self, context, nets, filters): - # TODO(rkukura): Implement filtering. 
- return nets - - def _process_port_binding(self, mech_context, attrs): - binding = mech_context._binding - port = mech_context.current - self._update_port_dict_binding(port, binding) - - host = attrs and attrs.get(portbindings.HOST_ID) - host_set = attributes.is_attr_set(host) - - vnic_type = attrs and attrs.get(portbindings.VNIC_TYPE) - vnic_type_set = attributes.is_attr_set(vnic_type) - - # CLI can't send {}, so treat None as {} - profile = attrs and attrs.get(portbindings.PROFILE) - profile_set = profile is not attributes.ATTR_NOT_SPECIFIED - if profile_set and not profile: - profile = {} - - if binding.vif_type != portbindings.VIF_TYPE_UNBOUND: - if (not host_set and not vnic_type_set and not profile_set and - binding.segment): - return False - self._delete_port_binding(mech_context) - - # Return True only if an agent notification is needed. - # This will happen if a new host, vnic_type, or profile was specified - # that differs from the current one. Note that host_set is True - # even if the host is an empty string - ret_value = ((host_set and binding.get('host') != host) or - (vnic_type_set and - binding.get('vnic_type') != vnic_type) or - (profile_set and self._get_profile(binding) != profile)) - - if host_set: - binding.host = host - port[portbindings.HOST_ID] = host - - if vnic_type_set: - binding.vnic_type = vnic_type - port[portbindings.VNIC_TYPE] = vnic_type - - if profile_set: - binding.profile = jsonutils.dumps(profile) - if len(binding.profile) > models.BINDING_PROFILE_LEN: - msg = _("binding:profile value too large") - raise exc.InvalidInput(error_message=msg) - port[portbindings.PROFILE] = profile - - # To try to [re]bind if host is non-empty. - if binding.host: - self.mechanism_manager.bind_port(mech_context) - self._update_port_dict_binding(port, binding) - - # Update the port status if requested by the bound driver. 
- if binding.segment and mech_context._new_port_status: - # REVISIT(rkukura): This function is currently called - # inside a transaction with the port either newly - # created or locked for update. After the fix for bug - # 1276391 is merged, this will no longer be true, and - # the port status update will need to be handled in - # the transaction that commits the new binding. - port_db = db.get_port(mech_context._plugin_context.session, - port['id']) - port_db.status = mech_context._new_port_status - port['status'] = mech_context._new_port_status - - return ret_value - - def _update_port_dict_binding(self, port, binding): - port[portbindings.HOST_ID] = binding.host - port[portbindings.VNIC_TYPE] = binding.vnic_type - port[portbindings.PROFILE] = self._get_profile(binding) - port[portbindings.VIF_TYPE] = binding.vif_type - port[portbindings.VIF_DETAILS] = self._get_vif_details(binding) - - def _get_vif_details(self, binding): - if binding.vif_details: - try: - return jsonutils.loads(binding.vif_details) - except Exception: - LOG.error(_("Serialized vif_details DB value '%(value)s' " - "for port %(port)s is invalid"), - {'value': binding.vif_details, - 'port': binding.port_id}) - return {} - - def _get_profile(self, binding): - if binding.profile: - try: - return jsonutils.loads(binding.profile) - except Exception: - LOG.error(_("Serialized profile DB value '%(value)s' for " - "port %(port)s is invalid"), - {'value': binding.profile, - 'port': binding.port_id}) - return {} - - def _delete_port_binding(self, mech_context): - binding = mech_context._binding - binding.vif_type = portbindings.VIF_TYPE_UNBOUND - binding.vif_details = '' - binding.driver = None - binding.segment = None - port = mech_context.current - self._update_port_dict_binding(port, binding) - - def _ml2_extend_port_dict_binding(self, port_res, port_db): - # None when called during unit tests for other plugins. 
- if port_db.port_binding: - self._update_port_dict_binding(port_res, port_db.port_binding) - - db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( - attributes.PORTS, ['_ml2_extend_port_dict_binding']) - - # Note - The following hook methods have "ml2" in their names so - # that they are not called twice during unit tests due to global - # registration of hooks in portbindings_db.py used by other - # plugins. - - def _ml2_port_model_hook(self, context, original_model, query): - query = query.outerjoin(models.PortBinding, - (original_model.id == - models.PortBinding.port_id)) - return query - - def _ml2_port_result_filter_hook(self, query, filters): - values = filters and filters.get(portbindings.HOST_ID, []) - if not values: - return query - return query.filter(models.PortBinding.host.in_(values)) - - db_base_plugin_v2.NeutronDbPluginV2.register_model_query_hook( - models_v2.Port, - "ml2_port_bindings", - '_ml2_port_model_hook', - None, - '_ml2_port_result_filter_hook') - - def _notify_port_updated(self, mech_context): - port = mech_context._port - segment = mech_context.bound_segment - if not segment: - # REVISIT(rkukura): This should notify agent to unplug port - network = mech_context.network.current - LOG.warning(_("In _notify_port_updated(), no bound segment for " - "port %(port_id)s on network %(network_id)s"), - {'port_id': port['id'], - 'network_id': network['id']}) - return - self.notifier.port_update(mech_context._plugin_context, port, - segment[api.NETWORK_TYPE], - segment[api.SEGMENTATION_ID], - segment[api.PHYSICAL_NETWORK]) - - # TODO(apech): Need to override bulk operations - - def create_network(self, context, network): - net_data = network['network'] - segments = self._process_provider_create(net_data) - tenant_id = self._get_tenant_id_for_create(context, net_data) - - session = context.session - with session.begin(subtransactions=True): - self._ensure_default_security_group(context, tenant_id) - result = super(Ml2Plugin, 
self).create_network(context, network) - network_id = result['id'] - self._process_l3_create(context, result, net_data) - # REVISIT(rkukura): Consider moving all segment management - # to TypeManager. - if segments: - for segment in segments: - self.type_manager.reserve_provider_segment(session, - segment) - db.add_network_segment(session, network_id, segment) - else: - segment = self.type_manager.allocate_tenant_segment(session) - db.add_network_segment(session, network_id, segment) - self._extend_network_dict_provider(context, result) - mech_context = driver_context.NetworkContext(self, context, - result) - self.mechanism_manager.create_network_precommit(mech_context) - - try: - self.mechanism_manager.create_network_postcommit(mech_context) - except ml2_exc.MechanismDriverError: - with excutils.save_and_reraise_exception(): - LOG.error(_("mechanism_manager.create_network_postcommit " - "failed, deleting network '%s'"), result['id']) - self.delete_network(context, result['id']) - return result - - def update_network(self, context, id, network): - provider._raise_if_updates_provider_attributes(network['network']) - - session = context.session - with session.begin(subtransactions=True): - original_network = super(Ml2Plugin, self).get_network(context, id) - updated_network = super(Ml2Plugin, self).update_network(context, - id, - network) - self._process_l3_update(context, updated_network, - network['network']) - self._extend_network_dict_provider(context, updated_network) - mech_context = driver_context.NetworkContext( - self, context, updated_network, - original_network=original_network) - self.mechanism_manager.update_network_precommit(mech_context) - - # TODO(apech) - handle errors raised by update_network, potentially - # by re-calling update_network with the previous attributes. For - # now the error is propogated to the caller, which is expected to - # either undo/retry the operation or delete the resource. 
- self.mechanism_manager.update_network_postcommit(mech_context) - return updated_network - - def get_network(self, context, id, fields=None): - session = context.session - with session.begin(subtransactions=True): - result = super(Ml2Plugin, self).get_network(context, id, None) - self._extend_network_dict_provider(context, result) - - return self._fields(result, fields) - - def get_networks(self, context, filters=None, fields=None, - sorts=None, limit=None, marker=None, page_reverse=False): - session = context.session - with session.begin(subtransactions=True): - nets = super(Ml2Plugin, - self).get_networks(context, filters, None, sorts, - limit, marker, page_reverse) - for net in nets: - self._extend_network_dict_provider(context, net) - - nets = self._filter_nets_provider(context, nets, filters) - nets = self._filter_nets_l3(context, nets, filters) - - return [self._fields(net, fields) for net in nets] - - def delete_network(self, context, id): - # REVISIT(rkukura) The super(Ml2Plugin, self).delete_network() - # function is not used because it auto-deletes ports and - # subnets from the DB without invoking the derived class's - # delete_port() or delete_subnet(), preventing mechanism - # drivers from being called. This approach should be revisited - # when the API layer is reworked during icehouse. - - LOG.debug(_("Deleting network %s"), id) - session = context.session - while True: - try: - with session.begin(subtransactions=True): - self._process_l3_delete(context, id) - - # Get ports to auto-delete. - ports = (session.query(models_v2.Port). - enable_eagerloads(False). - filter_by(network_id=id). - with_lockmode('update').all()) - LOG.debug(_("Ports to auto-delete: %s"), ports) - only_auto_del = all(p.device_owner - in db_base_plugin_v2. - AUTO_DELETE_PORT_OWNERS - for p in ports) - if not only_auto_del: - LOG.debug(_("Tenant-owned ports exist")) - raise exc.NetworkInUse(net_id=id) - - # Get subnets to auto-delete. - subnets = (session.query(models_v2.Subnet). 
- enable_eagerloads(False). - filter_by(network_id=id). - with_lockmode('update').all()) - LOG.debug(_("Subnets to auto-delete: %s"), subnets) - - if not (ports or subnets): - network = self.get_network(context, id) - mech_context = driver_context.NetworkContext(self, - context, - network) - self.mechanism_manager.delete_network_precommit( - mech_context) - - record = self._get_network(context, id) - LOG.debug(_("Deleting network record %s"), record) - session.delete(record) - - for segment in mech_context.network_segments: - self.type_manager.release_segment(session, segment) - - # The segment records are deleted via cascade from the - # network record, so explicit removal is not necessary. - LOG.debug(_("Committing transaction")) - break - except os_db.exception.DBError as e: - with excutils.save_and_reraise_exception() as ctxt: - if isinstance(e.inner_exception, sql_exc.IntegrityError): - ctxt.reraise = False - msg = _("A concurrent port creation has occurred") - LOG.warning(msg) - continue - - for port in ports: - try: - self.delete_port(context, port.id) - except Exception: - with excutils.save_and_reraise_exception(): - LOG.exception(_("Exception auto-deleting port %s"), - port.id) - - for subnet in subnets: - try: - self.delete_subnet(context, subnet.id) - except Exception: - with excutils.save_and_reraise_exception(): - LOG.exception(_("Exception auto-deleting subnet %s"), - subnet.id) - - try: - self.mechanism_manager.delete_network_postcommit(mech_context) - except ml2_exc.MechanismDriverError: - # TODO(apech) - One or more mechanism driver failed to - # delete the network. Ideally we'd notify the caller of - # the fact that an error occurred. 
- LOG.error(_("mechanism_manager.delete_network_postcommit failed")) - self.notifier.network_delete(context, id) - - def create_subnet(self, context, subnet): - session = context.session - with session.begin(subtransactions=True): - result = super(Ml2Plugin, self).create_subnet(context, subnet) - mech_context = driver_context.SubnetContext(self, context, result) - self.mechanism_manager.create_subnet_precommit(mech_context) - - try: - self.mechanism_manager.create_subnet_postcommit(mech_context) - except ml2_exc.MechanismDriverError: - with excutils.save_and_reraise_exception(): - LOG.error(_("mechanism_manager.create_subnet_postcommit " - "failed, deleting subnet '%s'"), result['id']) - self.delete_subnet(context, result['id']) - return result - - def update_subnet(self, context, id, subnet): - session = context.session - with session.begin(subtransactions=True): - original_subnet = super(Ml2Plugin, self).get_subnet(context, id) - updated_subnet = super(Ml2Plugin, self).update_subnet( - context, id, subnet) - mech_context = driver_context.SubnetContext( - self, context, updated_subnet, original_subnet=original_subnet) - self.mechanism_manager.update_subnet_precommit(mech_context) - - # TODO(apech) - handle errors raised by update_subnet, potentially - # by re-calling update_subnet with the previous attributes. For - # now the error is propogated to the caller, which is expected to - # either undo/retry the operation or delete the resource. - self.mechanism_manager.update_subnet_postcommit(mech_context) - return updated_subnet - - def delete_subnet(self, context, id): - # REVISIT(rkukura) The super(Ml2Plugin, self).delete_subnet() - # function is not used because it deallocates the subnet's addresses - # from ports in the DB without invoking the derived class's - # update_port(), preventing mechanism drivers from being called. - # This approach should be revisited when the API layer is reworked - # during icehouse. 
- - LOG.debug(_("Deleting subnet %s"), id) - session = context.session - while True: - with session.begin(subtransactions=True): - subnet = self.get_subnet(context, id) - # Get ports to auto-deallocate - allocated = (session.query(models_v2.IPAllocation). - filter_by(subnet_id=id). - join(models_v2.Port). - filter_by(network_id=subnet['network_id']). - with_lockmode('update').all()) - LOG.debug(_("Ports to auto-deallocate: %s"), allocated) - only_auto_del = all(not a.port_id or - a.ports.device_owner in db_base_plugin_v2. - AUTO_DELETE_PORT_OWNERS - for a in allocated) - if not only_auto_del: - LOG.debug(_("Tenant-owned ports exist")) - raise exc.SubnetInUse(subnet_id=id) - - if not allocated: - mech_context = driver_context.SubnetContext(self, context, - subnet) - self.mechanism_manager.delete_subnet_precommit( - mech_context) - - LOG.debug(_("Deleting subnet record")) - record = self._get_subnet(context, id) - session.delete(record) - - LOG.debug(_("Committing transaction")) - break - - for a in allocated: - if a.port_id: - # calling update_port() for each allocation to remove the - # IP from the port and call the MechanismDrivers - data = {'port': - {'fixed_ips': [{'subnet_id': ip.subnet_id, - 'ip_address': ip.ip_address} - for ip in a.ports.fixed_ips - if ip.subnet_id != id]}} - try: - self.update_port(context, a.port_id, data) - except Exception: - with excutils.save_and_reraise_exception(): - LOG.exception(_("Exception deleting fixed_ip from " - "port %s"), a.port_id) - session.delete(a) - - try: - self.mechanism_manager.delete_subnet_postcommit(mech_context) - except ml2_exc.MechanismDriverError: - # TODO(apech) - One or more mechanism driver failed to - # delete the subnet. Ideally we'd notify the caller of - # the fact that an error occurred. 
- LOG.error(_("mechanism_manager.delete_subnet_postcommit failed")) - - def create_port(self, context, port): - attrs = port['port'] - attrs['status'] = const.PORT_STATUS_DOWN - - session = context.session - with session.begin(subtransactions=True): - self._ensure_default_security_group_on_port(context, port) - sgids = self._get_security_groups_on_port(context, port) - dhcp_opts = port['port'].get(edo_ext.EXTRADHCPOPTS, []) - result = super(Ml2Plugin, self).create_port(context, port) - self._process_port_create_security_group(context, result, sgids) - network = self.get_network(context, result['network_id']) - mech_context = driver_context.PortContext(self, context, result, - network) - self._process_port_binding(mech_context, attrs) - result[addr_pair.ADDRESS_PAIRS] = ( - self._process_create_allowed_address_pairs( - context, result, - attrs.get(addr_pair.ADDRESS_PAIRS))) - self._process_port_create_extra_dhcp_opts(context, result, - dhcp_opts) - self.mechanism_manager.create_port_precommit(mech_context) - - try: - self.mechanism_manager.create_port_postcommit(mech_context) - except ml2_exc.MechanismDriverError: - with excutils.save_and_reraise_exception(): - LOG.error(_("mechanism_manager.create_port_postcommit " - "failed, deleting port '%s'"), result['id']) - self.delete_port(context, result['id']) - self.notify_security_groups_member_updated(context, result) - return result - - def update_port(self, context, id, port): - attrs = port['port'] - need_port_update_notify = False - - session = context.session - with session.begin(subtransactions=True): - try: - port_db = (session.query(models_v2.Port). - enable_eagerloads(False). 
- filter_by(id=id).with_lockmode('update').one()) - except sa_exc.NoResultFound: - raise exc.PortNotFound(port_id=id) - original_port = self._make_port_dict(port_db) - updated_port = super(Ml2Plugin, self).update_port(context, id, - port) - if addr_pair.ADDRESS_PAIRS in port['port']: - need_port_update_notify |= ( - self.update_address_pairs_on_port(context, id, port, - original_port, - updated_port)) - need_port_update_notify |= self.update_security_group_on_port( - context, id, port, original_port, updated_port) - network = self.get_network(context, original_port['network_id']) - need_port_update_notify |= self._update_extra_dhcp_opts_on_port( - context, id, port, updated_port) - mech_context = driver_context.PortContext( - self, context, updated_port, network, - original_port=original_port) - need_port_update_notify |= self._process_port_binding( - mech_context, attrs) - self.mechanism_manager.update_port_precommit(mech_context) - - # TODO(apech) - handle errors raised by update_port, potentially - # by re-calling update_port with the previous attributes. For - # now the error is propogated to the caller, which is expected to - # either undo/retry the operation or delete the resource. 
- self.mechanism_manager.update_port_postcommit(mech_context) - - need_port_update_notify |= self.is_security_group_member_updated( - context, original_port, updated_port) - - if original_port['admin_state_up'] != updated_port['admin_state_up']: - need_port_update_notify = True - - if need_port_update_notify: - self._notify_port_updated(mech_context) - - return updated_port - - def delete_port(self, context, id, l3_port_check=True): - LOG.debug(_("Deleting port %s"), id) - l3plugin = manager.NeutronManager.get_service_plugins().get( - service_constants.L3_ROUTER_NAT) - if l3plugin and l3_port_check: - l3plugin.prevent_l3_port_deletion(context, id) - - session = context.session - # REVISIT: Serialize this operation with a semaphore to prevent - # undesired eventlet yields leading to 'lock wait timeout' errors - with contextlib.nested(lockutils.lock('db-access'), - session.begin(subtransactions=True)): - try: - port_db = (session.query(models_v2.Port). - enable_eagerloads(False). - filter_by(id=id).with_lockmode('update').one()) - except sa_exc.NoResultFound: - # the port existed when l3plugin.prevent_l3_port_deletion - # was called but now is already gone - LOG.debug(_("The port '%s' was deleted"), id) - return - port = self._make_port_dict(port_db) - - network = self.get_network(context, port['network_id']) - mech_context = driver_context.PortContext(self, context, port, - network) - self.mechanism_manager.delete_port_precommit(mech_context) - self._delete_port_security_group_bindings(context, id) - LOG.debug(_("Calling base delete_port")) - if l3plugin: - l3plugin.disassociate_floatingips(context, id) - - super(Ml2Plugin, self).delete_port(context, id) - - try: - self.mechanism_manager.delete_port_postcommit(mech_context) - except ml2_exc.MechanismDriverError: - # TODO(apech) - One or more mechanism driver failed to - # delete the port. Ideally we'd notify the caller of the - # fact that an error occurred. 
- LOG.error(_("mechanism_manager.delete_port_postcommit failed")) - self.notify_security_groups_member_updated(context, port) - - def update_port_status(self, context, port_id, status): - updated = False - session = context.session - # REVISIT: Serialize this operation with a semaphore to prevent - # undesired eventlet yields leading to 'lock wait timeout' errors - with contextlib.nested(lockutils.lock('db-access'), - session.begin(subtransactions=True)): - port = db.get_port(session, port_id) - if not port: - LOG.warning(_("Port %(port)s updated up by agent not found"), - {'port': port_id}) - return False - if port.status != status: - original_port = self._make_port_dict(port) - port.status = status - updated_port = self._make_port_dict(port) - network = self.get_network(context, - original_port['network_id']) - mech_context = driver_context.PortContext( - self, context, updated_port, network, - original_port=original_port) - self.mechanism_manager.update_port_precommit(mech_context) - updated = True - - if updated: - self.mechanism_manager.update_port_postcommit(mech_context) - - return True - - def port_bound_to_host(self, port_id, host): - port_host = db.get_port_binding_host(port_id) - return (port_host == host) diff --git a/neutron/plugins/ml2/rpc.py b/neutron/plugins/ml2/rpc.py deleted file mode 100644 index c744147c6..000000000 --- a/neutron/plugins/ml2/rpc.py +++ /dev/null @@ -1,239 +0,0 @@ -# Copyright (c) 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo import messaging - -from neutron.agent import securitygroups_rpc as sg_rpc -from neutron.common import constants as q_const -from neutron.common import rpc_compat -from neutron.common import topics -from neutron.db import api as db_api -from neutron.db import dhcp_rpc_base -from neutron.db import securitygroups_rpc_base as sg_db_rpc -from neutron import manager -from neutron.openstack.common import log -from neutron.openstack.common import uuidutils -from neutron.plugins.ml2 import db -from neutron.plugins.ml2 import driver_api as api -from neutron.plugins.ml2.drivers import type_tunnel -# REVISIT(kmestery): Allow the type and mechanism drivers to supply the -# mixins and eventually remove the direct dependencies on type_tunnel. - -LOG = log.getLogger(__name__) - -TAP_DEVICE_PREFIX = 'tap' -TAP_DEVICE_PREFIX_LENGTH = 3 - - -class RpcCallbacks(dhcp_rpc_base.DhcpRpcCallbackMixin, - sg_db_rpc.SecurityGroupServerRpcCallbackMixin, - type_tunnel.TunnelRpcCallbackMixin): - - RPC_API_VERSION = '1.1' - # history - # 1.0 Initial version (from openvswitch/linuxbridge) - # 1.1 Support Security Group RPC - - # FIXME(ihrachys): we can't use rpc_compat.RpcCallback here due to - # inheritance problems - target = messaging.Target(version=RPC_API_VERSION) - - def __init__(self, notifier, type_manager): - # REVISIT(kmestery): This depends on the first three super classes - # not having their own __init__ functions. If an __init__() is added - # to one, this could break. Fix this and add a unit test to cover this - # test in H3. - super(RpcCallbacks, self).__init__(notifier, type_manager) - - @classmethod - def _device_to_port_id(cls, device): - # REVISIT(rkukura): Consider calling into MechanismDrivers to - # process device names, or having MechanismDrivers supply list - # of device prefixes to strip. 
- if device.startswith(TAP_DEVICE_PREFIX): - return device[TAP_DEVICE_PREFIX_LENGTH:] - else: - # REVISIT(irenab): Consider calling into bound MD to - # handle the get_device_details RPC, then remove the 'else' clause - if not uuidutils.is_uuid_like(device): - port = db.get_port_from_device_mac(device) - if port: - return port.id - return device - - @classmethod - def get_port_from_device(cls, device): - port_id = cls._device_to_port_id(device) - port = db.get_port_and_sgs(port_id) - if port: - port['device'] = device - return port - - def get_device_details(self, rpc_context, **kwargs): - """Agent requests device details.""" - agent_id = kwargs.get('agent_id') - device = kwargs.get('device') - LOG.debug(_("Device %(device)s details requested by agent " - "%(agent_id)s"), - {'device': device, 'agent_id': agent_id}) - port_id = self._device_to_port_id(device) - - session = db_api.get_session() - with session.begin(subtransactions=True): - port = db.get_port(session, port_id) - if not port: - LOG.warning(_("Device %(device)s requested by agent " - "%(agent_id)s not found in database"), - {'device': device, 'agent_id': agent_id}) - return {'device': device} - - segments = db.get_network_segments(session, port.network_id) - if not segments: - LOG.warning(_("Device %(device)s requested by agent " - "%(agent_id)s has network %(network_id)s with " - "no segments"), - {'device': device, - 'agent_id': agent_id, - 'network_id': port.network_id}) - return {'device': device} - - binding = db.ensure_port_binding(session, port.id) - if not binding.segment: - LOG.warning(_("Device %(device)s requested by agent " - "%(agent_id)s on network %(network_id)s not " - "bound, vif_type: %(vif_type)s"), - {'device': device, - 'agent_id': agent_id, - 'network_id': port.network_id, - 'vif_type': binding.vif_type}) - return {'device': device} - - segment = self._find_segment(segments, binding.segment) - if not segment: - LOG.warning(_("Device %(device)s requested by agent " - "%(agent_id)s 
on network %(network_id)s " - "invalid segment, vif_type: %(vif_type)s"), - {'device': device, - 'agent_id': agent_id, - 'network_id': port.network_id, - 'vif_type': binding.vif_type}) - return {'device': device} - - new_status = (q_const.PORT_STATUS_BUILD if port.admin_state_up - else q_const.PORT_STATUS_DOWN) - if port.status != new_status: - plugin = manager.NeutronManager.get_plugin() - plugin.update_port_status(rpc_context, - port_id, - new_status) - port.status = new_status - entry = {'device': device, - 'network_id': port.network_id, - 'port_id': port.id, - 'admin_state_up': port.admin_state_up, - 'network_type': segment[api.NETWORK_TYPE], - 'segmentation_id': segment[api.SEGMENTATION_ID], - 'physical_network': segment[api.PHYSICAL_NETWORK]} - LOG.debug(_("Returning: %s"), entry) - return entry - - def _find_segment(self, segments, segment_id): - for segment in segments: - if segment[api.ID] == segment_id: - return segment - - def update_device_down(self, rpc_context, **kwargs): - """Device no longer exists on agent.""" - # TODO(garyk) - live migration and port status - agent_id = kwargs.get('agent_id') - device = kwargs.get('device') - host = kwargs.get('host') - LOG.debug(_("Device %(device)s no longer exists at agent " - "%(agent_id)s"), - {'device': device, 'agent_id': agent_id}) - plugin = manager.NeutronManager.get_plugin() - port_id = self._device_to_port_id(device) - port_exists = True - if (host and not plugin.port_bound_to_host(port_id, host)): - LOG.debug(_("Device %(device)s not bound to the" - " agent host %(host)s"), - {'device': device, 'host': host}) - return {'device': device, - 'exists': port_exists} - - port_exists = plugin.update_port_status(rpc_context, port_id, - q_const.PORT_STATUS_DOWN) - - return {'device': device, - 'exists': port_exists} - - def update_device_up(self, rpc_context, **kwargs): - """Device is up on agent.""" - agent_id = kwargs.get('agent_id') - device = kwargs.get('device') - host = kwargs.get('host') - 
LOG.debug(_("Device %(device)s up at agent %(agent_id)s"), - {'device': device, 'agent_id': agent_id}) - plugin = manager.NeutronManager.get_plugin() - port_id = self._device_to_port_id(device) - if (host and not plugin.port_bound_to_host(port_id, host)): - LOG.debug(_("Device %(device)s not bound to the" - " agent host %(host)s"), - {'device': device, 'host': host}) - return - - plugin.update_port_status(rpc_context, port_id, - q_const.PORT_STATUS_ACTIVE) - - -class AgentNotifierApi(rpc_compat.RpcProxy, - sg_rpc.SecurityGroupAgentRpcApiMixin, - type_tunnel.TunnelAgentRpcApiMixin): - """Agent side of the openvswitch rpc API. - - API version history: - 1.0 - Initial version. - 1.1 - Added get_active_networks_info, create_dhcp_port, - update_dhcp_port, and removed get_dhcp_port methods. - - """ - - BASE_RPC_API_VERSION = '1.1' - - def __init__(self, topic): - super(AgentNotifierApi, self).__init__( - topic=topic, default_version=self.BASE_RPC_API_VERSION) - self.topic_network_delete = topics.get_topic_name(topic, - topics.NETWORK, - topics.DELETE) - self.topic_port_update = topics.get_topic_name(topic, - topics.PORT, - topics.UPDATE) - - def network_delete(self, context, network_id): - self.fanout_cast(context, - self.make_msg('network_delete', - network_id=network_id), - topic=self.topic_network_delete) - - def port_update(self, context, port, network_type, segmentation_id, - physical_network): - self.fanout_cast(context, - self.make_msg('port_update', - port=port, - network_type=network_type, - segmentation_id=segmentation_id, - physical_network=physical_network), - topic=self.topic_port_update) diff --git a/neutron/plugins/mlnx/README b/neutron/plugins/mlnx/README deleted file mode 100644 index 97c24ce0b..000000000 --- a/neutron/plugins/mlnx/README +++ /dev/null @@ -1,8 +0,0 @@ -Mellanox Neutron Plugin - -This plugin implements Neutron v2 APIs with support for -Mellanox embedded switch functionality as part of the -VPI (Ethernet/InfiniBand) HCA. 
- -For more details on the plugin, please refer to the following link: -https://wiki.openstack.org/wiki/Mellanox-Quantum diff --git a/neutron/plugins/mlnx/__init__.py b/neutron/plugins/mlnx/__init__.py deleted file mode 100644 index c818bfe31..000000000 --- a/neutron/plugins/mlnx/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2013 Mellanox Technologies, Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/neutron/plugins/mlnx/agent/__init__.py b/neutron/plugins/mlnx/agent/__init__.py deleted file mode 100644 index c818bfe31..000000000 --- a/neutron/plugins/mlnx/agent/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2013 Mellanox Technologies, Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
diff --git a/neutron/plugins/mlnx/agent/eswitch_neutron_agent.py b/neutron/plugins/mlnx/agent/eswitch_neutron_agent.py deleted file mode 100644 index f60f02bb7..000000000 --- a/neutron/plugins/mlnx/agent/eswitch_neutron_agent.py +++ /dev/null @@ -1,438 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2013 Mellanox Technologies, Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import socket -import sys -import time - -import eventlet -eventlet.monkey_patch() - -from oslo.config import cfg - -from neutron.agent import rpc as agent_rpc -from neutron.agent import securitygroups_rpc as sg_rpc -from neutron.common import config as common_config -from neutron.common import constants as q_constants -from neutron.common import rpc_compat -from neutron.common import topics -from neutron.common import utils as q_utils -from neutron import context -from neutron.openstack.common import log as logging -from neutron.openstack.common import loopingcall -from neutron.plugins.common import constants as p_const -from neutron.plugins.mlnx.agent import utils -from neutron.plugins.mlnx.common import config # noqa -from neutron.plugins.mlnx.common import exceptions - -LOG = logging.getLogger(__name__) - - -class EswitchManager(object): - def __init__(self, interface_mappings, endpoint, timeout): - self.utils = utils.EswitchUtils(endpoint, timeout) - self.interface_mappings = interface_mappings - self.network_map = {} - 
self.utils.define_fabric_mappings(interface_mappings) - - def get_port_id_by_mac(self, port_mac): - for network_id, data in self.network_map.iteritems(): - for port in data['ports']: - if port['port_mac'] == port_mac: - return port['port_id'] - err_msg = _("Agent cache inconsistency - port id " - "is not stored for %s") % port_mac - LOG.error(err_msg) - raise exceptions.MlnxException(err_msg=err_msg) - - def get_vnics_mac(self): - return set(self.utils.get_attached_vnics().keys()) - - def vnic_port_exists(self, port_mac): - return port_mac in self.utils.get_attached_vnics() - - def remove_network(self, network_id): - if network_id in self.network_map: - del self.network_map[network_id] - else: - LOG.debug(_("Network %s not defined on Agent."), network_id) - - def port_down(self, network_id, physical_network, port_mac): - """Sets port to down. - - Check internal network map for port data. - If port exists set port to Down - """ - for network_id, data in self.network_map.iteritems(): - for port in data['ports']: - if port['port_mac'] == port_mac: - self.utils.port_down(physical_network, port_mac) - return - LOG.info(_('Network %s is not available on this agent'), network_id) - - def port_up(self, network_id, network_type, - physical_network, seg_id, port_id, port_mac): - """Sets port to up. - - Update internal network map with port data. 
- - Check if vnic defined - - configure eswitch vport - - set port to Up - """ - LOG.debug(_("Connecting port %s"), port_id) - - if network_id not in self.network_map: - self.provision_network(port_id, port_mac, - network_id, network_type, - physical_network, seg_id) - net_map = self.network_map[network_id] - net_map['ports'].append({'port_id': port_id, 'port_mac': port_mac}) - - if network_type == p_const.TYPE_VLAN: - LOG.info(_('Binding Segmentation ID %(seg_id)s' - 'to eSwitch for vNIC mac_address %(mac)s'), - {'seg_id': seg_id, - 'mac': port_mac}) - self.utils.set_port_vlan_id(physical_network, - seg_id, - port_mac) - self.utils.port_up(physical_network, port_mac) - else: - LOG.error(_('Unsupported network type %s'), network_type) - - def port_release(self, port_mac): - """Clear port configuration from eSwitch.""" - for network_id, net_data in self.network_map.iteritems(): - for port in net_data['ports']: - if port['port_mac'] == port_mac: - self.utils.port_release(net_data['physical_network'], - port['port_mac']) - return - LOG.info(_('Port_mac %s is not available on this agent'), port_mac) - - def provision_network(self, port_id, port_mac, - network_id, network_type, - physical_network, segmentation_id): - LOG.info(_("Provisioning network %s"), network_id) - if network_type == p_const.TYPE_VLAN: - LOG.debug(_("Creating VLAN Network")) - else: - LOG.error(_("Unknown network type %(network_type)s " - "for network %(network_id)s"), - {'network_type': network_type, - 'network_id': network_id}) - return - data = { - 'physical_network': physical_network, - 'network_type': network_type, - 'ports': [], - 'vlan_id': segmentation_id} - self.network_map[network_id] = data - - -class MlnxEswitchRpcCallbacks(rpc_compat.RpcCallback, - sg_rpc.SecurityGroupAgentRpcCallbackMixin): - - # Set RPC API version to 1.0 by default. 
- # history - # 1.1 Support Security Group RPC - RPC_API_VERSION = '1.1' - - def __init__(self, context, agent): - super(MlnxEswitchRpcCallbacks, self).__init__() - self.context = context - self.agent = agent - self.eswitch = agent.eswitch - self.sg_agent = agent - - def network_delete(self, context, **kwargs): - LOG.debug(_("network_delete received")) - network_id = kwargs.get('network_id') - if not network_id: - LOG.warning(_("Invalid Network ID, cannot remove Network")) - else: - LOG.debug(_("Delete network %s"), network_id) - self.eswitch.remove_network(network_id) - - def port_update(self, context, **kwargs): - LOG.debug(_("port_update received")) - port = kwargs.get('port') - net_type = kwargs.get('network_type') - segmentation_id = kwargs.get('segmentation_id') - if not segmentation_id: - # compatibility with pre-Havana RPC vlan_id encoding - segmentation_id = kwargs.get('vlan_id') - physical_network = kwargs.get('physical_network') - net_id = port['network_id'] - if self.eswitch.vnic_port_exists(port['mac_address']): - if 'security_groups' in port: - self.sg_agent.refresh_firewall() - try: - if port['admin_state_up']: - self.eswitch.port_up(net_id, - net_type, - physical_network, - segmentation_id, - port['id'], - port['mac_address']) - # update plugin about port status - self.agent.plugin_rpc.update_device_up(self.context, - port['mac_address'], - self.agent.agent_id, - cfg.CONF.host) - else: - self.eswitch.port_down(net_id, - physical_network, - port['mac_address']) - # update plugin about port status - self.agent.plugin_rpc.update_device_down( - self.context, - port['mac_address'], - self.agent.agent_id, - cfg.CONF.host) - except rpc_compat.MessagingTimeout: - LOG.error(_("RPC timeout while updating port %s"), port['id']) - else: - LOG.debug(_("No port %s defined on agent."), port['id']) - - -class MlnxEswitchPluginApi(agent_rpc.PluginApi, - sg_rpc.SecurityGroupServerRpcApiMixin): - pass - - -class 
MlnxEswitchNeutronAgent(sg_rpc.SecurityGroupAgentRpcMixin): - # Set RPC API version to 1.0 by default. - #RPC_API_VERSION = '1.0' - - def __init__(self, interface_mapping): - self._polling_interval = cfg.CONF.AGENT.polling_interval - self._setup_eswitches(interface_mapping) - configurations = {'interface_mappings': interface_mapping} - self.agent_state = { - 'binary': 'neutron-mlnx-agent', - 'host': cfg.CONF.host, - 'topic': q_constants.L2_AGENT_TOPIC, - 'configurations': configurations, - 'agent_type': q_constants.AGENT_TYPE_MLNX, - 'start_flag': True} - self._setup_rpc() - self.init_firewall() - - def _setup_eswitches(self, interface_mapping): - daemon = cfg.CONF.ESWITCH.daemon_endpoint - timeout = cfg.CONF.ESWITCH.request_timeout - self.eswitch = EswitchManager(interface_mapping, daemon, timeout) - - def _report_state(self): - try: - devices = len(self.eswitch.get_vnics_mac()) - self.agent_state.get('configurations')['devices'] = devices - self.state_rpc.report_state(self.context, - self.agent_state) - self.agent_state.pop('start_flag', None) - except Exception: - LOG.exception(_("Failed reporting state!")) - - def _setup_rpc(self): - self.agent_id = 'mlnx-agent.%s' % socket.gethostname() - LOG.info(_("RPC agent_id: %s"), self.agent_id) - - self.topic = topics.AGENT - self.plugin_rpc = MlnxEswitchPluginApi(topics.PLUGIN) - self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN) - # RPC network init - self.context = context.get_admin_context_without_session() - # Handle updates from service - self.endpoints = [MlnxEswitchRpcCallbacks(self.context, self)] - # Define the listening consumers for the agent - consumers = [[topics.PORT, topics.UPDATE], - [topics.NETWORK, topics.DELETE], - [topics.SECURITY_GROUP, topics.UPDATE]] - self.connection = agent_rpc.create_consumers(self.endpoints, - self.topic, - consumers) - - report_interval = cfg.CONF.AGENT.report_interval - if report_interval: - heartbeat = loopingcall.FixedIntervalLoopingCall( - self._report_state) 
- heartbeat.start(interval=report_interval) - - def update_ports(self, registered_ports): - ports = self.eswitch.get_vnics_mac() - if ports == registered_ports: - return - added = ports - registered_ports - removed = registered_ports - ports - return {'current': ports, - 'added': added, - 'removed': removed} - - def process_network_ports(self, port_info): - resync_a = False - resync_b = False - if port_info.get('added'): - LOG.debug(_("Ports added!")) - resync_a = self.treat_devices_added(port_info['added']) - if port_info.get('removed'): - LOG.debug(_("Ports removed!")) - resync_b = self.treat_devices_removed(port_info['removed']) - # If one of the above opertaions fails => resync with plugin - return (resync_a | resync_b) - - def treat_vif_port(self, port_id, port_mac, - network_id, network_type, - physical_network, segmentation_id, - admin_state_up): - if self.eswitch.vnic_port_exists(port_mac): - if admin_state_up: - self.eswitch.port_up(network_id, - network_type, - physical_network, - segmentation_id, - port_id, - port_mac) - else: - self.eswitch.port_down(network_id, physical_network, port_mac) - else: - LOG.debug(_("No port %s defined on agent."), port_id) - - def treat_devices_added(self, devices): - resync = False - for device in devices: - LOG.info(_("Adding port with mac %s"), device) - try: - dev_details = self.plugin_rpc.get_device_details( - self.context, - device, - self.agent_id) - except Exception as e: - LOG.debug(_("Unable to get device dev_details for device " - "with mac_address %(device)s: due to %(exc)s"), - {'device': device, 'exc': e}) - resync = True - continue - if 'port_id' in dev_details: - LOG.info(_("Port %s updated"), device) - LOG.debug(_("Device details %s"), str(dev_details)) - self.treat_vif_port(dev_details['port_id'], - dev_details['device'], - dev_details['network_id'], - dev_details['network_type'], - dev_details['physical_network'], - dev_details['segmentation_id'], - dev_details['admin_state_up']) - if 
dev_details.get('admin_state_up'): - self.plugin_rpc.update_device_up(self.context, - device, - self.agent_id) - else: - LOG.debug(_("Device with mac_address %s not defined " - "on Neutron Plugin"), device) - return resync - - def treat_devices_removed(self, devices): - resync = False - for device in devices: - LOG.info(_("Removing device with mac_address %s"), device) - try: - port_id = self.eswitch.get_port_id_by_mac(device) - dev_details = self.plugin_rpc.update_device_down(self.context, - port_id, - self.agent_id, - cfg.CONF.host) - except Exception as e: - LOG.debug(_("Removing port failed for device %(device)s " - "due to %(exc)s"), {'device': device, 'exc': e}) - resync = True - continue - if dev_details['exists']: - LOG.info(_("Port %s updated."), device) - else: - LOG.debug(_("Device %s not defined on plugin"), device) - self.eswitch.port_release(device) - return resync - - def daemon_loop(self): - sync = True - ports = set() - - LOG.info(_("eSwitch Agent Started!")) - - while True: - try: - start = time.time() - if sync: - LOG.info(_("Agent out of sync with plugin!")) - ports.clear() - sync = False - - port_info = self.update_ports(ports) - # notify plugin about port deltas - if port_info: - LOG.debug(_("Agent loop process devices!")) - # If treat devices fails - must resync with plugin - sync = self.process_network_ports(port_info) - ports = port_info['current'] - except exceptions.RequestTimeout: - LOG.exception(_("Request timeout in agent event loop " - "eSwitchD is not responding - exiting...")) - raise SystemExit(1) - except Exception: - LOG.exception(_("Error in agent event loop")) - sync = True - # sleep till end of polling interval - elapsed = (time.time() - start) - if (elapsed < self._polling_interval): - time.sleep(self._polling_interval - elapsed) - else: - LOG.debug(_("Loop iteration exceeded interval " - "(%(polling_interval)s vs. 
%(elapsed)s)"), - {'polling_interval': self._polling_interval, - 'elapsed': elapsed}) - - -def main(): - common_config.init(sys.argv[1:]) - common_config.setup_logging(cfg.CONF) - - try: - interface_mappings = q_utils.parse_mappings( - cfg.CONF.ESWITCH.physical_interface_mappings) - except ValueError as e: - LOG.error(_("Parsing physical_interface_mappings failed: %s." - " Agent terminated!"), e) - sys.exit(1) - LOG.info(_("Interface mappings: %s"), interface_mappings) - - try: - agent = MlnxEswitchNeutronAgent(interface_mappings) - except Exception as e: - LOG.error(_("Failed on Agent initialisation : %s." - " Agent terminated!"), e) - sys.exit(1) - - # Start everything. - LOG.info(_("Agent initialised successfully, now running... ")) - agent.daemon_loop() - sys.exit(0) - - -if __name__ == '__main__': - main() diff --git a/neutron/plugins/mlnx/agent/utils.py b/neutron/plugins/mlnx/agent/utils.py deleted file mode 100644 index 924be790f..000000000 --- a/neutron/plugins/mlnx/agent/utils.py +++ /dev/null @@ -1,144 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2013 Mellanox Technologies, Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from neutron.openstack.common import importutils -from neutron.openstack.common import jsonutils -from neutron.openstack.common import log as logging -from neutron.plugins.mlnx.common import comm_utils -from neutron.plugins.mlnx.common import exceptions - -zmq = importutils.try_import('eventlet.green.zmq') - -LOG = logging.getLogger(__name__) - - -class EswitchUtils(object): - def __init__(self, daemon_endpoint, timeout): - if not zmq: - msg = _("Failed to import eventlet.green.zmq. " - "Won't connect to eSwitchD - exiting...") - LOG.error(msg) - raise SystemExit(1) - self.__conn = None - self.daemon = daemon_endpoint - self.timeout = timeout - - @property - def _conn(self): - if self.__conn is None: - context = zmq.Context() - socket = context.socket(zmq.REQ) - socket.setsockopt(zmq.LINGER, 0) - socket.connect(self.daemon) - self.__conn = socket - self.poller = zmq.Poller() - self.poller.register(self._conn, zmq.POLLIN) - return self.__conn - - @comm_utils.RetryDecorator(exceptions.RequestTimeout) - def send_msg(self, msg): - self._conn.send(msg) - - socks = dict(self.poller.poll(self.timeout)) - if socks.get(self._conn) == zmq.POLLIN: - recv_msg = self._conn.recv() - response = self.parse_response_msg(recv_msg) - return response - else: - self._conn.setsockopt(zmq.LINGER, 0) - self._conn.close() - self.poller.unregister(self._conn) - self.__conn = None - raise exceptions.RequestTimeout() - - def parse_response_msg(self, recv_msg): - msg = jsonutils.loads(recv_msg) - if msg['status'] == 'OK': - if 'response' in msg: - return msg.get('response') - return - elif msg['status'] == 'FAIL': - msg_dict = dict(action=msg['action'], reason=msg['reason']) - error_msg = _("Action %(action)s failed: %(reason)s") % msg_dict - else: - error_msg = _("Unknown operation status %s") % msg['status'] - LOG.error(error_msg) - raise exceptions.OperationFailed(err_msg=error_msg) - - def get_attached_vnics(self): - LOG.debug(_("get_attached_vnics")) - msg = jsonutils.dumps({'action': 
'get_vnics', 'fabric': '*'}) - vnics = self.send_msg(msg) - return vnics - - def set_port_vlan_id(self, physical_network, - segmentation_id, port_mac): - LOG.debug(_("Set Vlan %(segmentation_id)s on Port %(port_mac)s " - "on Fabric %(physical_network)s"), - {'port_mac': port_mac, - 'segmentation_id': segmentation_id, - 'physical_network': physical_network}) - msg = jsonutils.dumps({'action': 'set_vlan', - 'fabric': physical_network, - 'port_mac': port_mac, - 'vlan': segmentation_id}) - self.send_msg(msg) - - def define_fabric_mappings(self, interface_mapping): - for fabric, phy_interface in interface_mapping.iteritems(): - LOG.debug(_("Define Fabric %(fabric)s on interface %(ifc)s"), - {'fabric': fabric, - 'ifc': phy_interface}) - msg = jsonutils.dumps({'action': 'define_fabric_mapping', - 'fabric': fabric, - 'interface': phy_interface}) - self.send_msg(msg) - - def port_up(self, fabric, port_mac): - LOG.debug(_("Port Up for %(port_mac)s on fabric %(fabric)s"), - {'port_mac': port_mac, 'fabric': fabric}) - msg = jsonutils.dumps({'action': 'port_up', - 'fabric': fabric, - 'ref_by': 'mac_address', - 'mac': 'port_mac'}) - self.send_msg(msg) - - def port_down(self, fabric, port_mac): - LOG.debug(_("Port Down for %(port_mac)s on fabric %(fabric)s"), - {'port_mac': port_mac, 'fabric': fabric}) - msg = jsonutils.dumps({'action': 'port_down', - 'fabric': fabric, - 'ref_by': 'mac_address', - 'mac': port_mac}) - self.send_msg(msg) - - def port_release(self, fabric, port_mac): - LOG.debug(_("Port Release for %(port_mac)s on fabric %(fabric)s"), - {'port_mac': port_mac, 'fabric': fabric}) - msg = jsonutils.dumps({'action': 'port_release', - 'fabric': fabric, - 'ref_by': 'mac_address', - 'mac': port_mac}) - self.send_msg(msg) - - def get_eswitch_ports(self, fabric): - # TODO(irena) - to implement for next phase - return {} - - def get_eswitch_id(self, fabric): - # TODO(irena) - to implement for next phase - return "" diff --git a/neutron/plugins/mlnx/agent_notify_api.py 
b/neutron/plugins/mlnx/agent_notify_api.py deleted file mode 100644 index 1874da826..000000000 --- a/neutron/plugins/mlnx/agent_notify_api.py +++ /dev/null @@ -1,67 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2013 Mellanox Technologies, Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from oslo.config import cfg - -from neutron.agent import securitygroups_rpc as sg_rpc -from neutron.common import rpc_compat -from neutron.common import topics -from neutron.openstack.common import log as logging - -LOG = logging.getLogger(__name__) - - -class AgentNotifierApi(rpc_compat.RpcProxy, - sg_rpc.SecurityGroupAgentRpcApiMixin): - """Agent side of the Embedded Switch RPC API. - - API version history: - 1.0 - Initial version. - 1.1 - Added get_active_networks_info, create_dhcp_port, - and update_dhcp_port methods. 
- """ - BASE_RPC_API_VERSION = '1.1' - - def __init__(self, topic): - super(AgentNotifierApi, self).__init__( - topic=topic, default_version=self.BASE_RPC_API_VERSION) - self.topic = topic - self.topic_network_delete = topics.get_topic_name(topic, - topics.NETWORK, - topics.DELETE) - self.topic_port_update = topics.get_topic_name(topic, - topics.PORT, - topics.UPDATE) - - def network_delete(self, context, network_id): - LOG.debug(_("Sending delete network message")) - self.fanout_cast(context, - self.make_msg('network_delete', - network_id=network_id), - topic=self.topic_network_delete) - - def port_update(self, context, port, physical_network, - network_type, vlan_id): - LOG.debug(_("Sending update port message")) - kwargs = {'port': port, - 'network_type': network_type, - 'physical_network': physical_network, - 'segmentation_id': vlan_id} - if cfg.CONF.AGENT.rpc_support_old_agents: - kwargs['vlan_id'] = vlan_id - msg = self.make_msg('port_update', **kwargs) - self.fanout_cast(context, msg, - topic=self.topic_port_update) diff --git a/neutron/plugins/mlnx/common/__init__.py b/neutron/plugins/mlnx/common/__init__.py deleted file mode 100644 index c818bfe31..000000000 --- a/neutron/plugins/mlnx/common/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2013 Mellanox Technologies, Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
diff --git a/neutron/plugins/mlnx/common/comm_utils.py b/neutron/plugins/mlnx/common/comm_utils.py deleted file mode 100644 index a1a0f4a8a..000000000 --- a/neutron/plugins/mlnx/common/comm_utils.py +++ /dev/null @@ -1,66 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2013 Mellanox Technologies, Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import time - -from oslo.config import cfg - -from neutron.openstack.common import log as logging -from neutron.plugins.mlnx.common import config # noqa - -LOG = logging.getLogger(__name__) - - -class RetryDecorator(object): - """Retry decorator reruns a method 'retries' times if an exception occurs. 
- - Decorator for retrying a method if exceptionToCheck exception occurs - If method raises exception, retries 'retries' times with increasing - back off period between calls with 'interval' multiplier - - :param exceptionToCheck: the exception to check - :param interval: initial delay between retries in seconds - :param retries: number of times to try before giving up - :raises: exceptionToCheck - """ - sleep_fn = time.sleep - - def __init__(self, exceptionToCheck, - interval=cfg.CONF.ESWITCH.request_timeout / 1000, - retries=cfg.CONF.ESWITCH.retries, - backoff_rate=cfg.CONF.ESWITCH.backoff_rate): - self.exc = exceptionToCheck - self.interval = interval - self.retries = retries - self.backoff_rate = backoff_rate - - def __call__(self, original_func): - def decorated(*args, **kwargs): - sleep_interval = self.interval - num_of_iter = self.retries - while num_of_iter > 0: - try: - return original_func(*args, **kwargs) - except self.exc: - LOG.debug(_("Request timeout - call again after " - "%s seconds"), sleep_interval) - RetryDecorator.sleep_fn(sleep_interval) - num_of_iter -= 1 - sleep_interval *= self.backoff_rate - - return original_func(*args, **kwargs) - return decorated diff --git a/neutron/plugins/mlnx/common/config.py b/neutron/plugins/mlnx/common/config.py deleted file mode 100644 index b75e87be3..000000000 --- a/neutron/plugins/mlnx/common/config.py +++ /dev/null @@ -1,80 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2013 Mellanox Technologies, Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo.config import cfg - -from neutron.agent.common import config -from neutron.plugins.mlnx.common import constants - -DEFAULT_VLAN_RANGES = ['default:1:1000'] -DEFAULT_INTERFACE_MAPPINGS = [] - -vlan_opts = [ - cfg.StrOpt('tenant_network_type', default='vlan', - help=_("Network type for tenant networks " - "(local, vlan, or none)")), - cfg.ListOpt('network_vlan_ranges', - default=DEFAULT_VLAN_RANGES, - help=_("List of :: " - "or ")), - cfg.ListOpt('physical_network_type_mappings', - default=[], - help=_("List of : " - " with physical_network_type is either eth or ib")), - cfg.StrOpt('physical_network_type', default='eth', - help=_("Physical network type for provider network " - "(eth or ib)")) -] - - -eswitch_opts = [ - cfg.ListOpt('physical_interface_mappings', - default=DEFAULT_INTERFACE_MAPPINGS, - help=_("List of :")), - cfg.StrOpt('vnic_type', - default=constants.VIF_TYPE_DIRECT, - help=_("Type of VM network interface: mlnx_direct or " - "hostdev")), - cfg.StrOpt('daemon_endpoint', - default='tcp://127.0.0.1:60001', - help=_('eswitch daemon end point')), - cfg.IntOpt('request_timeout', default=3000, - help=_("The number of milliseconds the agent will wait for " - "response on request to daemon.")), - cfg.IntOpt('retries', default=3, - help=_("The number of retries the agent will send request " - "to daemon before giving up")), - cfg.IntOpt('backoff_rate', default=2, - help=_("backoff rate multiplier for waiting period between " - "retries for request to daemon, i.e. 
value of 2 will " - " double the request timeout each retry")), -] - -agent_opts = [ - cfg.IntOpt('polling_interval', default=2, - help=_("The number of seconds the agent will wait between " - "polling for local device changes.")), - cfg.BoolOpt('rpc_support_old_agents', default=False, - help=_("Enable server RPC compatibility with old agents")), -] - - -cfg.CONF.register_opts(vlan_opts, "MLNX") -cfg.CONF.register_opts(eswitch_opts, "ESWITCH") -cfg.CONF.register_opts(agent_opts, "AGENT") -config.register_agent_state_opts_helper(cfg.CONF) -config.register_root_helper(cfg.CONF) diff --git a/neutron/plugins/mlnx/common/constants.py b/neutron/plugins/mlnx/common/constants.py deleted file mode 100644 index 2277cb7bb..000000000 --- a/neutron/plugins/mlnx/common/constants.py +++ /dev/null @@ -1,28 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2013 Mellanox Technologies, Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -LOCAL_VLAN_ID = -2 -FLAT_VLAN_ID = -1 - -# Values for physical network_type -TYPE_IB = 'ib' -TYPE_ETH = 'eth' - -VIF_TYPE_DIRECT = 'mlnx_direct' -VIF_TYPE_HOSTDEV = 'hostdev' - -VNIC_TYPE = 'vnic_type' diff --git a/neutron/plugins/mlnx/common/exceptions.py b/neutron/plugins/mlnx/common/exceptions.py deleted file mode 100644 index 6fd168215..000000000 --- a/neutron/plugins/mlnx/common/exceptions.py +++ /dev/null @@ -1,30 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2013 Mellanox Technologies, Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from neutron.common import exceptions as qexc - - -class MlnxException(qexc.NeutronException): - message = _("Mlnx Exception: %(err_msg)s") - - -class RequestTimeout(qexc.NeutronException): - message = _("Request Timeout: no response from eSwitchD") - - -class OperationFailed(qexc.NeutronException): - message = _("Operation Failed: %(err_msg)s") diff --git a/neutron/plugins/mlnx/db/__init__.py b/neutron/plugins/mlnx/db/__init__.py deleted file mode 100644 index c818bfe31..000000000 --- a/neutron/plugins/mlnx/db/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2013 Mellanox Technologies, Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/neutron/plugins/mlnx/db/mlnx_db_v2.py b/neutron/plugins/mlnx/db/mlnx_db_v2.py deleted file mode 100644 index 507934b0d..000000000 --- a/neutron/plugins/mlnx/db/mlnx_db_v2.py +++ /dev/null @@ -1,257 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2013 Mellanox Technologies, Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from six import moves -from sqlalchemy.orm import exc - -from neutron.common import exceptions as n_exc -import neutron.db.api as db -from neutron.db import models_v2 -from neutron.db import securitygroups_db as sg_db -from neutron import manager -from neutron.openstack.common import log as logging -from neutron.plugins.mlnx.common import config # noqa -from neutron.plugins.mlnx.db import mlnx_models_v2 - -LOG = logging.getLogger(__name__) - - -def _remove_non_allocatable_vlans(session, allocations, - physical_network, vlan_ids): - if physical_network in allocations: - for entry in allocations[physical_network]: - try: - # see if vlan is allocatable - vlan_ids.remove(entry.segmentation_id) - except KeyError: - # it's not allocatable, so check if its allocated - if not entry.allocated: - # it's not, so remove it from table - LOG.debug(_( - "Removing vlan %(seg_id)s on " - "physical network " - "%(net)s from pool"), - {'seg_id': entry.segmentation_id, - 'net': physical_network}) - session.delete(entry) - del allocations[physical_network] - - -def _add_missing_allocatable_vlans(session, physical_network, vlan_ids): - for vlan_id in sorted(vlan_ids): - entry = mlnx_models_v2.SegmentationIdAllocation(physical_network, - vlan_id) - session.add(entry) - - -def _remove_unconfigured_vlans(session, allocations): - for entries in allocations.itervalues(): - for entry in entries: - if not entry.allocated: - LOG.debug(_("Removing vlan %(seg_id)s on physical " - "network %(net)s from pool"), - {'seg_id': entry.segmentation_id, - 'net': entry.physical_network}) - session.delete(entry) - - -def sync_network_states(network_vlan_ranges): - """Synchronize network_states table with current configured VLAN ranges.""" - - session = db.get_session() - with session.begin(): - # get existing allocations for all physical networks - allocations = dict() - entries = (session.query(mlnx_models_v2.SegmentationIdAllocation). 
- all()) - for entry in entries: - allocations.setdefault(entry.physical_network, set()).add(entry) - - # process vlan ranges for each configured physical network - for physical_network, vlan_ranges in network_vlan_ranges.iteritems(): - # determine current configured allocatable vlans for this - # physical network - vlan_ids = set() - for vlan_range in vlan_ranges: - vlan_ids |= set(moves.xrange(vlan_range[0], vlan_range[1] + 1)) - - # remove from table unallocated vlans not currently allocatable - _remove_non_allocatable_vlans(session, allocations, - physical_network, vlan_ids) - - # add missing allocatable vlans to table - _add_missing_allocatable_vlans(session, physical_network, vlan_ids) - - # remove from table unallocated vlans for any unconfigured physical - # networks - _remove_unconfigured_vlans(session, allocations) - - -def get_network_state(physical_network, segmentation_id): - """Get entry of specified network.""" - session = db.get_session() - qry = session.query(mlnx_models_v2.SegmentationIdAllocation) - qry = qry.filter_by(physical_network=physical_network, - segmentation_id=segmentation_id) - return qry.first() - - -def reserve_network(session): - with session.begin(subtransactions=True): - entry = (session.query(mlnx_models_v2.SegmentationIdAllocation). - filter_by(allocated=False). - with_lockmode('update'). - first()) - if not entry: - raise n_exc.NoNetworkAvailable() - LOG.debug(_("Reserving vlan %(seg_id)s on physical network " - "%(net)s from pool"), - {'seg_id': entry.segmentation_id, - 'net': entry.physical_network}) - entry.allocated = True - return (entry.physical_network, entry.segmentation_id) - - -def reserve_specific_network(session, physical_network, segmentation_id): - with session.begin(subtransactions=True): - log_args = {'seg_id': segmentation_id, 'phy_net': physical_network} - try: - entry = (session.query(mlnx_models_v2.SegmentationIdAllocation). - filter_by(physical_network=physical_network, - segmentation_id=segmentation_id). 
- with_lockmode('update').one()) - if entry.allocated: - raise n_exc.VlanIdInUse(vlan_id=segmentation_id, - physical_network=physical_network) - LOG.debug(_("Reserving specific vlan %(seg_id)s " - "on physical network %(phy_net)s from pool"), - log_args) - entry.allocated = True - except exc.NoResultFound: - LOG.debug(_("Reserving specific vlan %(seg_id)s on " - "physical network %(phy_net)s outside pool"), - log_args) - entry = mlnx_models_v2.SegmentationIdAllocation(physical_network, - segmentation_id) - entry.allocated = True - session.add(entry) - - -def release_network(session, physical_network, - segmentation_id, network_vlan_ranges): - with session.begin(subtransactions=True): - log_args = {'seg_id': segmentation_id, 'phy_net': physical_network} - try: - state = (session.query(mlnx_models_v2.SegmentationIdAllocation). - filter_by(physical_network=physical_network, - segmentation_id=segmentation_id). - with_lockmode('update'). - one()) - state.allocated = False - inside = False - for vlan_range in network_vlan_ranges.get(physical_network, []): - if (segmentation_id >= vlan_range[0] and - segmentation_id <= vlan_range[1]): - inside = True - break - if inside: - LOG.debug(_("Releasing vlan %(seg_id)s " - "on physical network " - "%(phy_net)s to pool"), - log_args) - else: - LOG.debug(_("Releasing vlan %(seg_id)s " - "on physical network " - "%(phy_net)s outside pool"), - log_args) - session.delete(state) - except exc.NoResultFound: - LOG.warning(_("vlan_id %(seg_id)s on physical network " - "%(phy_net)s not found"), - log_args) - - -def add_network_binding(session, network_id, network_type, - physical_network, vlan_id): - with session.begin(subtransactions=True): - binding = mlnx_models_v2.NetworkBinding(network_id, network_type, - physical_network, vlan_id) - session.add(binding) - - -def get_network_binding(session, network_id): - return (session.query(mlnx_models_v2.NetworkBinding). 
- filter_by(network_id=network_id).first()) - - -def add_port_profile_binding(session, port_id, vnic_type): - with session.begin(subtransactions=True): - binding = mlnx_models_v2.PortProfileBinding(port_id, vnic_type) - session.add(binding) - - -def get_port_profile_binding(session, port_id): - return (session.query(mlnx_models_v2.PortProfileBinding). - filter_by(port_id=port_id).first()) - - -def get_port_from_device(device): - """Get port from database.""" - LOG.debug(_("get_port_from_device() called")) - session = db.get_session() - sg_binding_port = sg_db.SecurityGroupPortBinding.port_id - - query = session.query(models_v2.Port, - sg_db.SecurityGroupPortBinding.security_group_id) - query = query.outerjoin(sg_db.SecurityGroupPortBinding, - models_v2.Port.id == sg_binding_port) - query = query.filter(models_v2.Port.id.startswith(device)) - port_and_sgs = query.all() - if not port_and_sgs: - return - port = port_and_sgs[0][0] - plugin = manager.NeutronManager.get_plugin() - port_dict = plugin._make_port_dict(port) - port_dict['security_groups'] = [ - sg_id for port_in_db, sg_id in port_and_sgs if sg_id - ] - port_dict['security_group_rules'] = [] - port_dict['security_group_source_groups'] = [] - port_dict['fixed_ips'] = [ip['ip_address'] - for ip in port['fixed_ips']] - return port_dict - - -def get_port_from_device_mac(device_mac): - """Get port from database.""" - LOG.debug(_("Get_port_from_device_mac() called")) - session = db.get_session() - qry = session.query(models_v2.Port).filter_by(mac_address=device_mac) - return qry.first() - - -def set_port_status(port_id, status): - """Set the port status.""" - LOG.debug(_("Set_port_status as %s called"), status) - session = db.get_session() - try: - port = session.query(models_v2.Port).filter_by(id=port_id).one() - port['status'] = status - session.merge(port) - session.flush() - except exc.NoResultFound: - raise n_exc.PortNotFound(port_id=port_id) diff --git a/neutron/plugins/mlnx/db/mlnx_models_v2.py 
b/neutron/plugins/mlnx/db/mlnx_models_v2.py deleted file mode 100644 index 561086157..000000000 --- a/neutron/plugins/mlnx/db/mlnx_models_v2.py +++ /dev/null @@ -1,86 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2013 Mellanox Technologies, Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import sqlalchemy as sa - -from neutron.db import model_base - - -class SegmentationIdAllocation(model_base.BASEV2): - """Represents allocation state of segmentation_id on physical network.""" - __tablename__ = 'segmentation_id_allocation' - - physical_network = sa.Column(sa.String(64), nullable=False, - primary_key=True) - segmentation_id = sa.Column(sa.Integer, nullable=False, primary_key=True, - autoincrement=False) - allocated = sa.Column(sa.Boolean, nullable=False, default=False) - - def __init__(self, physical_network, segmentation_id): - self.physical_network = physical_network - self.segmentation_id = segmentation_id - self.allocated = False - - def __repr__(self): - return "" % (self.physical_network, - self.segmentation_id, - self.allocated) - - -class NetworkBinding(model_base.BASEV2): - """Represents binding of virtual network. 
- - Binds network to physical_network and segmentation_id - """ - __tablename__ = 'mlnx_network_bindings' - - network_id = sa.Column(sa.String(36), - sa.ForeignKey('networks.id', ondelete="CASCADE"), - primary_key=True) - network_type = sa.Column(sa.String(32), nullable=False) - physical_network = sa.Column(sa.String(64)) - segmentation_id = sa.Column(sa.Integer, nullable=False) - - def __init__(self, network_id, network_type, physical_network, vlan_id): - self.network_id = network_id - self.network_type = network_type - self.physical_network = physical_network - self.segmentation_id = vlan_id - - def __repr__(self): - return "" % (self.network_id, - self.network_type, - self.physical_network, - self.segmentation_id) - - -class PortProfileBinding(model_base.BASEV2): - """Represents port profile binding to the port on virtual network.""" - __tablename__ = 'port_profile' - - port_id = sa.Column(sa.String(36), - sa.ForeignKey('ports.id', ondelete="CASCADE"), - primary_key=True) - vnic_type = sa.Column(sa.String(32), nullable=False) - - def __init__(self, port_id, vnic_type): - self.port_id = port_id - self.vnic_type = vnic_type - - def __repr__(self): - return "" % (self.port_id, - self.vnic_type) diff --git a/neutron/plugins/mlnx/mlnx_plugin.py b/neutron/plugins/mlnx/mlnx_plugin.py deleted file mode 100644 index 16d72df55..000000000 --- a/neutron/plugins/mlnx/mlnx_plugin.py +++ /dev/null @@ -1,512 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2013 Mellanox Technologies, Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -import sys - -from oslo.config import cfg - -from neutron.agent import securitygroups_rpc as sg_rpc -from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api -from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api -from neutron.api.v2 import attributes -from neutron.common import constants as q_const -from neutron.common import exceptions as n_exc -from neutron.common import rpc_compat -from neutron.common import topics -from neutron.common import utils -from neutron.db import agents_db -from neutron.db import agentschedulers_db -from neutron.db import db_base_plugin_v2 -from neutron.db import external_net_db -from neutron.db import extraroute_db -from neutron.db import l3_agentschedulers_db -from neutron.db import l3_gwmode_db -from neutron.db import portbindings_db -from neutron.db import quota_db # noqa -from neutron.db import securitygroups_rpc_base as sg_db_rpc -from neutron.extensions import portbindings -from neutron.extensions import providernet as provider -from neutron.openstack.common import importutils -from neutron.openstack.common import log as logging -from neutron.plugins.common import constants as svc_constants -from neutron.plugins.common import utils as plugin_utils -from neutron.plugins.mlnx import agent_notify_api -from neutron.plugins.mlnx.common import constants -from neutron.plugins.mlnx.db import mlnx_db_v2 as db -from neutron.plugins.mlnx import rpc_callbacks - -LOG = logging.getLogger(__name__) - - -class MellanoxEswitchPlugin(db_base_plugin_v2.NeutronDbPluginV2, - external_net_db.External_net_db_mixin, - extraroute_db.ExtraRoute_db_mixin, - l3_gwmode_db.L3_NAT_db_mixin, - sg_db_rpc.SecurityGroupServerRpcMixin, - l3_agentschedulers_db.L3AgentSchedulerDbMixin, - agentschedulers_db.DhcpAgentSchedulerDbMixin, - portbindings_db.PortBindingMixin): - """Realization of Neutron API on Mellanox HCA embedded switch technology. 
- - Current plugin provides embedded HCA Switch connectivity. - Code is based on the Linux Bridge plugin content to - support consistency with L3 & DHCP Agents. - - A new VLAN is created for each network. An agent is relied upon - to perform the actual HCA configuration on each host. - - The provider extension is also supported. - - The port binding extension enables an external application relay - information to and from the plugin. - """ - - # This attribute specifies whether the plugin supports or not - # bulk operations. Name mangling is used in order to ensure it - # is qualified by class - __native_bulk_support = True - - _supported_extension_aliases = ["provider", "external-net", "router", - "ext-gw-mode", "binding", "quotas", - "security-group", "agent", "extraroute", - "l3_agent_scheduler", - "dhcp_agent_scheduler"] - - @property - def supported_extension_aliases(self): - if not hasattr(self, '_aliases'): - aliases = self._supported_extension_aliases[:] - sg_rpc.disable_security_group_extension_by_config(aliases) - self._aliases = aliases - return self._aliases - - def __init__(self): - """Start Mellanox Neutron Plugin.""" - super(MellanoxEswitchPlugin, self).__init__() - self._parse_network_config() - db.sync_network_states(self.network_vlan_ranges) - self._set_tenant_network_type() - self.vnic_type = cfg.CONF.ESWITCH.vnic_type - self.base_binding_dict = { - portbindings.VIF_TYPE: self.vnic_type, - portbindings.VIF_DETAILS: { - # TODO(rkukura): Replace with new VIF security details - portbindings.CAP_PORT_FILTER: - 'security-group' in self.supported_extension_aliases}} - self._setup_rpc() - self.network_scheduler = importutils.import_object( - cfg.CONF.network_scheduler_driver - ) - self.router_scheduler = importutils.import_object( - cfg.CONF.router_scheduler_driver - ) - LOG.debug(_("Mellanox Embedded Switch Plugin initialisation complete")) - - def _setup_rpc(self): - # RPC support - self.service_topics = {svc_constants.CORE: topics.PLUGIN, - 
svc_constants.L3_ROUTER_NAT: topics.L3PLUGIN} - self.conn = rpc_compat.create_connection(new=True) - self.endpoints = [rpc_callbacks.MlnxRpcCallbacks(), - agents_db.AgentExtRpcCallback()] - for svc_topic in self.service_topics.values(): - self.conn.create_consumer(svc_topic, self.endpoints, fanout=False) - # Consume from all consumers in threads - self.conn.consume_in_threads() - self.notifier = agent_notify_api.AgentNotifierApi(topics.AGENT) - self.agent_notifiers[q_const.AGENT_TYPE_DHCP] = ( - dhcp_rpc_agent_api.DhcpAgentNotifyAPI() - ) - self.agent_notifiers[q_const.AGENT_TYPE_L3] = ( - l3_rpc_agent_api.L3AgentNotifyAPI() - ) - - def _parse_network_config(self): - self._parse_physical_network_types() - self._parse_network_vlan_ranges() - for network in self.network_vlan_ranges.keys(): - if not self.phys_network_type_maps.get(network): - self.phys_network_type_maps[network] = self.physical_net_type - - def _parse_physical_network_types(self): - """Parse physical network types configuration. - - Verify default physical network type is valid. - Parse physical network mappings. - """ - self.physical_net_type = cfg.CONF.MLNX.physical_network_type - if self.physical_net_type not in (constants.TYPE_ETH, - constants.TYPE_IB): - LOG.error(_("Invalid physical network type %(type)s." - "Server terminated!"), {'type': self.physical_net_type}) - raise SystemExit(1) - try: - self.phys_network_type_maps = utils.parse_mappings( - cfg.CONF.MLNX.physical_network_type_mappings) - except ValueError as e: - LOG.error(_("Parsing physical_network_type failed: %s." - " Server terminated!"), e) - raise SystemExit(1) - for network, type in self.phys_network_type_maps.iteritems(): - if type not in (constants.TYPE_ETH, constants.TYPE_IB): - LOG.error(_("Invalid physical network type %(type)s " - " for network %(net)s. 
Server terminated!"), - {'net': network, 'type': type}) - raise SystemExit(1) - LOG.info(_("Physical Network type mappings: %s"), - self.phys_network_type_maps) - - def _parse_network_vlan_ranges(self): - try: - self.network_vlan_ranges = plugin_utils.parse_network_vlan_ranges( - cfg.CONF.MLNX.network_vlan_ranges) - except Exception as ex: - LOG.error(_("%s. Server terminated!"), ex) - sys.exit(1) - LOG.info(_("Network VLAN ranges: %s"), self.network_vlan_ranges) - - def _extend_network_dict_provider(self, context, network): - binding = db.get_network_binding(context.session, network['id']) - network[provider.NETWORK_TYPE] = binding.network_type - if binding.network_type == svc_constants.TYPE_FLAT: - network[provider.PHYSICAL_NETWORK] = binding.physical_network - network[provider.SEGMENTATION_ID] = None - elif binding.network_type == svc_constants.TYPE_LOCAL: - network[provider.PHYSICAL_NETWORK] = None - network[provider.SEGMENTATION_ID] = None - else: - network[provider.PHYSICAL_NETWORK] = binding.physical_network - network[provider.SEGMENTATION_ID] = binding.segmentation_id - - def _set_tenant_network_type(self): - self.tenant_network_type = cfg.CONF.MLNX.tenant_network_type - if self.tenant_network_type not in [svc_constants.TYPE_VLAN, - svc_constants.TYPE_LOCAL, - svc_constants.TYPE_NONE]: - LOG.error(_("Invalid tenant_network_type: %s. 
" - "Service terminated!"), - self.tenant_network_type) - sys.exit(1) - - def _process_provider_create(self, context, attrs): - network_type = attrs.get(provider.NETWORK_TYPE) - physical_network = attrs.get(provider.PHYSICAL_NETWORK) - segmentation_id = attrs.get(provider.SEGMENTATION_ID) - - network_type_set = attributes.is_attr_set(network_type) - physical_network_set = attributes.is_attr_set(physical_network) - segmentation_id_set = attributes.is_attr_set(segmentation_id) - - if not (network_type_set or physical_network_set or - segmentation_id_set): - return (None, None, None) - - if not network_type_set: - msg = _("provider:network_type required") - raise n_exc.InvalidInput(error_message=msg) - elif network_type == svc_constants.TYPE_FLAT: - self._process_flat_net(segmentation_id_set) - segmentation_id = constants.FLAT_VLAN_ID - - elif network_type == svc_constants.TYPE_VLAN: - self._process_vlan_net(segmentation_id, segmentation_id_set) - - elif network_type == svc_constants.TYPE_LOCAL: - self._process_local_net(physical_network_set, - segmentation_id_set) - segmentation_id = constants.LOCAL_VLAN_ID - physical_network = None - - else: - msg = _("provider:network_type %s not supported") % network_type - raise n_exc.InvalidInput(error_message=msg) - physical_network = self._process_net_type(network_type, - physical_network, - physical_network_set) - return (network_type, physical_network, segmentation_id) - - def _process_flat_net(self, segmentation_id_set): - if segmentation_id_set: - msg = _("provider:segmentation_id specified for flat network") - raise n_exc.InvalidInput(error_message=msg) - - def _process_vlan_net(self, segmentation_id, segmentation_id_set): - if not segmentation_id_set: - msg = _("provider:segmentation_id required") - raise n_exc.InvalidInput(error_message=msg) - if not utils.is_valid_vlan_tag(segmentation_id): - msg = (_("provider:segmentation_id out of range " - "(%(min_id)s through %(max_id)s)") % - {'min_id': q_const.MIN_VLAN_TAG, - 
'max_id': q_const.MAX_VLAN_TAG}) - raise n_exc.InvalidInput(error_message=msg) - - def _process_local_net(self, physical_network_set, segmentation_id_set): - if physical_network_set: - msg = _("provider:physical_network specified for local " - "network") - raise n_exc.InvalidInput(error_message=msg) - if segmentation_id_set: - msg = _("provider:segmentation_id specified for local " - "network") - raise n_exc.InvalidInput(error_message=msg) - - def _process_net_type(self, network_type, - physical_network, - physical_network_set): - if network_type in [svc_constants.TYPE_VLAN, - svc_constants.TYPE_FLAT]: - if physical_network_set: - if physical_network not in self.network_vlan_ranges: - msg = _("Unknown provider:physical_network " - "%s") % physical_network - raise n_exc.InvalidInput(error_message=msg) - elif 'default' in self.network_vlan_ranges: - physical_network = 'default' - else: - msg = _("provider:physical_network required") - raise n_exc.InvalidInput(error_message=msg) - return physical_network - - def _check_port_binding_for_net_type(self, vnic_type, net_type): - """ - VIF_TYPE_DIRECT is valid only for Ethernet fabric - """ - if net_type == constants.TYPE_ETH: - return vnic_type in (constants.VIF_TYPE_DIRECT, - constants.VIF_TYPE_HOSTDEV) - elif net_type == constants.TYPE_IB: - return vnic_type == constants.VIF_TYPE_HOSTDEV - return False - - def _process_port_binding_create(self, context, attrs): - binding_profile = attrs.get(portbindings.PROFILE) - binding_profile_set = attributes.is_attr_set(binding_profile) - - net_binding = db.get_network_binding(context.session, - attrs.get('network_id')) - phy_net = net_binding.physical_network - - if not binding_profile_set: - return self.vnic_type - if constants.VNIC_TYPE in binding_profile: - vnic_type = binding_profile[constants.VNIC_TYPE] - phy_net_type = self.phys_network_type_maps[phy_net] - if vnic_type in (constants.VIF_TYPE_DIRECT, - constants.VIF_TYPE_HOSTDEV): - if 
self._check_port_binding_for_net_type(vnic_type, - phy_net_type): - self.base_binding_dict[portbindings.VIF_TYPE] = vnic_type - return vnic_type - else: - msg = (_("Unsupported vnic type %(vnic_type)s " - "for physical network type %(net_type)s") % - {'vnic_type': vnic_type, 'net_type': phy_net_type}) - else: - msg = _("Invalid vnic_type on port_create") - else: - msg = _("vnic_type is not defined in port profile") - raise n_exc.InvalidInput(error_message=msg) - - def create_network(self, context, network): - (network_type, physical_network, - vlan_id) = self._process_provider_create(context, - network['network']) - session = context.session - with session.begin(subtransactions=True): - #set up default security groups - tenant_id = self._get_tenant_id_for_create( - context, network['network']) - self._ensure_default_security_group(context, tenant_id) - - if not network_type: - # tenant network - network_type = self.tenant_network_type - if network_type == svc_constants.TYPE_NONE: - raise n_exc.TenantNetworksDisabled() - elif network_type == svc_constants.TYPE_VLAN: - physical_network, vlan_id = db.reserve_network(session) - else: # TYPE_LOCAL - vlan_id = constants.LOCAL_VLAN_ID - else: - # provider network - if network_type in [svc_constants.TYPE_VLAN, - svc_constants.TYPE_FLAT]: - db.reserve_specific_network(session, - physical_network, - vlan_id) - net = super(MellanoxEswitchPlugin, self).create_network(context, - network) - db.add_network_binding(session, net['id'], - network_type, - physical_network, - vlan_id) - - self._process_l3_create(context, net, network['network']) - self._extend_network_dict_provider(context, net) - # note - exception will rollback entire transaction - LOG.debug(_("Created network: %s"), net['id']) - return net - - def update_network(self, context, net_id, network): - LOG.debug(_("Update network")) - provider._raise_if_updates_provider_attributes(network['network']) - - session = context.session - with 
session.begin(subtransactions=True): - net = super(MellanoxEswitchPlugin, self).update_network(context, - net_id, - network) - self._process_l3_update(context, net, network['network']) - self._extend_network_dict_provider(context, net) - return net - - def delete_network(self, context, net_id): - LOG.debug(_("Delete network")) - session = context.session - with session.begin(subtransactions=True): - binding = db.get_network_binding(session, net_id) - self._process_l3_delete(context, net_id) - super(MellanoxEswitchPlugin, self).delete_network(context, - net_id) - if binding.segmentation_id != constants.LOCAL_VLAN_ID: - db.release_network(session, binding.physical_network, - binding.segmentation_id, - self.network_vlan_ranges) - # the network_binding record is deleted via cascade from - # the network record, so explicit removal is not necessary - self.notifier.network_delete(context, net_id) - - def get_network(self, context, net_id, fields=None): - session = context.session - with session.begin(subtransactions=True): - net = super(MellanoxEswitchPlugin, self).get_network(context, - net_id, - None) - self._extend_network_dict_provider(context, net) - return self._fields(net, fields) - - def get_networks(self, context, filters=None, fields=None, - sorts=None, limit=None, marker=None, page_reverse=False): - session = context.session - with session.begin(subtransactions=True): - nets = super(MellanoxEswitchPlugin, - self).get_networks(context, filters, None, sorts, - limit, marker, page_reverse) - for net in nets: - self._extend_network_dict_provider(context, net) - - return [self._fields(net, fields) for net in nets] - - def _extend_port_dict_binding(self, context, port): - port_binding = db.get_port_profile_binding(context.session, - port['id']) - if port_binding: - port[portbindings.VIF_TYPE] = port_binding.vnic_type - binding = db.get_network_binding(context.session, - port['network_id']) - fabric = binding.physical_network - port[portbindings.PROFILE] = 
{'physical_network': fabric} - return port - - def create_port(self, context, port): - LOG.debug(_("create_port with %s"), port) - session = context.session - port_data = port['port'] - with session.begin(subtransactions=True): - self._ensure_default_security_group_on_port(context, port) - sgids = self._get_security_groups_on_port(context, port) - # Set port status as 'DOWN'. This will be updated by agent - port['port']['status'] = q_const.PORT_STATUS_DOWN - - vnic_type = self._process_port_binding_create(context, - port['port']) - - port = super(MellanoxEswitchPlugin, - self).create_port(context, port) - - self._process_portbindings_create_and_update(context, - port_data, - port) - db.add_port_profile_binding(context.session, port['id'], vnic_type) - - self._process_port_create_security_group( - context, port, sgids) - self.notify_security_groups_member_updated(context, port) - return self._extend_port_dict_binding(context, port) - - def get_port(self, context, id, fields=None): - port = super(MellanoxEswitchPlugin, self).get_port(context, - id, - fields) - self._extend_port_dict_binding(context, port) - return self._fields(port, fields) - - def get_ports(self, context, filters=None, fields=None, - sorts=None, limit=None, marker=None, page_reverse=False): - res_ports = [] - ports = super(MellanoxEswitchPlugin, - self).get_ports(context, filters, fields, sorts, - limit, marker, page_reverse) - for port in ports: - port = self._extend_port_dict_binding(context, port) - res_ports.append(self._fields(port, fields)) - return res_ports - - def update_port(self, context, port_id, port): - original_port = self.get_port(context, port_id) - session = context.session - need_port_update_notify = False - - with session.begin(subtransactions=True): - updated_port = super(MellanoxEswitchPlugin, self).update_port( - context, port_id, port) - self._process_portbindings_create_and_update(context, - port['port'], - updated_port) - need_port_update_notify = 
self.update_security_group_on_port( - context, port_id, port, original_port, updated_port) - - need_port_update_notify |= self.is_security_group_member_updated( - context, original_port, updated_port) - - if original_port['admin_state_up'] != updated_port['admin_state_up']: - need_port_update_notify = True - - if need_port_update_notify: - binding = db.get_network_binding(context.session, - updated_port['network_id']) - self.notifier.port_update(context, updated_port, - binding.physical_network, - binding.network_type, - binding.segmentation_id) - return self._extend_port_dict_binding(context, updated_port) - - def delete_port(self, context, port_id, l3_port_check=True): - # if needed, check to see if this is a port owned by - # and l3-router. If so, we should prevent deletion. - if l3_port_check: - self.prevent_l3_port_deletion(context, port_id) - - session = context.session - with session.begin(subtransactions=True): - self.disassociate_floatingips(context, port_id) - port = self.get_port(context, port_id) - self._delete_port_security_group_bindings(context, port_id) - super(MellanoxEswitchPlugin, self).delete_port(context, port_id) - - self.notify_security_groups_member_updated(context, port) diff --git a/neutron/plugins/mlnx/rpc_callbacks.py b/neutron/plugins/mlnx/rpc_callbacks.py deleted file mode 100644 index 346d35822..000000000 --- a/neutron/plugins/mlnx/rpc_callbacks.py +++ /dev/null @@ -1,119 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2013 Mellanox Technologies, Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -from oslo.config import cfg - -from neutron.common import constants as q_const -from neutron.common import rpc_compat -from neutron.db import api as db_api -from neutron.db import dhcp_rpc_base -from neutron.db import l3_rpc_base -from neutron.db import securitygroups_rpc_base as sg_db_rpc -from neutron.openstack.common import log as logging -from neutron.plugins.mlnx.db import mlnx_db_v2 as db - -LOG = logging.getLogger(__name__) - - -class MlnxRpcCallbacks(rpc_compat.RpcCallback, - dhcp_rpc_base.DhcpRpcCallbackMixin, - l3_rpc_base.L3RpcCallbackMixin, - sg_db_rpc.SecurityGroupServerRpcCallbackMixin): - # History - # 1.1 Support Security Group RPC - RPC_API_VERSION = '1.1' - - #to be compatible with Linux Bridge Agent on Network Node - TAP_PREFIX_LEN = 3 - - @classmethod - def get_port_from_device(cls, device): - """Get port according to device. - - To maintain compatibility with Linux Bridge L2 Agent for DHCP/L3 - services get device either by linux bridge plugin - device name convention or by mac address - """ - port = db.get_port_from_device(device[cls.TAP_PREFIX_LEN:]) - if port: - port['device'] = device - else: - port = db.get_port_from_device_mac(device) - if port: - port['device'] = device - return port - - def get_device_details(self, rpc_context, **kwargs): - """Agent requests device details.""" - agent_id = kwargs.get('agent_id') - device = kwargs.get('device') - LOG.debug(_("Device %(device)s details requested from %(agent_id)s"), - {'device': device, 'agent_id': agent_id}) - port = self.get_port_from_device(device) - if port: - binding = db.get_network_binding(db_api.get_session(), - port['network_id']) - entry = {'device': device, - 'physical_network': binding.physical_network, - 'network_type': binding.network_type, - 'segmentation_id': binding.segmentation_id, - 'network_id': port['network_id'], - 'port_mac': port['mac_address'], - 'port_id': 
port['id'], - 'admin_state_up': port['admin_state_up']} - if cfg.CONF.AGENT.rpc_support_old_agents: - entry['vlan_id'] = binding.segmentation_id - new_status = (q_const.PORT_STATUS_ACTIVE if port['admin_state_up'] - else q_const.PORT_STATUS_DOWN) - if port['status'] != new_status: - db.set_port_status(port['id'], new_status) - else: - entry = {'device': device} - LOG.debug(_("%s can not be found in database"), device) - return entry - - def update_device_down(self, rpc_context, **kwargs): - """Device no longer exists on agent.""" - agent_id = kwargs.get('agent_id') - device = kwargs.get('device') - LOG.debug(_("Device %(device)s no longer exists on %(agent_id)s"), - {'device': device, 'agent_id': agent_id}) - port = self.get_port_from_device(device) - if port: - entry = {'device': device, - 'exists': True} - if port['status'] != q_const.PORT_STATUS_DOWN: - # Set port status to DOWN - db.set_port_status(port['id'], q_const.PORT_STATUS_DOWN) - else: - entry = {'device': device, - 'exists': False} - LOG.debug(_("%s can not be found in database"), device) - return entry - - def update_device_up(self, rpc_context, **kwargs): - """Device is up on agent.""" - agent_id = kwargs.get('agent_id') - device = kwargs.get('device') - LOG.debug(_("Device %(device)s up %(agent_id)s"), - {'device': device, 'agent_id': agent_id}) - port = self.get_port_from_device(device) - if port: - if port['status'] != q_const.PORT_STATUS_ACTIVE: - # Set port status to ACTIVE - db.set_port_status(port['id'], q_const.PORT_STATUS_ACTIVE) - else: - LOG.debug(_("%s can not be found in database"), device) diff --git a/neutron/plugins/nec/README b/neutron/plugins/nec/README deleted file mode 100644 index 694b80e99..000000000 --- a/neutron/plugins/nec/README +++ /dev/null @@ -1,13 +0,0 @@ -Quantum NEC OpenFlow Plugin - - -# -- What's this? - -https://wiki.openstack.org/wiki/Neutron/NEC_OpenFlow_Plugin - - -# -- Installation - -Use QuickStart Script for this plugin. 
This provides you auto installation and -configuration of Nova, Neutron and Trema. -https://github.com/nec-openstack/quantum-openflow-plugin/tree/folsom diff --git a/neutron/plugins/nec/__init__.py b/neutron/plugins/nec/__init__.py deleted file mode 100644 index 362a36068..000000000 --- a/neutron/plugins/nec/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2012 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. diff --git a/neutron/plugins/nec/agent/__init__.py b/neutron/plugins/nec/agent/__init__.py deleted file mode 100644 index 362a36068..000000000 --- a/neutron/plugins/nec/agent/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2012 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
diff --git a/neutron/plugins/nec/agent/nec_neutron_agent.py b/neutron/plugins/nec/agent/nec_neutron_agent.py deleted file mode 100755 index 6ab5f82b4..000000000 --- a/neutron/plugins/nec/agent/nec_neutron_agent.py +++ /dev/null @@ -1,252 +0,0 @@ -#!/usr/bin/env python -# Copyright 2012 NEC Corporation. -# Based on ryu/openvswitch agents. -# -# Copyright 2012 Isaku Yamahata -# Copyright 2011 VMware, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# @author: Ryota MIBU -# @author: Akihiro MOTOKI - -import socket -import sys -import time - -import eventlet -eventlet.monkey_patch() - -from neutron.agent.linux import ovs_lib -from neutron.agent import rpc as agent_rpc -from neutron.agent import securitygroups_rpc as sg_rpc -from neutron.common import config as common_config -from neutron.common import constants as q_const -from neutron.common import rpc_compat -from neutron.common import topics -from neutron import context as q_context -from neutron.extensions import securitygroup as ext_sg -from neutron.openstack.common import log as logging -from neutron.openstack.common import loopingcall -from neutron.plugins.nec.common import config - - -LOG = logging.getLogger(__name__) - - -class NECPluginApi(agent_rpc.PluginApi): - BASE_RPC_API_VERSION = '1.0' - - def update_ports(self, context, agent_id, datapath_id, - port_added, port_removed): - """RPC to update information of ports on Neutron Server.""" - LOG.info(_("Update ports: added=%(added)s, " 
- "removed=%(removed)s"), - {'added': port_added, 'removed': port_removed}) - self.call(context, - self.make_msg('update_ports', - topic=topics.AGENT, - agent_id=agent_id, - datapath_id=datapath_id, - port_added=port_added, - port_removed=port_removed)) - - -class NECAgentRpcCallback(rpc_compat.RpcCallback): - - RPC_API_VERSION = '1.0' - - def __init__(self, context, agent, sg_agent): - super(NECAgentRpcCallback, self).__init__() - self.context = context - self.agent = agent - self.sg_agent = sg_agent - - def port_update(self, context, **kwargs): - LOG.debug(_("port_update received: %s"), kwargs) - port = kwargs.get('port') - # Validate that port is on OVS - vif_port = self.agent.int_br.get_vif_port_by_id(port['id']) - if not vif_port: - return - - if ext_sg.SECURITYGROUPS in port: - self.sg_agent.refresh_firewall() - - -class SecurityGroupServerRpcApi(rpc_compat.RpcProxy, - sg_rpc.SecurityGroupServerRpcApiMixin): - - def __init__(self, topic): - super(SecurityGroupServerRpcApi, self).__init__( - topic=topic, default_version=sg_rpc.SG_RPC_VERSION) - - -class SecurityGroupAgentRpcCallback( - rpc_compat.RpcCallback, - sg_rpc.SecurityGroupAgentRpcCallbackMixin): - - RPC_API_VERSION = sg_rpc.SG_RPC_VERSION - - def __init__(self, context, sg_agent): - super(SecurityGroupAgentRpcCallback, self).__init__() - self.context = context - self.sg_agent = sg_agent - - -class SecurityGroupAgentRpc(sg_rpc.SecurityGroupAgentRpcMixin): - - def __init__(self, context): - self.context = context - self.plugin_rpc = SecurityGroupServerRpcApi(topics.PLUGIN) - self.init_firewall() - - -class NECNeutronAgent(object): - - def __init__(self, integ_br, root_helper, polling_interval): - '''Constructor. - - :param integ_br: name of the integration bridge. - :param root_helper: utility to use when running shell cmds. - :param polling_interval: interval (secs) to check the bridge. 
- ''' - self.int_br = ovs_lib.OVSBridge(integ_br, root_helper) - self.polling_interval = polling_interval - self.cur_ports = [] - self.need_sync = True - - self.datapath_id = "0x%s" % self.int_br.get_datapath_id() - - self.agent_state = { - 'binary': 'neutron-nec-agent', - 'host': config.CONF.host, - 'topic': q_const.L2_AGENT_TOPIC, - 'configurations': {}, - 'agent_type': q_const.AGENT_TYPE_NEC, - 'start_flag': True} - - self.setup_rpc() - - def setup_rpc(self): - self.host = socket.gethostname() - self.agent_id = 'nec-q-agent.%s' % self.host - LOG.info(_("RPC agent_id: %s"), self.agent_id) - - self.topic = topics.AGENT - self.context = q_context.get_admin_context_without_session() - - self.plugin_rpc = NECPluginApi(topics.PLUGIN) - self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN) - self.sg_agent = SecurityGroupAgentRpc(self.context) - - # RPC network init - # Handle updates from service - self.callback_nec = NECAgentRpcCallback(self.context, - self, self.sg_agent) - self.callback_sg = SecurityGroupAgentRpcCallback(self.context, - self.sg_agent) - self.endpoints = [self.callback_nec, self.callback_sg] - # Define the listening consumer for the agent - consumers = [[topics.PORT, topics.UPDATE], - [topics.SECURITY_GROUP, topics.UPDATE]] - self.connection = agent_rpc.create_consumers(self.endpoints, - self.topic, - consumers) - - report_interval = config.CONF.AGENT.report_interval - if report_interval: - heartbeat = loopingcall.FixedIntervalLoopingCall( - self._report_state) - heartbeat.start(interval=report_interval) - - def _report_state(self): - try: - # How many devices are likely used by a VM - num_devices = len(self.cur_ports) - self.agent_state['configurations']['devices'] = num_devices - self.state_rpc.report_state(self.context, - self.agent_state) - self.agent_state.pop('start_flag', None) - except Exception: - LOG.exception(_("Failed reporting state!")) - - def _vif_port_to_port_info(self, vif_port): - return dict(id=vif_port.vif_id, 
port_no=vif_port.ofport, - mac=vif_port.vif_mac) - - def _process_security_group(self, port_added, port_removed): - if port_added: - devices_added = [p['id'] for p in port_added] - self.sg_agent.prepare_devices_filter(devices_added) - if port_removed: - self.sg_agent.remove_devices_filter(port_removed) - - def loop_handler(self): - try: - # self.cur_ports will be kept until loop_handler succeeds. - cur_ports = [] if self.need_sync else self.cur_ports - new_ports = [] - - port_added = [] - for vif_port in self.int_br.get_vif_ports(): - port_id = vif_port.vif_id - new_ports.append(port_id) - if port_id not in cur_ports: - port_info = self._vif_port_to_port_info(vif_port) - port_added.append(port_info) - - port_removed = [] - for port_id in cur_ports: - if port_id not in new_ports: - port_removed.append(port_id) - - if port_added or port_removed: - self.plugin_rpc.update_ports(self.context, - self.agent_id, self.datapath_id, - port_added, port_removed) - self._process_security_group(port_added, port_removed) - else: - LOG.debug(_("No port changed.")) - - self.cur_ports = new_ports - self.need_sync = False - except Exception: - LOG.exception(_("Error in agent event loop")) - self.need_sync = True - - def daemon_loop(self): - """Main processing loop for NEC Plugin Agent.""" - while True: - self.loop_handler() - time.sleep(self.polling_interval) - - -def main(): - common_config.init(sys.argv[1:]) - - common_config.setup_logging(config.CONF) - - # Determine which agent type to use. - integ_br = config.OVS.integration_bridge - root_helper = config.AGENT.root_helper - polling_interval = config.AGENT.polling_interval - - agent = NECNeutronAgent(integ_br, root_helper, polling_interval) - - # Start everything. 
- agent.daemon_loop() - - -if __name__ == "__main__": - main() diff --git a/neutron/plugins/nec/common/__init__.py b/neutron/plugins/nec/common/__init__.py deleted file mode 100644 index 362a36068..000000000 --- a/neutron/plugins/nec/common/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2012 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. diff --git a/neutron/plugins/nec/common/config.py b/neutron/plugins/nec/common/config.py deleted file mode 100644 index 70f4a1a63..000000000 --- a/neutron/plugins/nec/common/config.py +++ /dev/null @@ -1,84 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2012 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# @author: Ryota MIBU - -from oslo.config import cfg - -from neutron.agent.common import config -from neutron.plugins.nec.common import constants as nconst - - -ovs_opts = [ - cfg.StrOpt('integration_bridge', default='br-int', - help=_("Integration bridge to use")), -] - -agent_opts = [ - cfg.IntOpt('polling_interval', default=2, - help=_("The number of seconds the agent will wait between " - "polling for local device changes.")), -] - -ofc_opts = [ - cfg.StrOpt('host', default='127.0.0.1', - help=_("Host to connect to")), - cfg.StrOpt('path_prefix', default='', - help=_("Base URL of OFC REST API. " - "It is prepended to each API request.")), - cfg.StrOpt('port', default='8888', - help=_("Port to connect to")), - cfg.StrOpt('driver', default='trema', - help=_("Driver to use")), - cfg.BoolOpt('enable_packet_filter', default=True, - help=_("Enable packet filter")), - cfg.BoolOpt('use_ssl', default=False, - help=_("Use SSL to connect")), - cfg.StrOpt('key_file', - help=_("Key file")), - cfg.StrOpt('cert_file', - help=_("Certificate file")), - cfg.BoolOpt('insecure_ssl', default=False, - help=_("Disable SSL certificate verification")), - cfg.IntOpt('api_max_attempts', default=3, - help=_("Maximum attempts per OFC API request." - "NEC plugin retries API request to OFC " - "when OFC returns ServiceUnavailable (503)." 
- "The value must be greater than 0.")), -] - -provider_opts = [ - cfg.StrOpt('default_router_provider', - default=nconst.DEFAULT_ROUTER_PROVIDER, - help=_('Default router provider to use.')), - cfg.ListOpt('router_providers', - default=nconst.DEFAULT_ROUTER_PROVIDERS, - help=_('List of enabled router providers.')) -] - - -cfg.CONF.register_opts(ovs_opts, "OVS") -cfg.CONF.register_opts(agent_opts, "AGENT") -cfg.CONF.register_opts(ofc_opts, "OFC") -cfg.CONF.register_opts(provider_opts, "PROVIDER") -config.register_agent_state_opts_helper(cfg.CONF) -config.register_root_helper(cfg.CONF) - -# shortcuts -CONF = cfg.CONF -OVS = cfg.CONF.OVS -AGENT = cfg.CONF.AGENT -OFC = cfg.CONF.OFC -PROVIDER = cfg.CONF.PROVIDER diff --git a/neutron/plugins/nec/common/constants.py b/neutron/plugins/nec/common/constants.py deleted file mode 100644 index b1bc7e5b3..000000000 --- a/neutron/plugins/nec/common/constants.py +++ /dev/null @@ -1,24 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2013 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -ROUTER_PROVIDER_L3AGENT = 'l3-agent' -ROUTER_PROVIDER_OPENFLOW = 'openflow' - -DEFAULT_ROUTER_PROVIDERS = [ROUTER_PROVIDER_L3AGENT, ROUTER_PROVIDER_OPENFLOW] -DEFAULT_ROUTER_PROVIDER = ROUTER_PROVIDER_L3AGENT - -ROUTER_STATUS_ACTIVE = 'ACTIVE' -ROUTER_STATUS_ERROR = 'ERROR' diff --git a/neutron/plugins/nec/common/exceptions.py b/neutron/plugins/nec/common/exceptions.py deleted file mode 100644 index 375135586..000000000 --- a/neutron/plugins/nec/common/exceptions.py +++ /dev/null @@ -1,85 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2012 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# @author: Ryota MIBU - -from neutron.common import exceptions as qexc - - -class OFCException(qexc.NeutronException): - message = _("An OFC exception has occurred: %(reason)s") - - def __init__(self, **kwargs): - super(OFCException, self).__init__(**kwargs) - self.status = kwargs.get('status') - self.err_msg = kwargs.get('err_msg') - self.err_code = kwargs.get('err_code') - - -class OFCResourceNotFound(qexc.NotFound): - message = _("The specified OFC resource (%(resource)s) is not found.") - - -class NECDBException(qexc.NeutronException): - message = _("An exception occurred in NECPluginV2 DB: %(reason)s") - - -class OFCMappingNotFound(qexc.NotFound): - message = _("Neutron-OFC resource mapping for " - "%(resource)s %(neutron_id)s is not found. 
" - "It may be deleted during processing.") - - -class OFCServiceUnavailable(OFCException): - message = _("OFC returns Server Unavailable (503) " - "(Retry-After=%(retry_after)s)") - - def __init__(self, **kwargs): - super(OFCServiceUnavailable, self).__init__(**kwargs) - self.retry_after = kwargs.get('retry_after') - - -class PortInfoNotFound(qexc.NotFound): - message = _("PortInfo %(id)s could not be found") - - -class ProfilePortInfoInvalidDataPathId(qexc.InvalidInput): - message = _('Invalid input for operation: ' - 'datapath_id should be a hex string ' - 'with at most 8 bytes') - - -class ProfilePortInfoInvalidPortNo(qexc.InvalidInput): - message = _('Invalid input for operation: ' - 'port_no should be [0:65535]') - - -class RouterExternalGatewayNotSupported(qexc.BadRequest): - message = _("Router (provider=%(provider)s) does not support " - "an external network") - - -class ProviderNotFound(qexc.NotFound): - message = _("Provider %(provider)s could not be found") - - -class RouterOverLimit(qexc.Conflict): - message = _("Cannot create more routers with provider=%(provider)s") - - -class RouterProviderMismatch(qexc.Conflict): - message = _("Provider of Router %(router_id)s is %(provider)s. " - "This operation is supported only for router provider " - "%(expected_provider)s.") diff --git a/neutron/plugins/nec/common/ofc_client.py b/neutron/plugins/nec/common/ofc_client.py deleted file mode 100644 index 21fb5f74b..000000000 --- a/neutron/plugins/nec/common/ofc_client.py +++ /dev/null @@ -1,158 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2012 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# @author: Ryota MIBU - -import time - -import requests - -from neutron.openstack.common import excutils -from neutron.openstack.common import jsonutils as json -from neutron.openstack.common import log as logging -from neutron.plugins.nec.common import config -from neutron.plugins.nec.common import exceptions as nexc - - -LOG = logging.getLogger(__name__) - - -class OFCClient(object): - """A HTTP/HTTPS client for OFC Drivers.""" - - def __init__(self, host="127.0.0.1", port=8888, use_ssl=False, - key_file=None, cert_file=None, insecure_ssl=False): - """Creates a new client to some OFC. 
- - :param host: The host where service resides - :param port: The port where service resides - :param use_ssl: True to use SSL, False to use HTTP - :param key_file: The SSL key file to use if use_ssl is true - :param cert_file: The SSL cert file to use if use_ssl is true - :param insecure_ssl: Don't verify SSL certificate - """ - self.host = host - self.port = port - self.use_ssl = use_ssl - self.key_file = key_file - self.cert_file = cert_file - self.insecure_ssl = insecure_ssl - self.connection = None - - def _format_error_message(self, status, detail): - detail = ' ' + detail if detail else '' - return (_("Operation on OFC failed: %(status)s%(msg)s") % - {'status': status, 'msg': detail}) - - def _get_response(self, method, action, body=None): - headers = {"Content-Type": "application/json"} - protocol = "http" - certs = {'key_file': self.key_file, 'cert_file': self.cert_file} - certs = dict((x, certs[x]) for x in certs if certs[x] is not None) - verify = True - - if self.use_ssl: - protocol = "https" - if self.insecure_ssl: - verify = False - - url = "%s://%s:%d%s" % (protocol, self.host, int(self.port), - action) - - res = requests.request(method, url, data=body, headers=headers, - cert=certs, verify=verify) - return res - - def do_single_request(self, method, action, body=None): - action = config.OFC.path_prefix + action - LOG.debug(_("Client request: %(host)s:%(port)s " - "%(method)s %(action)s [%(body)s]"), - {'host': self.host, 'port': self.port, - 'method': method, 'action': action, 'body': body}) - if type(body) is dict: - body = json.dumps(body) - try: - res = self._get_response(method, action, body) - data = res.text - LOG.debug(_("OFC returns [%(status)s:%(data)s]"), - {'status': res.status_code, - 'data': data}) - - # Try to decode JSON data if possible. 
- try: - data = json.loads(data) - except (ValueError, TypeError): - pass - - if res.status_code in (requests.codes.OK, - requests.codes.CREATED, - requests.codes.ACCEPTED, - requests.codes.NO_CONTENT): - return data - elif res.status_code == requests.codes.SERVICE_UNAVAILABLE: - retry_after = res.headers.get('retry-after') - LOG.warning(_("OFC returns ServiceUnavailable " - "(retry-after=%s)"), retry_after) - raise nexc.OFCServiceUnavailable(retry_after=retry_after) - elif res.status_code == requests.codes.NOT_FOUND: - LOG.info(_("Specified resource %s does not exist on OFC "), - action) - raise nexc.OFCResourceNotFound(resource=action) - else: - LOG.warning(_("Operation on OFC failed: " - "status=%(status)s, detail=%(detail)s"), - {'status': res.status_code, 'detail': data}) - params = {'reason': _("Operation on OFC failed"), - 'status': res.status_code} - if isinstance(data, dict): - params['err_code'] = data.get('err_code') - params['err_msg'] = data.get('err_msg') - else: - params['err_msg'] = data - raise nexc.OFCException(**params) - except requests.exceptions.RequestException as e: - reason = _("Failed to connect OFC : %s") % e - LOG.error(reason) - raise nexc.OFCException(reason=reason) - - def do_request(self, method, action, body=None): - max_attempts = config.OFC.api_max_attempts - for i in range(max_attempts, 0, -1): - try: - return self.do_single_request(method, action, body) - except nexc.OFCServiceUnavailable as e: - with excutils.save_and_reraise_exception() as ctxt: - try: - wait_time = int(e.retry_after) - except (ValueError, TypeError): - wait_time = None - if i > 1 and wait_time: - LOG.info(_("Waiting for %s seconds due to " - "OFC Service_Unavailable."), wait_time) - time.sleep(wait_time) - ctxt.reraise = False - continue - - def get(self, action): - return self.do_request("GET", action) - - def post(self, action, body=None): - return self.do_request("POST", action, body=body) - - def put(self, action, body=None): - return 
self.do_request("PUT", action, body=body) - - def delete(self, action): - return self.do_request("DELETE", action) diff --git a/neutron/plugins/nec/common/utils.py b/neutron/plugins/nec/common/utils.py deleted file mode 100644 index a628d8ef0..000000000 --- a/neutron/plugins/nec/common/utils.py +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright 2014 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -def cmp_dpid(dpid_a, dpid_b): - """Compare two datapath IDs as hexadecimal int. - - It returns True if equal, otherwise False. - """ - try: - return (int(dpid_a, 16) == int(dpid_b, 16)) - except Exception: - return False diff --git a/neutron/plugins/nec/db/__init__.py b/neutron/plugins/nec/db/__init__.py deleted file mode 100644 index 362a36068..000000000 --- a/neutron/plugins/nec/db/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2012 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. diff --git a/neutron/plugins/nec/db/api.py b/neutron/plugins/nec/db/api.py deleted file mode 100644 index 7963fdce4..000000000 --- a/neutron/plugins/nec/db/api.py +++ /dev/null @@ -1,186 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2012 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# @author: Ryota MIBU - -import sqlalchemy as sa - -from neutron.db import api as db -from neutron.db import model_base -from neutron.db import models_v2 -from neutron.db import securitygroups_db as sg_db -from neutron.extensions import securitygroup as ext_sg -from neutron import manager -from neutron.openstack.common import log as logging -from neutron.plugins.nec.common import config # noqa -from neutron.plugins.nec.common import exceptions as nexc -from neutron.plugins.nec.db import models as nmodels - - -LOG = logging.getLogger(__name__) -OFP_VLAN_NONE = 0xffff - - -resource_map = {'ofc_tenant': nmodels.OFCTenantMapping, - 'ofc_network': nmodels.OFCNetworkMapping, - 'ofc_port': nmodels.OFCPortMapping, - 'ofc_router': nmodels.OFCRouterMapping, - 'ofc_packet_filter': nmodels.OFCFilterMapping} - - -# utitlity methods - -def _get_resource_model(resource): - return resource_map[resource] - - -def clear_db(base=model_base.BASEV2): - db.clear_db(base) - - -def get_ofc_item(session, resource, neutron_id): - model = 
_get_resource_model(resource) - if not model: - return - try: - return session.query(model).filter_by(neutron_id=neutron_id).one() - except sa.orm.exc.NoResultFound: - return - - -def get_ofc_id(session, resource, neutron_id): - ofc_item = get_ofc_item(session, resource, neutron_id) - if ofc_item: - return ofc_item.ofc_id - else: - raise nexc.OFCMappingNotFound(resource=resource, - neutron_id=neutron_id) - - -def exists_ofc_item(session, resource, neutron_id): - if get_ofc_item(session, resource, neutron_id): - return True - else: - return False - - -def find_ofc_item(session, resource, ofc_id): - try: - model = _get_resource_model(resource) - params = dict(ofc_id=ofc_id) - return (session.query(model).filter_by(**params).one()) - except sa.orm.exc.NoResultFound: - return None - - -def add_ofc_item(session, resource, neutron_id, ofc_id): - try: - model = _get_resource_model(resource) - params = dict(neutron_id=neutron_id, ofc_id=ofc_id) - item = model(**params) - with session.begin(subtransactions=True): - session.add(item) - session.flush() - except Exception as exc: - LOG.exception(exc) - raise nexc.NECDBException(reason=exc.message) - return item - - -def del_ofc_item(session, resource, neutron_id): - try: - model = _get_resource_model(resource) - with session.begin(subtransactions=True): - item = session.query(model).filter_by(neutron_id=neutron_id).one() - session.delete(item) - return True - except sa.orm.exc.NoResultFound: - LOG.warning(_("del_ofc_item(): NotFound item " - "(resource=%(resource)s, id=%(id)s) "), - {'resource': resource, 'id': neutron_id}) - return False - - -def get_portinfo(session, id): - try: - return (session.query(nmodels.PortInfo). - filter_by(id=id). 
- one()) - except sa.orm.exc.NoResultFound: - return None - - -def add_portinfo(session, id, datapath_id='', port_no=0, - vlan_id=OFP_VLAN_NONE, mac=''): - try: - portinfo = nmodels.PortInfo(id=id, datapath_id=datapath_id, - port_no=port_no, vlan_id=vlan_id, mac=mac) - with session.begin(subtransactions=True): - session.add(portinfo) - except Exception as exc: - LOG.exception(exc) - raise nexc.NECDBException(reason=exc.message) - return portinfo - - -def del_portinfo(session, id): - try: - with session.begin(subtransactions=True): - portinfo = session.query(nmodels.PortInfo).filter_by(id=id).one() - session.delete(portinfo) - except sa.orm.exc.NoResultFound: - LOG.warning(_("del_portinfo(): NotFound portinfo for " - "port_id: %s"), id) - - -def get_active_ports_on_ofc(context, network_id, port_id=None): - """Retrieve ports on OFC on a given network. - - It returns a list of tuple (neutron port_id, OFC id). - """ - query = context.session.query(nmodels.OFCPortMapping) - query = query.join(models_v2.Port, - nmodels.OFCPortMapping.neutron_id == models_v2.Port.id) - query = query.filter(models_v2.Port.network_id == network_id) - if port_id: - query = query.filter(nmodels.OFCPortMapping.neutron_id == port_id) - - return [(p['neutron_id'], p['ofc_id']) for p in query] - - -def get_port_from_device(port_id): - """Get port from database.""" - LOG.debug(_("get_port_with_securitygroups() called:port_id=%s"), port_id) - session = db.get_session() - sg_binding_port = sg_db.SecurityGroupPortBinding.port_id - - query = session.query(models_v2.Port, - sg_db.SecurityGroupPortBinding.security_group_id) - query = query.outerjoin(sg_db.SecurityGroupPortBinding, - models_v2.Port.id == sg_binding_port) - query = query.filter(models_v2.Port.id == port_id) - port_and_sgs = query.all() - if not port_and_sgs: - return None - port = port_and_sgs[0][0] - plugin = manager.NeutronManager.get_plugin() - port_dict = plugin._make_port_dict(port) - port_dict[ext_sg.SECURITYGROUPS] = [ - sg_id for 
port_, sg_id in port_and_sgs if sg_id] - port_dict['security_group_rules'] = [] - port_dict['security_group_source_groups'] = [] - port_dict['fixed_ips'] = [ip['ip_address'] - for ip in port['fixed_ips']] - return port_dict diff --git a/neutron/plugins/nec/db/models.py b/neutron/plugins/nec/db/models.py deleted file mode 100644 index 302cb610d..000000000 --- a/neutron/plugins/nec/db/models.py +++ /dev/null @@ -1,71 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2012 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# @author: Ryota MIBU - -import sqlalchemy as sa -from sqlalchemy import orm - -from neutron.db import model_base -from neutron.db import models_v2 - - -"""New mapping tables.""" - - -class OFCId(object): - """Resource ID on OpenFlow Controller.""" - ofc_id = sa.Column(sa.String(255), unique=True, nullable=False) - - -class NeutronId(object): - """Logical ID on Neutron.""" - neutron_id = sa.Column(sa.String(36), primary_key=True) - - -class OFCTenantMapping(model_base.BASEV2, NeutronId, OFCId): - """Represents a Tenant on OpenFlow Network/Controller.""" - - -class OFCNetworkMapping(model_base.BASEV2, NeutronId, OFCId): - """Represents a Network on OpenFlow Network/Controller.""" - - -class OFCPortMapping(model_base.BASEV2, NeutronId, OFCId): - """Represents a Port on OpenFlow Network/Controller.""" - - -class OFCRouterMapping(model_base.BASEV2, NeutronId, OFCId): - """Represents a router on OpenFlow Network/Controller.""" - - -class OFCFilterMapping(model_base.BASEV2, NeutronId, OFCId): - """Represents a Filter on OpenFlow Network/Controller.""" - - -class PortInfo(model_base.BASEV2): - """Represents a Virtual Interface.""" - id = sa.Column(sa.String(36), - sa.ForeignKey('ports.id', ondelete="CASCADE"), - primary_key=True) - datapath_id = sa.Column(sa.String(36), nullable=False) - port_no = sa.Column(sa.Integer, nullable=False) - vlan_id = sa.Column(sa.Integer, nullable=False) - mac = sa.Column(sa.String(32), nullable=False) - port = orm.relationship( - models_v2.Port, - backref=orm.backref("portinfo", - lazy='joined', uselist=False, - cascade='delete')) diff --git a/neutron/plugins/nec/db/packetfilter.py b/neutron/plugins/nec/db/packetfilter.py deleted file mode 100644 index b4ff68940..000000000 --- a/neutron/plugins/nec/db/packetfilter.py +++ /dev/null @@ -1,220 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2012-2013 NEC Corporation. All rights reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# @author: Ryota MIBU - -import sqlalchemy as sa -from sqlalchemy import orm -from sqlalchemy.orm import exc as sa_exc -from sqlalchemy import sql - -from neutron.api.v2 import attributes -from neutron.db import model_base -from neutron.db import models_v2 -from neutron.openstack.common import uuidutils -from neutron.plugins.nec.db import models as nmodels -from neutron.plugins.nec.extensions import packetfilter as ext_pf - - -PF_STATUS_ACTIVE = 'ACTIVE' -PF_STATUS_DOWN = 'DOWN' -PF_STATUS_ERROR = 'ERROR' - -INT_FIELDS = ('eth_type', 'src_port', 'dst_port') - - -class PacketFilter(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant): - """Represents a packet filter.""" - name = sa.Column(sa.String(255)) - network_id = sa.Column(sa.String(36), - sa.ForeignKey('networks.id', ondelete="CASCADE"), - nullable=False) - priority = sa.Column(sa.Integer, nullable=False) - action = sa.Column(sa.String(16), nullable=False) - # condition - in_port = sa.Column(sa.String(36), - sa.ForeignKey('ports.id', ondelete="CASCADE"), - nullable=True) - src_mac = sa.Column(sa.String(32), nullable=False) - dst_mac = sa.Column(sa.String(32), nullable=False) - eth_type = sa.Column(sa.Integer, nullable=False) - src_cidr = sa.Column(sa.String(64), nullable=False) - dst_cidr = sa.Column(sa.String(64), nullable=False) - protocol = sa.Column(sa.String(16), nullable=False) - src_port = sa.Column(sa.Integer, nullable=False) - dst_port = 
sa.Column(sa.Integer, nullable=False) - # status - admin_state_up = sa.Column(sa.Boolean(), nullable=False) - status = sa.Column(sa.String(16), nullable=False) - - network = orm.relationship( - models_v2.Network, - backref=orm.backref('packetfilters', lazy='joined', cascade='delete'), - uselist=False) - in_port_ref = orm.relationship( - models_v2.Port, - backref=orm.backref('packetfilters', lazy='joined', cascade='delete'), - primaryjoin="Port.id==PacketFilter.in_port", - uselist=False) - - -class PacketFilterDbMixin(object): - - def _make_packet_filter_dict(self, pf_entry, fields=None): - res = {'id': pf_entry['id'], - 'name': pf_entry['name'], - 'tenant_id': pf_entry['tenant_id'], - 'network_id': pf_entry['network_id'], - 'action': pf_entry['action'], - 'priority': pf_entry['priority'], - 'in_port': pf_entry['in_port'], - # "or None" ensure the filed is None if empty - 'src_mac': pf_entry['src_mac'] or None, - 'dst_mac': pf_entry['dst_mac'] or None, - 'eth_type': pf_entry['eth_type'] or None, - 'src_cidr': pf_entry['src_cidr'] or None, - 'dst_cidr': pf_entry['dst_cidr'] or None, - 'protocol': pf_entry['protocol'] or None, - 'src_port': pf_entry['src_port'] or None, - 'dst_port': pf_entry['dst_port'] or None, - 'admin_state_up': pf_entry['admin_state_up'], - 'status': pf_entry['status']} - return self._fields(res, fields) - - def _get_packet_filter(self, context, id): - try: - pf_entry = self._get_by_id(context, PacketFilter, id) - except sa_exc.NoResultFound: - raise ext_pf.PacketFilterNotFound(id=id) - return pf_entry - - def get_packet_filter(self, context, id, fields=None): - pf_entry = self._get_packet_filter(context, id) - return self._make_packet_filter_dict(pf_entry, fields) - - def get_packet_filters(self, context, filters=None, fields=None): - return self._get_collection(context, - PacketFilter, - self._make_packet_filter_dict, - filters=filters, - fields=fields) - - def _replace_unspecified_field(self, params, key): - if not 
attributes.is_attr_set(params[key]): - if key == 'in_port': - params[key] = None - elif key in INT_FIELDS: - # Integer field - params[key] = 0 - else: - params[key] = '' - - def _get_eth_type_for_protocol(self, protocol): - if protocol.upper() in ("ICMP", "TCP", "UDP"): - return 0x800 - elif protocol.upper() == "ARP": - return 0x806 - - def _set_eth_type_from_protocol(self, filter_dict): - if filter_dict.get('protocol'): - eth_type = self._get_eth_type_for_protocol(filter_dict['protocol']) - if eth_type: - filter_dict['eth_type'] = eth_type - - def _check_eth_type_and_protocol(self, new_filter, current_filter): - if 'protocol' in new_filter or 'eth_type' not in new_filter: - return - eth_type = self._get_eth_type_for_protocol(current_filter['protocol']) - if not eth_type: - return - if eth_type != new_filter['eth_type']: - raise ext_pf.PacketFilterEtherTypeProtocolMismatch( - eth_type=hex(new_filter['eth_type']), - protocol=current_filter['protocol']) - - def create_packet_filter(self, context, packet_filter): - pf_dict = packet_filter['packet_filter'] - tenant_id = self._get_tenant_id_for_create(context, pf_dict) - - if pf_dict['in_port'] == attributes.ATTR_NOT_SPECIFIED: - # validate network ownership - self.get_network(context, pf_dict['network_id']) - else: - # validate port ownership - self.get_port(context, pf_dict['in_port']) - - params = {'tenant_id': tenant_id, - 'id': pf_dict.get('id') or uuidutils.generate_uuid(), - 'name': pf_dict['name'], - 'network_id': pf_dict['network_id'], - 'priority': pf_dict['priority'], - 'action': pf_dict['action'], - 'admin_state_up': pf_dict.get('admin_state_up', True), - 'status': PF_STATUS_DOWN, - 'in_port': pf_dict['in_port'], - 'src_mac': pf_dict['src_mac'], - 'dst_mac': pf_dict['dst_mac'], - 'eth_type': pf_dict['eth_type'], - 'src_cidr': pf_dict['src_cidr'], - 'dst_cidr': pf_dict['dst_cidr'], - 'src_port': pf_dict['src_port'], - 'dst_port': pf_dict['dst_port'], - 'protocol': pf_dict['protocol']} - for key in params: - 
self._replace_unspecified_field(params, key) - self._set_eth_type_from_protocol(params) - - with context.session.begin(subtransactions=True): - pf_entry = PacketFilter(**params) - context.session.add(pf_entry) - - return self._make_packet_filter_dict(pf_entry) - - def update_packet_filter(self, context, id, packet_filter): - params = packet_filter['packet_filter'] - for key in params: - self._replace_unspecified_field(params, key) - self._set_eth_type_from_protocol(params) - with context.session.begin(subtransactions=True): - pf_entry = self._get_packet_filter(context, id) - self._check_eth_type_and_protocol(params, pf_entry) - pf_entry.update(params) - return self._make_packet_filter_dict(pf_entry) - - def delete_packet_filter(self, context, id): - with context.session.begin(subtransactions=True): - pf_entry = self._get_packet_filter(context, id) - context.session.delete(pf_entry) - - def get_packet_filters_for_port(self, context, port): - """Retrieve packet filters on OFC on a given port. - - It returns a list of tuple (neutron filter_id, OFC id). - """ - query = (context.session.query(nmodels.OFCFilterMapping) - .join(PacketFilter, - nmodels.OFCFilterMapping.neutron_id == PacketFilter.id) - .filter(PacketFilter.admin_state_up == sql.true())) - - network_id = port['network_id'] - net_pf_query = (query.filter(PacketFilter.network_id == network_id) - .filter(PacketFilter.in_port == sql.null())) - net_filters = [(pf['neutron_id'], pf['ofc_id']) for pf in net_pf_query] - - port_pf_query = query.filter(PacketFilter.in_port == port['id']) - port_filters = [(pf['neutron_id'], pf['ofc_id']) - for pf in port_pf_query] - - return net_filters + port_filters diff --git a/neutron/plugins/nec/db/router.py b/neutron/plugins/nec/db/router.py deleted file mode 100644 index 9659cd7fd..000000000 --- a/neutron/plugins/nec/db/router.py +++ /dev/null @@ -1,92 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2013 NEC Corporation. All rights reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import sqlalchemy as sa -from sqlalchemy import orm -from sqlalchemy.orm import exc as sa_exc - -from neutron.db import l3_db -from neutron.db import models_v2 -from neutron.openstack.common import log as logging - -LOG = logging.getLogger(__name__) - - -class RouterProvider(models_v2.model_base.BASEV2): - """Represents a binding of router_id to provider.""" - provider = sa.Column(sa.String(255)) - router_id = sa.Column(sa.String(36), - sa.ForeignKey('routers.id', ondelete="CASCADE"), - primary_key=True) - - router = orm.relationship(l3_db.Router, uselist=False, - backref=orm.backref('provider', uselist=False, - lazy='joined', - cascade='delete')) - - -def _get_router_providers_query(query, provider=None, router_ids=None): - if provider: - query = query.filter_by(provider=provider) - if router_ids: - column = RouterProvider.router_id - query = query.filter(column.in_(router_ids)) - return query - - -def get_router_providers(session, provider=None, router_ids=None): - """Retrieve a list of a pair of router ID and its provider.""" - query = session.query(RouterProvider) - query = _get_router_providers_query(query, provider, router_ids) - return [{'provider': router.provider, 'router_id': router.router_id} - for router in query] - - -def get_routers_by_provider(session, provider, router_ids=None): - """Retrieve a list of router IDs with the given provider.""" - query = session.query(RouterProvider.router_id) - query = 
_get_router_providers_query(query, provider, router_ids) - return [router[0] for router in query] - - -def get_router_count_by_provider(session, provider, tenant_id=None): - """Return the number of routers with the given provider.""" - query = session.query(RouterProvider).filter_by(provider=provider) - if tenant_id: - query = (query.join('router'). - filter(l3_db.Router.tenant_id == tenant_id)) - return query.count() - - -def get_provider_by_router(session, router_id): - """Retrieve a provider of the given router.""" - try: - binding = (session.query(RouterProvider). - filter_by(router_id=router_id). - one()) - except sa_exc.NoResultFound: - return None - return binding.provider - - -def add_router_provider_binding(session, provider, router_id): - """Add a router provider association.""" - LOG.debug(_("Add provider binding " - "(router=%(router_id)s, provider=%(provider)s)"), - {'router_id': router_id, 'provider': provider}) - binding = RouterProvider(provider=provider, router_id=router_id) - session.add(binding) - return binding diff --git a/neutron/plugins/nec/drivers/__init__.py b/neutron/plugins/nec/drivers/__init__.py deleted file mode 100644 index 196c699ce..000000000 --- a/neutron/plugins/nec/drivers/__init__.py +++ /dev/null @@ -1,40 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2012 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# @author: Ryota MIBU - -from neutron.openstack.common import importutils -from neutron.openstack.common import log as logging - - -LOG = logging.getLogger(__name__) -DRIVER_PATH = "neutron.plugins.nec.drivers.%s" -DRIVER_LIST = { - 'trema': DRIVER_PATH % "trema.TremaPortBaseDriver", - 'trema_port': DRIVER_PATH % "trema.TremaPortBaseDriver", - 'trema_portmac': DRIVER_PATH % "trema.TremaPortMACBaseDriver", - 'trema_mac': DRIVER_PATH % "trema.TremaMACBaseDriver", - 'pfc': DRIVER_PATH % "pfc.PFCV51Driver", - 'pfc_v3': DRIVER_PATH % "pfc.PFCV3Driver", - 'pfc_v4': DRIVER_PATH % "pfc.PFCV4Driver", - 'pfc_v5': DRIVER_PATH % "pfc.PFCV5Driver", - 'pfc_v51': DRIVER_PATH % "pfc.PFCV51Driver", -} - - -def get_driver(driver_name): - LOG.info(_("Loading OFC driver: %s"), driver_name) - driver_klass = DRIVER_LIST.get(driver_name) or driver_name - return importutils.import_class(driver_klass) diff --git a/neutron/plugins/nec/drivers/pfc.py b/neutron/plugins/nec/drivers/pfc.py deleted file mode 100644 index 85921b712..000000000 --- a/neutron/plugins/nec/drivers/pfc.py +++ /dev/null @@ -1,374 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2012 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# @author: Ryota MIBU -# @author: Akihiro MOTOKI - -import re -import uuid - -import netaddr - -from neutron.api.v2 import attributes -from neutron.common import constants -from neutron.common import exceptions as qexc -from neutron.common import log as call_log -from neutron import manager -from neutron.plugins.nec.common import ofc_client -from neutron.plugins.nec.extensions import packetfilter as ext_pf -from neutron.plugins.nec import ofc_driver_base - - -class InvalidOFCIdFormat(qexc.NeutronException): - message = _("OFC %(resource)s ID has an invalid format: %(ofc_id)s") - - -class PFCDriverBase(ofc_driver_base.OFCDriverBase): - """Base Class for PDC Drivers. - - PFCDriverBase provides methods to handle PFC resources through REST API. - This uses ofc resource path instead of ofc resource ID. - - The class implements the API for PFC V4.0 or later. - """ - - router_supported = False - - match_ofc_network_id = re.compile( - "^/tenants/(?P[^/]+)/networks/(?P[^/]+)$") - match_ofc_port_id = re.compile( - "^/tenants/(?P[^/]+)/networks/(?P[^/]+)" - "/ports/(?P[^/]+)$") - - def __init__(self, conf_ofc): - self.client = ofc_client.OFCClient(host=conf_ofc.host, - port=conf_ofc.port, - use_ssl=conf_ofc.use_ssl, - key_file=conf_ofc.key_file, - cert_file=conf_ofc.cert_file, - insecure_ssl=conf_ofc.insecure_ssl) - - @classmethod - def filter_supported(cls): - return False - - def _generate_pfc_str(self, raw_str): - """Generate PFC acceptable String.""" - return re.sub(r'[^0-9a-zA-Z]', '_', raw_str) - - def _generate_pfc_id(self, id_str): - """Generate ID on PFC. - - Currently, PFC ID must be less than 32. 
- Shorten UUID string length from 36 to 31 by follows: - * delete UUID Version and hyphen (see RFC4122) - * ensure str length - """ - try: - # openstack.common.uuidutils.is_uuid_like() returns - # False for KeyStone tenant_id, so uuid.UUID is used - # directly here to accept tenant_id as UUID string - uuid_str = str(uuid.UUID(id_str)).replace('-', '') - uuid_no_version = uuid_str[:12] + uuid_str[13:] - return uuid_no_version[:31] - except Exception: - return self._generate_pfc_str(id_str)[:31] - - def _generate_pfc_description(self, desc): - """Generate Description on PFC. - - Currently, PFC Description must be less than 128. - """ - return self._generate_pfc_str(desc)[:127] - - def _extract_ofc_network_id(self, ofc_network_id): - match = self.match_ofc_network_id.match(ofc_network_id) - if match: - return match.group('network_id') - raise InvalidOFCIdFormat(resource='network', ofc_id=ofc_network_id) - - def _extract_ofc_port_id(self, ofc_port_id): - match = self.match_ofc_port_id.match(ofc_port_id) - if match: - return {'tenant': match.group('tenant_id'), - 'network': match.group('network_id'), - 'port': match.group('port_id')} - raise InvalidOFCIdFormat(resource='port', ofc_id=ofc_port_id) - - def create_tenant(self, description, tenant_id=None): - ofc_tenant_id = self._generate_pfc_id(tenant_id) - body = {'id': ofc_tenant_id} - self.client.post('/tenants', body=body) - return '/tenants/' + ofc_tenant_id - - def delete_tenant(self, ofc_tenant_id): - return self.client.delete(ofc_tenant_id) - - def create_network(self, ofc_tenant_id, description, network_id=None): - path = "%s/networks" % ofc_tenant_id - pfc_desc = self._generate_pfc_description(description) - body = {'description': pfc_desc} - res = self.client.post(path, body=body) - ofc_network_id = res['id'] - return path + '/' + ofc_network_id - - def delete_network(self, ofc_network_id): - return self.client.delete(ofc_network_id) - - def create_port(self, ofc_network_id, portinfo, - port_id=None, 
filters=None): - path = "%s/ports" % ofc_network_id - body = {'datapath_id': portinfo.datapath_id, - 'port': str(portinfo.port_no), - 'vid': str(portinfo.vlan_id)} - if self.filter_supported() and filters: - body['filters'] = [self._extract_ofc_filter_id(pf[1]) - for pf in filters] - res = self.client.post(path, body=body) - ofc_port_id = res['id'] - return path + '/' + ofc_port_id - - def delete_port(self, ofc_port_id): - return self.client.delete(ofc_port_id) - - -class PFCFilterDriverMixin(object): - """PFC PacketFilter Driver Mixin.""" - filters_path = "/filters" - filter_path = "/filters/%s" - - # PFC specific constants - MIN_PRIORITY = 1 - MAX_PRIORITY = 32766 - CREATE_ONLY_FIELDS = ['action', 'priority'] - PFC_ALLOW_ACTION = "pass" - PFC_DROP_ACTION = "drop" - - match_ofc_filter_id = re.compile("^/filters/(?P[^/]+)$") - - @classmethod - def filter_supported(cls): - return True - - def _set_param(self, filter_dict, body, key, create, convert_to=None): - if key in filter_dict: - if filter_dict[key]: - if convert_to: - body[key] = convert_to(filter_dict[key]) - else: - body[key] = filter_dict[key] - elif not create: - body[key] = "" - - def _generate_body(self, filter_dict, apply_ports=None, create=True): - body = {} - - if create: - # action : pass, drop (mandatory) - if filter_dict['action'].lower() in ext_pf.ALLOW_ACTIONS: - body['action'] = self.PFC_ALLOW_ACTION - else: - body['action'] = self.PFC_DROP_ACTION - # priority : mandatory - body['priority'] = filter_dict['priority'] - - for key in ['src_mac', 'dst_mac', 'src_port', 'dst_port']: - self._set_param(filter_dict, body, key, create) - - for key in ['src_cidr', 'dst_cidr']: - # CIDR must contain netmask even if it is an address. - convert_to = lambda x: str(netaddr.IPNetwork(x)) - self._set_param(filter_dict, body, key, create, convert_to) - - # protocol : decimal (0-255) - if 'protocol' in filter_dict: - if (not filter_dict['protocol'] or - # In the case of ARP, ip_proto should be set to wildcard. 
- # eth_type is set during adding an entry to DB layer. - filter_dict['protocol'].lower() == ext_pf.PROTO_NAME_ARP): - if not create: - body['protocol'] = "" - elif filter_dict['protocol'].lower() == constants.PROTO_NAME_ICMP: - body['protocol'] = constants.PROTO_NUM_ICMP - elif filter_dict['protocol'].lower() == constants.PROTO_NAME_TCP: - body['protocol'] = constants.PROTO_NUM_TCP - elif filter_dict['protocol'].lower() == constants.PROTO_NAME_UDP: - body['protocol'] = constants.PROTO_NUM_UDP - else: - body['protocol'] = int(filter_dict['protocol'], 0) - - # eth_type : hex (0x0-0xFFFF) - self._set_param(filter_dict, body, 'eth_type', create, hex) - - # apply_ports - if apply_ports: - # each element of apply_ports is a tuple of (neutron_id, ofc_id), - body['apply_ports'] = [] - for p in apply_ports: - try: - body['apply_ports'].append(self._extract_ofc_port_id(p[1])) - except InvalidOFCIdFormat: - pass - - return body - - def _validate_filter_common(self, filter_dict): - # Currently PFC support only IPv4 CIDR. 
- for field in ['src_cidr', 'dst_cidr']: - if (not filter_dict.get(field) or - filter_dict[field] == attributes.ATTR_NOT_SPECIFIED): - continue - net = netaddr.IPNetwork(filter_dict[field]) - if net.version != 4: - raise ext_pf.PacketFilterIpVersionNonSupported( - version=net.version, field=field, value=filter_dict[field]) - if ('priority' in filter_dict and - not (self.MIN_PRIORITY <= filter_dict['priority'] - <= self.MAX_PRIORITY)): - raise ext_pf.PacketFilterInvalidPriority( - min=self.MIN_PRIORITY, max=self.MAX_PRIORITY) - - def _validate_duplicate_priority(self, context, filter_dict): - plugin = manager.NeutronManager.get_plugin() - filters = {'network_id': [filter_dict['network_id']], - 'priority': [filter_dict['priority']]} - ret = plugin.get_packet_filters(context, filters=filters, - fields=['id']) - if ret: - raise ext_pf.PacketFilterDuplicatedPriority( - priority=filter_dict['priority']) - - def validate_filter_create(self, context, filter_dict): - self._validate_filter_common(filter_dict) - self._validate_duplicate_priority(context, filter_dict) - - def validate_filter_update(self, context, filter_dict): - for field in self.CREATE_ONLY_FIELDS: - if field in filter_dict: - raise ext_pf.PacketFilterUpdateNotSupported(field=field) - self._validate_filter_common(filter_dict) - - @call_log.log - def create_filter(self, ofc_network_id, filter_dict, - portinfo=None, filter_id=None, apply_ports=None): - body = self._generate_body(filter_dict, apply_ports, create=True) - res = self.client.post(self.filters_path, body=body) - # filter_id passed from a caller is not used. - # ofc_filter_id is generated by PFC because the prefix of - # filter_id has special meaning and it is internally used. 
- ofc_filter_id = res['id'] - return self.filter_path % ofc_filter_id - - @call_log.log - def update_filter(self, ofc_filter_id, filter_dict): - body = self._generate_body(filter_dict, create=False) - self.client.put(ofc_filter_id, body) - - @call_log.log - def delete_filter(self, ofc_filter_id): - return self.client.delete(ofc_filter_id) - - def _extract_ofc_filter_id(self, ofc_filter_id): - match = self.match_ofc_filter_id.match(ofc_filter_id) - if match: - return match.group('filter_id') - raise InvalidOFCIdFormat(resource='filter', ofc_id=ofc_filter_id) - - def convert_ofc_filter_id(self, context, ofc_filter_id): - # PFC Packet Filter is supported after the format of mapping tables - # are changed, so it is enough just to return ofc_filter_id - return ofc_filter_id - - -class PFCRouterDriverMixin(object): - - router_supported = True - router_nat_supported = False - - def create_router(self, ofc_tenant_id, router_id, description): - path = '%s/routers' % ofc_tenant_id - res = self.client.post(path, body=None) - ofc_router_id = res['id'] - return path + '/' + ofc_router_id - - def delete_router(self, ofc_router_id): - return self.client.delete(ofc_router_id) - - def add_router_interface(self, ofc_router_id, ofc_net_id, - ip_address=None, mac_address=None): - # ip_address : / (e.g., 10.0.0.0/24) - path = '%s/interfaces' % ofc_router_id - body = {'net_id': self._extract_ofc_network_id(ofc_net_id)} - if ip_address: - body['ip_address'] = ip_address - if mac_address: - body['mac_address'] = mac_address - res = self.client.post(path, body=body) - return path + '/' + res['id'] - - def update_router_interface(self, ofc_router_inf_id, - ip_address=None, mac_address=None): - # ip_address : / (e.g., 10.0.0.0/24) - if not ip_address and not mac_address: - return - body = {} - if ip_address: - body['ip_address'] = ip_address - if mac_address: - body['mac_address'] = mac_address - return self.client.put(ofc_router_inf_id, body=body) - - def delete_router_interface(self, 
ofc_router_inf_id): - return self.client.delete(ofc_router_inf_id) - - def list_router_routes(self, ofc_router_id): - path = '%s/routes' % ofc_router_id - ret = self.client.get(path) - # Prepend ofc_router_id to route_id - for r in ret['routes']: - r['id'] = ofc_router_id + '/routes/' + r['id'] - return ret['routes'] - - def add_router_route(self, ofc_router_id, destination, nexthop): - path = '%s/routes' % ofc_router_id - body = {'destination': destination, - 'nexthop': nexthop} - ret = self.client.post(path, body=body) - return path + '/' + ret['id'] - - def delete_router_route(self, ofc_router_route_id): - return self.client.delete(ofc_router_route_id) - - -class PFCV3Driver(PFCDriverBase): - - def create_tenant(self, description, tenant_id): - ofc_tenant_id = self._generate_pfc_id(tenant_id) - return "/tenants/" + ofc_tenant_id - - def delete_tenant(self, ofc_tenant_id): - pass - - -class PFCV4Driver(PFCDriverBase): - pass - - -class PFCV5Driver(PFCRouterDriverMixin, PFCDriverBase): - pass - - -class PFCV51Driver(PFCFilterDriverMixin, PFCV5Driver): - pass diff --git a/neutron/plugins/nec/drivers/trema.py b/neutron/plugins/nec/drivers/trema.py deleted file mode 100644 index 875a55d34..000000000 --- a/neutron/plugins/nec/drivers/trema.py +++ /dev/null @@ -1,250 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2012 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# @author: Ryota MIBU -# @author: Akihiro MOTOKI - -from neutron.openstack.common import uuidutils -from neutron.plugins.nec.common import ofc_client -from neutron.plugins.nec import ofc_driver_base - - -class TremaDriverBase(ofc_driver_base.OFCDriverBase): - """Common class for Trema (Sliceable Switch) Drivers.""" - networks_path = "/networks" - network_path = "/networks/%s" - - router_supported = False - - def __init__(self, conf_ofc): - # Trema sliceable REST API does not support HTTPS - self.client = ofc_client.OFCClient(host=conf_ofc.host, - port=conf_ofc.port) - - def _get_network_id(self, ofc_network_id): - # ofc_network_id : /networks/ - return ofc_network_id.split('/')[2] - - def _get_tenant_id(self, tenant_id): - # Trema does not use tenant_id, but it returns - # /tenants/ format to keep consistency with PFC driver. - return '/tenants/' + tenant_id - - def create_tenant(self, description, tenant_id=None): - return self._get_tenant_id(tenant_id or uuidutils.generate_uuid()) - - def update_tenant(self, ofc_tenant_id, description): - pass - - def delete_tenant(self, ofc_tenant_id): - pass - - def create_network(self, ofc_tenant_id, description, network_id=None): - ofc_network_id = network_id or uuidutils.generate_uuid() - body = {'id': ofc_network_id, 'description': description} - self.client.post(self.networks_path, body=body) - return self.network_path % ofc_network_id - - def delete_network(self, ofc_network_id): - return self.client.delete(ofc_network_id) - - -class TremaFilterDriverMixin(object): - """Trema (Sliceable Switch) PacketFilter Driver Mixin.""" - filters_path = "/filters" - filter_path = "/filters/%s" - - @classmethod - def filter_supported(cls): - return True - - def create_filter(self, ofc_network_id, filter_dict, - portinfo=None, filter_id=None, apply_ports=None): - if filter_dict['action'].upper() in ["ACCEPT", "ALLOW"]: - ofc_action = "ALLOW" - elif filter_dict['action'].upper() in ["DROP", "DENY"]: - ofc_action = "DENY" - - body = 
{'priority': filter_dict['priority'], - 'slice': self._get_network_id(ofc_network_id), - 'action': ofc_action} - ofp_wildcards = ["dl_vlan", "dl_vlan_pcp", "nw_tos"] - - if portinfo: - body['in_datapath_id'] = portinfo.datapath_id - body['in_port'] = portinfo.port_no - else: - body['wildcards'] = "in_datapath_id" - ofp_wildcards.append("in_port") - - if filter_dict['src_mac']: - body['dl_src'] = filter_dict['src_mac'] - else: - ofp_wildcards.append("dl_src") - - if filter_dict['dst_mac']: - body['dl_dst'] = filter_dict['dst_mac'] - else: - ofp_wildcards.append("dl_dst") - - if filter_dict['src_cidr']: - body['nw_src'] = filter_dict['src_cidr'] - else: - ofp_wildcards.append("nw_src:32") - - if filter_dict['dst_cidr']: - body['nw_dst'] = filter_dict['dst_cidr'] - else: - ofp_wildcards.append("nw_dst:32") - - if filter_dict['protocol']: - if filter_dict['protocol'].upper() == "ICMP": - body['dl_type'] = "0x800" - body['nw_proto'] = hex(1) - elif filter_dict['protocol'].upper() == "TCP": - body['dl_type'] = "0x800" - body['nw_proto'] = hex(6) - elif filter_dict['protocol'].upper() == "UDP": - body['dl_type'] = "0x800" - body['nw_proto'] = hex(17) - elif filter_dict['protocol'].upper() == "ARP": - body['dl_type'] = "0x806" - ofp_wildcards.append("nw_proto") - else: - body['nw_proto'] = filter_dict['protocol'] - else: - ofp_wildcards.append("nw_proto") - - if 'dl_type' in body: - pass - elif filter_dict['eth_type']: - body['dl_type'] = filter_dict['eth_type'] - else: - ofp_wildcards.append("dl_type") - - if filter_dict['src_port']: - body['tp_src'] = hex(filter_dict['src_port']) - else: - ofp_wildcards.append("tp_src") - - if filter_dict['dst_port']: - body['tp_dst'] = hex(filter_dict['dst_port']) - else: - ofp_wildcards.append("tp_dst") - - ofc_filter_id = filter_id or uuidutils.generate_uuid() - body['id'] = ofc_filter_id - - body['ofp_wildcards'] = ','.join(ofp_wildcards) - - self.client.post(self.filters_path, body=body) - return self.filter_path % ofc_filter_id - - 
def delete_filter(self, ofc_filter_id): - return self.client.delete(ofc_filter_id) - - -class TremaPortBaseDriver(TremaDriverBase, TremaFilterDriverMixin): - """Trema (Sliceable Switch) Driver for port base binding. - - TremaPortBaseDriver uses port base binding. - Ports are identified by datapath_id, port_no and vlan_id. - """ - ports_path = "%(network)s/ports" - port_path = "%(network)s/ports/%(port)s" - - def create_port(self, ofc_network_id, portinfo, - port_id=None, filters=None): - ofc_port_id = port_id or uuidutils.generate_uuid() - path = self.ports_path % {'network': ofc_network_id} - body = {'id': ofc_port_id, - 'datapath_id': portinfo.datapath_id, - 'port': str(portinfo.port_no), - 'vid': str(portinfo.vlan_id)} - self.client.post(path, body=body) - return self.port_path % {'network': ofc_network_id, - 'port': ofc_port_id} - - def delete_port(self, ofc_port_id): - return self.client.delete(ofc_port_id) - - -class TremaPortMACBaseDriver(TremaDriverBase, TremaFilterDriverMixin): - """Trema (Sliceable Switch) Driver for port-mac base binding. - - TremaPortBaseDriver uses port-mac base binding. - Ports are identified by datapath_id, port_no, vlan_id and mac. - """ - ports_path = "%(network)s/ports" - port_path = "%(network)s/ports/%(port)s" - attachments_path = "%(network)s/ports/%(port)s/attachments" - attachment_path = "%(network)s/ports/%(port)s/attachments/%(attachment)s" - - def create_port(self, ofc_network_id, portinfo, port_id=None, - filters=None): - #NOTE: This Driver create slices with Port-MAC Based bindings on Trema - # Sliceable. It's REST API requires Port Based binding before you - # define Port-MAC Based binding. 
- ofc_port_id = port_id or uuidutils.generate_uuid() - dummy_port_id = "dummy-%s" % ofc_port_id - - path = self.ports_path % {'network': ofc_network_id} - body = {'id': dummy_port_id, - 'datapath_id': portinfo.datapath_id, - 'port': str(portinfo.port_no), - 'vid': str(portinfo.vlan_id)} - self.client.post(path, body=body) - - path = self.attachments_path % {'network': ofc_network_id, - 'port': dummy_port_id} - body = {'id': ofc_port_id, 'mac': portinfo.mac} - self.client.post(path, body=body) - - path = self.port_path % {'network': ofc_network_id, - 'port': dummy_port_id} - self.client.delete(path) - - return self.attachment_path % {'network': ofc_network_id, - 'port': dummy_port_id, - 'attachment': ofc_port_id} - - def delete_port(self, ofc_port_id): - return self.client.delete(ofc_port_id) - - -class TremaMACBaseDriver(TremaDriverBase): - """Trema (Sliceable Switch) Driver for mac base binding. - - TremaPortBaseDriver uses mac base binding. - Ports are identified by mac. - """ - attachments_path = "%(network)s/attachments" - attachment_path = "%(network)s/attachments/%(attachment)s" - - @classmethod - def filter_supported(cls): - return False - - def create_port(self, ofc_network_id, portinfo, port_id=None, - filters=None): - ofc_port_id = port_id or uuidutils.generate_uuid() - path = self.attachments_path % {'network': ofc_network_id} - body = {'id': ofc_port_id, 'mac': portinfo.mac} - self.client.post(path, body=body) - return self.attachment_path % {'network': ofc_network_id, - 'attachment': ofc_port_id} - - def delete_port(self, ofc_port_id): - return self.client.delete(ofc_port_id) diff --git a/neutron/plugins/nec/extensions/__init__.py b/neutron/plugins/nec/extensions/__init__.py deleted file mode 100644 index 362a36068..000000000 --- a/neutron/plugins/nec/extensions/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2012 NEC Corporation. All rights reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. diff --git a/neutron/plugins/nec/extensions/packetfilter.py b/neutron/plugins/nec/extensions/packetfilter.py deleted file mode 100644 index 2dddfd41a..000000000 --- a/neutron/plugins/nec/extensions/packetfilter.py +++ /dev/null @@ -1,208 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2012-2013 NEC Corporation. -# All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# -# @author: Ryota MIBU -# - -from oslo.config import cfg - -from neutron.api import extensions -from neutron.api.v2 import attributes -from neutron.api.v2 import base -from neutron.common import constants -from neutron.common import exceptions -from neutron import manager -from neutron import quota - - -quota_packet_filter_opts = [ - cfg.IntOpt('quota_packet_filter', - default=100, - help=_("Number of packet_filters allowed per tenant, " - "-1 for unlimited")) -] -cfg.CONF.register_opts(quota_packet_filter_opts, 'QUOTAS') - - -class PacketFilterNotFound(exceptions.NotFound): - message = _("PacketFilter %(id)s could not be found") - - -class PacketFilterIpVersionNonSupported(exceptions.BadRequest): - message = _("IP version %(version)s is not supported for %(field)s " - "(%(value)s is specified)") - - -class PacketFilterInvalidPriority(exceptions.BadRequest): - message = _("Packet Filter priority should be %(min)s-%(max)s (included)") - - -class PacketFilterUpdateNotSupported(exceptions.BadRequest): - message = _("%(field)s field cannot be updated") - - -class PacketFilterDuplicatedPriority(exceptions.BadRequest): - message = _("The backend does not support duplicated priority. " - "Priority %(priority)s is in use") - - -class PacketFilterEtherTypeProtocolMismatch(exceptions.Conflict): - message = _("Ether Type '%(eth_type)s' conflicts with protocol " - "'%(protocol)s'. 
Update or clear protocol before " - "changing ether type.") - - -def convert_to_int_dec_and_hex(data): - try: - return int(data, 0) - except (ValueError, TypeError): - pass - try: - return int(data) - except (ValueError, TypeError): - msg = _("'%s' is not a integer") % data - raise exceptions.InvalidInput(error_message=msg) - - -def convert_to_int_or_none(data): - if data is None: - return - return convert_to_int_dec_and_hex(data) - - -PROTO_NAME_ARP = 'arp' -SUPPORTED_PROTOCOLS = [constants.PROTO_NAME_ICMP, - constants.PROTO_NAME_TCP, - constants.PROTO_NAME_UDP, - PROTO_NAME_ARP] -ALLOW_ACTIONS = ['allow', 'accept'] -DROP_ACTIONS = ['drop', 'deny'] -SUPPORTED_ACTIONS = ALLOW_ACTIONS + DROP_ACTIONS - -ALIAS = 'packet-filter' -RESOURCE = 'packet_filter' -COLLECTION = 'packet_filters' -PACKET_FILTER_ACTION_REGEX = '(?i)^(%s)$' % '|'.join(SUPPORTED_ACTIONS) -PACKET_FILTER_PROTOCOL_REGEX = ('(?i)^(%s|0x[0-9a-fA-F]+|[0-9]+|)$' % - '|'.join(SUPPORTED_PROTOCOLS)) -PACKET_FILTER_ATTR_PARAMS = { - 'id': {'allow_post': False, 'allow_put': False, - 'validate': {'type:uuid': None}, - 'is_visible': True}, - 'name': {'allow_post': True, 'allow_put': True, 'default': '', - 'validate': {'type:string': None}, - 'is_visible': True}, - 'tenant_id': {'allow_post': True, 'allow_put': False, - 'validate': {'type:string': None}, - 'required_by_policy': True, - 'is_visible': True}, - 'network_id': {'allow_post': True, 'allow_put': False, - 'validate': {'type:uuid': None}, - 'is_visible': True}, - 'admin_state_up': {'allow_post': True, 'allow_put': True, - 'default': True, - 'convert_to': attributes.convert_to_boolean, - 'is_visible': True}, - 'status': {'allow_post': False, 'allow_put': False, - 'is_visible': True}, - 'action': {'allow_post': True, 'allow_put': True, - 'validate': {'type:regex': PACKET_FILTER_ACTION_REGEX}, - 'is_visible': True}, - 'priority': {'allow_post': True, 'allow_put': True, - 'convert_to': convert_to_int_dec_and_hex, - 'is_visible': True}, - 'in_port': 
{'allow_post': True, 'allow_put': False, - 'default': attributes.ATTR_NOT_SPECIFIED, - 'validate': {'type:uuid': None}, - 'is_visible': True}, - 'src_mac': {'allow_post': True, 'allow_put': True, - 'default': attributes.ATTR_NOT_SPECIFIED, - 'validate': {'type:mac_address_or_none': None}, - 'is_visible': True}, - 'dst_mac': {'allow_post': True, 'allow_put': True, - 'default': attributes.ATTR_NOT_SPECIFIED, - 'validate': {'type:mac_address_or_none': None}, - 'is_visible': True}, - 'eth_type': {'allow_post': True, 'allow_put': True, - 'default': attributes.ATTR_NOT_SPECIFIED, - 'convert_to': convert_to_int_or_none, - 'is_visible': True}, - 'src_cidr': {'allow_post': True, 'allow_put': True, - 'default': attributes.ATTR_NOT_SPECIFIED, - 'validate': {'type:subnet_or_none': None}, - 'is_visible': True}, - 'dst_cidr': {'allow_post': True, 'allow_put': True, - 'default': attributes.ATTR_NOT_SPECIFIED, - 'validate': {'type:subnet_or_none': None}, - 'is_visible': True}, - 'protocol': {'allow_post': True, 'allow_put': True, - 'default': attributes.ATTR_NOT_SPECIFIED, - 'validate': {'type:regex_or_none': - PACKET_FILTER_PROTOCOL_REGEX}, - 'is_visible': True}, - 'src_port': {'allow_post': True, 'allow_put': True, - 'default': attributes.ATTR_NOT_SPECIFIED, - 'convert_to': convert_to_int_or_none, - 'is_visible': True}, - 'dst_port': {'allow_post': True, 'allow_put': True, - 'default': attributes.ATTR_NOT_SPECIFIED, - 'convert_to': convert_to_int_or_none, - 'is_visible': True}, -} -PACKET_FILTER_ATTR_MAP = {COLLECTION: PACKET_FILTER_ATTR_PARAMS} - - -class Packetfilter(extensions.ExtensionDescriptor): - @classmethod - def get_name(cls): - return ALIAS - - @classmethod - def get_alias(cls): - return ALIAS - - @classmethod - def get_description(cls): - return "PacketFilters on OFC" - - @classmethod - def get_namespace(cls): - return "http://www.nec.co.jp/api/ext/packet_filter/v2.0" - - @classmethod - def get_updated(cls): - return "2013-07-16T00:00:00+09:00" - - @classmethod - def 
get_resources(cls): - qresource = quota.CountableResource(RESOURCE, - quota._count_resource, - 'quota_%s' % RESOURCE) - quota.QUOTAS.register_resource(qresource) - - resource = base.create_resource(COLLECTION, RESOURCE, - manager.NeutronManager.get_plugin(), - PACKET_FILTER_ATTR_PARAMS) - pf_ext = extensions.ResourceExtension( - COLLECTION, resource, attr_map=PACKET_FILTER_ATTR_PARAMS) - return [pf_ext] - - def get_extended_resources(self, version): - if version == "2.0": - return PACKET_FILTER_ATTR_MAP - else: - return {} diff --git a/neutron/plugins/nec/extensions/router_provider.py b/neutron/plugins/nec/extensions/router_provider.py deleted file mode 100644 index 102e23218..000000000 --- a/neutron/plugins/nec/extensions/router_provider.py +++ /dev/null @@ -1,60 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2013 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from neutron.api.v2 import attributes -from neutron.openstack.common import log as logging - - -LOG = logging.getLogger(__name__) - -ROUTER_PROVIDER = 'provider' - -ROUTER_PROVIDER_ATTRIBUTE = { - 'routers': {ROUTER_PROVIDER: - {'allow_post': True, - 'allow_put': False, - 'is_visible': True, - 'default': attributes.ATTR_NOT_SPECIFIED} - } -} - - -class Router_provider(object): - @classmethod - def get_name(cls): - return "Router Provider" - - @classmethod - def get_alias(cls): - return "router_provider" - - @classmethod - def get_description(cls): - return "Router Provider Support" - - @classmethod - def get_namespace(cls): - return "http://docs.openstack.org/ext/router_provider/api/v1.0" - - @classmethod - def get_updated(cls): - return "2013-08-20T10:00:00-00:00" - - def get_extended_resources(self, version): - if version == "2.0": - return ROUTER_PROVIDER_ATTRIBUTE - else: - return {} diff --git a/neutron/plugins/nec/nec_plugin.py b/neutron/plugins/nec/nec_plugin.py deleted file mode 100644 index f2225e733..000000000 --- a/neutron/plugins/nec/nec_plugin.py +++ /dev/null @@ -1,781 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2012-2013 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# @author: Ryota MIBU -# @author: Akihiro MOTOKI - -from neutron.agent import securitygroups_rpc as sg_rpc -from neutron.api import extensions as neutron_extensions -from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api -from neutron.api.v2 import attributes as attrs -from neutron.common import constants as const -from neutron.common import exceptions as n_exc -from neutron.common import rpc_compat -from neutron.common import topics -from neutron.db import agents_db -from neutron.db import agentschedulers_db -from neutron.db import allowedaddresspairs_db as addr_pair_db -from neutron.db import db_base_plugin_v2 -from neutron.db import dhcp_rpc_base -from neutron.db import external_net_db -from neutron.db import l3_rpc_base -from neutron.db import portbindings_base -from neutron.db import portbindings_db -from neutron.db import quota_db # noqa -from neutron.db import securitygroups_rpc_base as sg_db_rpc -from neutron.extensions import allowedaddresspairs as addr_pair -from neutron.extensions import portbindings -from neutron.openstack.common import excutils -from neutron.openstack.common import importutils -from neutron.openstack.common import log as logging -from neutron.openstack.common import uuidutils -from neutron.plugins.common import constants as svc_constants -from neutron.plugins.nec.common import config -from neutron.plugins.nec.common import exceptions as nexc -from neutron.plugins.nec.common import utils as necutils -from neutron.plugins.nec.db import api as ndb -from neutron.plugins.nec.db import router as rdb -from neutron.plugins.nec import extensions -from neutron.plugins.nec import nec_router -from neutron.plugins.nec import ofc_manager -from neutron.plugins.nec import packet_filter - -LOG = logging.getLogger(__name__) - - -class NECPluginV2(db_base_plugin_v2.NeutronDbPluginV2, - external_net_db.External_net_db_mixin, - nec_router.RouterMixin, - sg_db_rpc.SecurityGroupServerRpcMixin, - agentschedulers_db.DhcpAgentSchedulerDbMixin, - 
nec_router.L3AgentSchedulerDbMixin, - packet_filter.PacketFilterMixin, - portbindings_db.PortBindingMixin, - addr_pair_db.AllowedAddressPairsMixin): - """NECPluginV2 controls an OpenFlow Controller. - - The Neutron NECPluginV2 maps L2 logical networks to L2 virtualized networks - on an OpenFlow enabled network. An OpenFlow Controller (OFC) provides - L2 network isolation without VLAN and this plugin controls the OFC. - - NOTE: This is for Neutron API V2. Codes for V1.0 and V1.1 are available - at https://github.com/nec-openstack/neutron-openflow-plugin . - - The port binding extension enables an external application relay - information to and from the plugin. - """ - _supported_extension_aliases = ["agent", - "allowed-address-pairs", - "binding", - "dhcp_agent_scheduler", - "external-net", - "ext-gw-mode", - "extraroute", - "l3_agent_scheduler", - "packet-filter", - "quotas", - "router", - "router_provider", - "security-group", - ] - - @property - def supported_extension_aliases(self): - if not hasattr(self, '_aliases'): - aliases = self._supported_extension_aliases[:] - sg_rpc.disable_security_group_extension_by_config(aliases) - self.remove_packet_filter_extension_if_disabled(aliases) - self._aliases = aliases - return self._aliases - - def __init__(self): - super(NECPluginV2, self).__init__() - self.ofc = ofc_manager.OFCManager(self.safe_reference) - self.base_binding_dict = self._get_base_binding_dict() - portbindings_base.register_port_dict_function() - - neutron_extensions.append_api_extensions_path(extensions.__path__) - - self.setup_rpc() - self.l3_rpc_notifier = nec_router.L3AgentNotifyAPI() - - self.network_scheduler = importutils.import_object( - config.CONF.network_scheduler_driver - ) - self.router_scheduler = importutils.import_object( - config.CONF.router_scheduler_driver - ) - - nec_router.load_driver(self.safe_reference, self.ofc) - self.port_handlers = { - 'create': { - const.DEVICE_OWNER_ROUTER_GW: self.create_router_port, - 
const.DEVICE_OWNER_ROUTER_INTF: self.create_router_port, - 'default': self.activate_port_if_ready, - }, - 'delete': { - const.DEVICE_OWNER_ROUTER_GW: self.delete_router_port, - const.DEVICE_OWNER_ROUTER_INTF: self.delete_router_port, - 'default': self.deactivate_port, - } - } - - def setup_rpc(self): - self.service_topics = {svc_constants.CORE: topics.PLUGIN, - svc_constants.L3_ROUTER_NAT: topics.L3PLUGIN} - self.conn = rpc_compat.create_connection(new=True) - self.notifier = NECPluginV2AgentNotifierApi(topics.AGENT) - self.agent_notifiers[const.AGENT_TYPE_DHCP] = ( - dhcp_rpc_agent_api.DhcpAgentNotifyAPI() - ) - self.agent_notifiers[const.AGENT_TYPE_L3] = ( - nec_router.L3AgentNotifyAPI() - ) - - # NOTE: callback_sg is referred to from the sg unit test. - self.callback_sg = SecurityGroupServerRpcCallback() - self.endpoints = [ - NECPluginV2RPCCallbacks(self.safe_reference), - DhcpRpcCallback(), - L3RpcCallback(), - self.callback_sg, - agents_db.AgentExtRpcCallback()] - for svc_topic in self.service_topics.values(): - self.conn.create_consumer(svc_topic, self.endpoints, fanout=False) - # Consume from all consumers in threads - self.conn.consume_in_threads() - - def _update_resource_status(self, context, resource, id, status): - """Update status of specified resource.""" - request = {'status': status} - obj_getter = getattr(self, '_get_%s' % resource) - with context.session.begin(subtransactions=True): - obj_db = obj_getter(context, id) - obj_db.update(request) - - def _update_resource_status_if_changed(self, context, resource_type, - resource_dict, new_status): - if resource_dict['status'] != new_status: - self._update_resource_status(context, resource_type, - resource_dict['id'], - new_status) - resource_dict['status'] = new_status - - def _check_ofc_tenant_in_use(self, context, tenant_id): - """Check if the specified tenant is used.""" - # All networks are created on OFC - filters = {'tenant_id': [tenant_id]} - if self.get_networks_count(context, 
filters=filters): - return True - if rdb.get_router_count_by_provider(context.session, - nec_router.PROVIDER_OPENFLOW, - tenant_id): - return True - return False - - def _cleanup_ofc_tenant(self, context, tenant_id): - if not self._check_ofc_tenant_in_use(context, tenant_id): - try: - if self.ofc.exists_ofc_tenant(context, tenant_id): - self.ofc.delete_ofc_tenant(context, tenant_id) - else: - LOG.debug(_('_cleanup_ofc_tenant: No OFC tenant for %s'), - tenant_id) - except (nexc.OFCException, nexc.OFCMappingNotFound) as exc: - reason = _("delete_ofc_tenant() failed due to %s") % exc - LOG.warn(reason) - - def activate_port_if_ready(self, context, port, network=None): - """Activate port by creating port on OFC if ready. - - Conditions to activate port on OFC are: - * port admin_state is UP - * network admin_state is UP - * portinfo are available (to identify port on OFC) - """ - if not network: - network = super(NECPluginV2, self).get_network(context, - port['network_id']) - - if not port['admin_state_up']: - LOG.debug(_("activate_port_if_ready(): skip, " - "port.admin_state_up is False.")) - return port - elif not network['admin_state_up']: - LOG.debug(_("activate_port_if_ready(): skip, " - "network.admin_state_up is False.")) - return port - elif not ndb.get_portinfo(context.session, port['id']): - LOG.debug(_("activate_port_if_ready(): skip, " - "no portinfo for this port.")) - return port - elif self.ofc.exists_ofc_port(context, port['id']): - LOG.debug(_("activate_port_if_ready(): skip, " - "ofc_port already exists.")) - return port - - try: - self.ofc.create_ofc_port(context, port['id'], port) - port_status = const.PORT_STATUS_ACTIVE - except (nexc.OFCException, nexc.OFCMappingNotFound) as exc: - LOG.error(_("create_ofc_port() failed due to %s"), exc) - port_status = const.PORT_STATUS_ERROR - - if port_status != port['status']: - self._update_resource_status(context, "port", port['id'], - port_status) - port['status'] = port_status - - return port - - def 
deactivate_port(self, context, port, raise_exc=True): - """Deactivate port by deleting port from OFC if exists.""" - if not self.ofc.exists_ofc_port(context, port['id']): - LOG.debug(_("deactivate_port(): skip, ofc_port for port=%s " - "does not exist."), port['id']) - return port - - try: - self.ofc.delete_ofc_port(context, port['id'], port) - self._update_resource_status_if_changed( - context, "port", port, const.PORT_STATUS_DOWN) - return port - except (nexc.OFCResourceNotFound, nexc.OFCMappingNotFound): - # There is a case where multiple delete_port operation are - # running concurrently. For example, delete_port from - # release_dhcp_port and deletion of network owned ports in - # delete_network. In such cases delete_ofc_port may receive - # 404 error from OFC. - # Also there is a case where neutron port is deleted - # between exists_ofc_port and get_ofc_id in delete_ofc_port. - # In this case OFCMappingNotFound is raised. - # These two cases are valid situations. - LOG.info(_("deactivate_port(): OFC port for port=%s is " - "already removed."), port['id']) - # The port is already removed, so there is no need - # to update status in the database. - port['status'] = const.PORT_STATUS_DOWN - return port - except nexc.OFCException as exc: - with excutils.save_and_reraise_exception() as ctxt: - LOG.error(_("Failed to delete port=%(port)s from OFC: " - "%(exc)s"), {'port': port['id'], 'exc': exc}) - self._update_resource_status_if_changed( - context, "port", port, const.PORT_STATUS_ERROR) - if not raise_exc: - ctxt.reraise = False - return port - - def _net_status(self, network): - # NOTE: NEC Plugin accept admin_state_up. When it's False, this plugin - # deactivate all ports on the network to drop all packet and show - # status='DOWN' to users. But the network is kept defined on OFC. 
- if network['network']['admin_state_up']: - return const.NET_STATUS_ACTIVE - else: - return const.NET_STATUS_DOWN - - def create_network(self, context, network): - """Create a new network entry on DB, and create it on OFC.""" - LOG.debug(_("NECPluginV2.create_network() called, " - "network=%s ."), network) - tenant_id = self._get_tenant_id_for_create(context, network['network']) - net_name = network['network']['name'] - net_id = uuidutils.generate_uuid() - - #set up default security groups - self._ensure_default_security_group(context, tenant_id) - - network['network']['id'] = net_id - network['network']['status'] = self._net_status(network) - - try: - if not self.ofc.exists_ofc_tenant(context, tenant_id): - self.ofc.create_ofc_tenant(context, tenant_id) - self.ofc.create_ofc_network(context, tenant_id, net_id, net_name) - except (nexc.OFCException, nexc.OFCMappingNotFound) as exc: - LOG.error(_("Failed to create network id=%(id)s on " - "OFC: %(exc)s"), {'id': net_id, 'exc': exc}) - network['network']['status'] = const.NET_STATUS_ERROR - - with context.session.begin(subtransactions=True): - new_net = super(NECPluginV2, self).create_network(context, network) - self._process_l3_create(context, new_net, network['network']) - - return new_net - - def update_network(self, context, id, network): - """Update network and handle resources associated with the network. - - Update network entry on DB. If 'admin_state_up' was changed, activate - or deactivate ports and packetfilters associated with the network. 
- """ - LOG.debug(_("NECPluginV2.update_network() called, " - "id=%(id)s network=%(network)s ."), - {'id': id, 'network': network}) - - if 'admin_state_up' in network['network']: - network['network']['status'] = self._net_status(network) - - session = context.session - with session.begin(subtransactions=True): - old_net = super(NECPluginV2, self).get_network(context, id) - new_net = super(NECPluginV2, self).update_network(context, id, - network) - self._process_l3_update(context, new_net, network['network']) - - changed = (old_net['admin_state_up'] != new_net['admin_state_up']) - if changed and not new_net['admin_state_up']: - # disable all active ports of the network - filters = dict(network_id=[id], status=[const.PORT_STATUS_ACTIVE]) - ports = super(NECPluginV2, self).get_ports(context, - filters=filters) - for port in ports: - # If some error occurs, status of errored port is set to ERROR. - # This is avoids too many rollback. - # TODO(amotoki): Raise an exception after all port operations - # are finished to inform the caller of API of the failure. - self.deactivate_port(context, port, raise_exc=False) - elif changed and new_net['admin_state_up']: - # enable ports of the network - filters = dict(network_id=[id], status=[const.PORT_STATUS_DOWN], - admin_state_up=[True]) - ports = super(NECPluginV2, self).get_ports(context, - filters=filters) - for port in ports: - self.activate_port_if_ready(context, port, new_net) - - return new_net - - def delete_network(self, context, id): - """Delete network and packet_filters associated with the network. - - Delete network entry from DB and OFC. Then delete packet_filters - associated with the network. If the network is the last resource - of the tenant, delete unnessary ofc_tenant. 
- """ - LOG.debug(_("NECPluginV2.delete_network() called, id=%s ."), id) - net_db = self._get_network(context, id) - tenant_id = net_db['tenant_id'] - ports = self.get_ports(context, filters={'network_id': [id]}) - - # check if there are any tenant owned ports in-use; - # consider ports owned by floating ips as auto_delete as if there are - # no other tenant owned ports, those floating ips are disassociated - # and will be auto deleted with self._process_l3_delete() - only_auto_del = all(p['device_owner'] in - db_base_plugin_v2.AUTO_DELETE_PORT_OWNERS or - p['device_owner'] == const.DEVICE_OWNER_FLOATINGIP - for p in ports) - if not only_auto_del: - raise n_exc.NetworkInUse(net_id=id) - - self._process_l3_delete(context, id) - - # Make sure auto-delete ports on OFC are deleted. - # If an error occurs during port deletion, - # delete_network will be aborted. - for port in [p for p in ports if p['device_owner'] - in db_base_plugin_v2.AUTO_DELETE_PORT_OWNERS]: - port = self.deactivate_port(context, port) - - # delete all packet_filters of the network from the controller - for pf in net_db.packetfilters: - self.delete_packet_filter(context, pf['id']) - - if self.ofc.exists_ofc_network(context, id): - try: - self.ofc.delete_ofc_network(context, id, net_db) - except (nexc.OFCException, nexc.OFCMappingNotFound) as exc: - with excutils.save_and_reraise_exception(): - reason = _("delete_network() failed due to %s") % exc - LOG.error(reason) - self._update_resource_status( - context, "network", net_db['id'], - const.NET_STATUS_ERROR) - - super(NECPluginV2, self).delete_network(context, id) - - self._cleanup_ofc_tenant(context, tenant_id) - - def _get_base_binding_dict(self): - binding = { - portbindings.VIF_TYPE: portbindings.VIF_TYPE_OVS, - portbindings.VIF_DETAILS: { - # TODO(rkukura): Replace with new VIF security details - portbindings.CAP_PORT_FILTER: - 'security-group' in self.supported_extension_aliases, - portbindings.OVS_HYBRID_PLUG: True - } - } - return binding - 
- def _extend_port_dict_binding_portinfo(self, port_res, portinfo): - if portinfo: - port_res[portbindings.PROFILE] = { - 'datapath_id': portinfo['datapath_id'], - 'port_no': portinfo['port_no'], - } - elif portbindings.PROFILE in port_res: - del port_res[portbindings.PROFILE] - - def _validate_portinfo(self, profile): - key_specs = { - 'datapath_id': {'type:string': None, 'required': True}, - 'port_no': {'type:non_negative': None, 'required': True, - 'convert_to': attrs.convert_to_int} - } - msg = attrs._validate_dict_or_empty(profile, key_specs=key_specs) - if msg: - raise n_exc.InvalidInput(error_message=msg) - - datapath_id = profile.get('datapath_id') - port_no = profile.get('port_no') - try: - dpid = int(datapath_id, 16) - except ValueError: - raise nexc.ProfilePortInfoInvalidDataPathId() - if dpid > 0xffffffffffffffffL: - raise nexc.ProfilePortInfoInvalidDataPathId() - # Make sure dpid is a hex string beginning with 0x. - dpid = hex(dpid) - - if int(port_no) > 65535: - raise nexc.ProfilePortInfoInvalidPortNo() - - return {'datapath_id': dpid, 'port_no': port_no} - - def _process_portbindings_portinfo_create(self, context, port_data, port): - """Add portinfo according to bindings:profile in create_port(). - - :param context: neutron api request context - :param port_data: port attributes passed in PUT request - :param port: port attributes to be returned - """ - profile = port_data.get(portbindings.PROFILE) - # If portbindings.PROFILE is None, unspecified or an empty dict - # it is regarded that portbinding.PROFILE is not set. 
- profile_set = attrs.is_attr_set(profile) and profile - if profile_set: - portinfo = self._validate_portinfo(profile) - portinfo['mac'] = port['mac_address'] - ndb.add_portinfo(context.session, port['id'], **portinfo) - else: - portinfo = None - self._extend_port_dict_binding_portinfo(port, portinfo) - - def _process_portbindings_portinfo_update(self, context, port_data, port): - """Update portinfo according to bindings:profile in update_port(). - - :param context: neutron api request context - :param port_data: port attributes passed in PUT request - :param port: port attributes to be returned - :returns: 'ADD', 'MOD', 'DEL' or None - """ - if portbindings.PROFILE not in port_data: - return - profile = port_data.get(portbindings.PROFILE) - # If binding:profile is None or an empty dict, - # it means binding:.profile needs to be cleared. - # TODO(amotoki): Allow Make None in binding:profile in - # the API layer. See LP bug #1220011. - profile_set = attrs.is_attr_set(profile) and profile - cur_portinfo = ndb.get_portinfo(context.session, port['id']) - if profile_set: - portinfo = self._validate_portinfo(profile) - portinfo_changed = 'ADD' - if cur_portinfo: - if (necutils.cmp_dpid(portinfo['datapath_id'], - cur_portinfo.datapath_id) and - portinfo['port_no'] == cur_portinfo.port_no): - return - ndb.del_portinfo(context.session, port['id']) - portinfo_changed = 'MOD' - portinfo['mac'] = port['mac_address'] - ndb.add_portinfo(context.session, port['id'], **portinfo) - elif cur_portinfo: - portinfo_changed = 'DEL' - portinfo = None - ndb.del_portinfo(context.session, port['id']) - else: - portinfo = None - portinfo_changed = None - self._extend_port_dict_binding_portinfo(port, portinfo) - return portinfo_changed - - def extend_port_dict_binding(self, port_res, port_db): - super(NECPluginV2, self).extend_port_dict_binding(port_res, port_db) - self._extend_port_dict_binding_portinfo(port_res, port_db.portinfo) - - def _process_portbindings_create(self, context, 
port_data, port): - super(NECPluginV2, self)._process_portbindings_create_and_update( - context, port_data, port) - self._process_portbindings_portinfo_create(context, port_data, port) - - def _process_portbindings_update(self, context, port_data, port): - super(NECPluginV2, self)._process_portbindings_create_and_update( - context, port_data, port) - portinfo_changed = self._process_portbindings_portinfo_update( - context, port_data, port) - return portinfo_changed - - def _get_port_handler(self, operation, device_owner): - handlers = self.port_handlers[operation] - handler = handlers.get(device_owner) - if handler: - return handler - else: - return handlers['default'] - - def create_port(self, context, port): - """Create a new port entry on DB, then try to activate it.""" - LOG.debug(_("NECPluginV2.create_port() called, port=%s ."), port) - - port['port']['status'] = const.PORT_STATUS_DOWN - - port_data = port['port'] - with context.session.begin(subtransactions=True): - self._ensure_default_security_group_on_port(context, port) - sgids = self._get_security_groups_on_port(context, port) - port = super(NECPluginV2, self).create_port(context, port) - self._process_portbindings_create(context, port_data, port) - self._process_port_create_security_group( - context, port, sgids) - port[addr_pair.ADDRESS_PAIRS] = ( - self._process_create_allowed_address_pairs( - context, port, - port_data.get(addr_pair.ADDRESS_PAIRS))) - self.notify_security_groups_member_updated(context, port) - - handler = self._get_port_handler('create', port['device_owner']) - return handler(context, port) - - def _update_ofc_port_if_required(self, context, old_port, new_port, - portinfo_changed): - def get_ofport_exist(port): - return (port['admin_state_up'] and - bool(port.get(portbindings.PROFILE))) - - # Determine it is required to update OFC port - need_add = False - need_del = False - need_packet_filter_update = False - - old_ofport_exist = get_ofport_exist(old_port) - new_ofport_exist = 
get_ofport_exist(new_port) - - if old_port['admin_state_up'] != new_port['admin_state_up']: - if new_port['admin_state_up']: - need_add |= new_ofport_exist - else: - need_del |= old_ofport_exist - - if portinfo_changed: - if portinfo_changed in ['DEL', 'MOD']: - need_del |= old_ofport_exist - if portinfo_changed in ['ADD', 'MOD']: - need_add |= new_ofport_exist - need_packet_filter_update |= True - - # Update OFC port if required - if need_del: - self.deactivate_port(context, new_port) - if need_packet_filter_update: - self.deactivate_packet_filters_by_port(context, id) - if need_add: - if need_packet_filter_update: - self.activate_packet_filters_by_port(context, id) - self.activate_port_if_ready(context, new_port) - - def update_port(self, context, id, port): - """Update port, and handle packetfilters associated with the port. - - Update network entry on DB. If admin_state_up was changed, activate - or deactivate the port and packetfilters associated with it. - """ - LOG.debug(_("NECPluginV2.update_port() called, " - "id=%(id)s port=%(port)s ."), - {'id': id, 'port': port}) - need_port_update_notify = False - with context.session.begin(subtransactions=True): - old_port = super(NECPluginV2, self).get_port(context, id) - new_port = super(NECPluginV2, self).update_port(context, id, port) - portinfo_changed = self._process_portbindings_update( - context, port['port'], new_port) - if addr_pair.ADDRESS_PAIRS in port['port']: - need_port_update_notify |= ( - self.update_address_pairs_on_port(context, id, port, - old_port, - new_port)) - need_port_update_notify |= self.update_security_group_on_port( - context, id, port, old_port, new_port) - - need_port_update_notify |= self.is_security_group_member_updated( - context, old_port, new_port) - if need_port_update_notify: - self.notifier.port_update(context, new_port) - - self._update_ofc_port_if_required(context, old_port, new_port, - portinfo_changed) - return new_port - - def delete_port(self, context, id, 
l3_port_check=True): - """Delete port and packet_filters associated with the port.""" - LOG.debug(_("NECPluginV2.delete_port() called, id=%s ."), id) - # ext_sg.SECURITYGROUPS attribute for the port is required - # since notifier.security_groups_member_updated() need the attribute. - # Thus we need to call self.get_port() instead of super().get_port() - port_db = self._get_port(context, id) - port = self._make_port_dict(port_db) - - handler = self._get_port_handler('delete', port['device_owner']) - # handler() raises an exception if an error occurs during processing. - port = handler(context, port) - - # delete all packet_filters of the port from the controller - for pf in port_db.packetfilters: - self.delete_packet_filter(context, pf['id']) - - # if needed, check to see if this is a port owned by - # and l3-router. If so, we should prevent deletion. - if l3_port_check: - self.prevent_l3_port_deletion(context, id) - with context.session.begin(subtransactions=True): - self.disassociate_floatingips(context, id) - self._delete_port_security_group_bindings(context, id) - super(NECPluginV2, self).delete_port(context, id) - self.notify_security_groups_member_updated(context, port) - - -class NECPluginV2AgentNotifierApi(rpc_compat.RpcProxy, - sg_rpc.SecurityGroupAgentRpcApiMixin): - '''RPC API for NEC plugin agent.''' - - BASE_RPC_API_VERSION = '1.0' - - def __init__(self, topic): - super(NECPluginV2AgentNotifierApi, self).__init__( - topic=topic, default_version=self.BASE_RPC_API_VERSION) - self.topic_port_update = topics.get_topic_name( - topic, topics.PORT, topics.UPDATE) - - def port_update(self, context, port): - self.fanout_cast(context, - self.make_msg('port_update', - port=port), - topic=self.topic_port_update) - - -class DhcpRpcCallback(rpc_compat.RpcCallback, - dhcp_rpc_base.DhcpRpcCallbackMixin): - # DhcpPluginApi BASE_RPC_API_VERSION - RPC_API_VERSION = '1.1' - - -class L3RpcCallback(rpc_compat.RpcCallback, l3_rpc_base.L3RpcCallbackMixin): - # 1.0 L3PluginApi 
BASE_RPC_API_VERSION - # 1.1 Support update_floatingip_statuses - RPC_API_VERSION = '1.1' - - -class SecurityGroupServerRpcCallback( - rpc_compat.RpcCallback, - sg_db_rpc.SecurityGroupServerRpcCallbackMixin): - - RPC_API_VERSION = sg_rpc.SG_RPC_VERSION - - @staticmethod - def get_port_from_device(device): - port = ndb.get_port_from_device(device) - if port: - port['device'] = device - LOG.debug(_("NECPluginV2RPCCallbacks.get_port_from_device() called, " - "device=%(device)s => %(ret)s."), - {'device': device, 'ret': port}) - return port - - -class NECPluginV2RPCCallbacks(rpc_compat.RpcCallback): - - RPC_API_VERSION = '1.0' - - def __init__(self, plugin): - super(NECPluginV2RPCCallbacks, self).__init__() - self.plugin = plugin - - def update_ports(self, rpc_context, **kwargs): - """Update ports' information and activate/deavtivate them. - - Expected input format is: - {'topic': 'q-agent-notifier', - 'agent_id': 'nec-q-agent.' + , - 'datapath_id': , - 'port_added': [,...], - 'port_removed': [,...]} - """ - LOG.debug(_("NECPluginV2RPCCallbacks.update_ports() called, " - "kwargs=%s ."), kwargs) - datapath_id = kwargs['datapath_id'] - session = rpc_context.session - for p in kwargs.get('port_added', []): - id = p['id'] - portinfo = ndb.get_portinfo(session, id) - if portinfo: - if (necutils.cmp_dpid(portinfo.datapath_id, datapath_id) and - portinfo.port_no == p['port_no']): - LOG.debug(_("update_ports(): ignore unchanged portinfo in " - "port_added message (port_id=%s)."), id) - continue - ndb.del_portinfo(session, id) - port = self._get_port(rpc_context, id) - if port: - ndb.add_portinfo(session, id, datapath_id, p['port_no'], - mac=p.get('mac', '')) - # NOTE: Make sure that packet filters on this port exist while - # the port is active to avoid unexpected packet transfer. 
- if portinfo: - self.plugin.deactivate_port(rpc_context, port, - raise_exc=False) - self.plugin.deactivate_packet_filters_by_port( - rpc_context, id, raise_exc=False) - self.plugin.activate_packet_filters_by_port(rpc_context, id) - self.plugin.activate_port_if_ready(rpc_context, port) - for id in kwargs.get('port_removed', []): - portinfo = ndb.get_portinfo(session, id) - if not portinfo: - LOG.debug(_("update_ports(): ignore port_removed message " - "due to portinfo for port_id=%s was not " - "registered"), id) - continue - if not necutils.cmp_dpid(portinfo.datapath_id, datapath_id): - LOG.debug(_("update_ports(): ignore port_removed message " - "received from different host " - "(registered_datapath_id=%(registered)s, " - "received_datapath_id=%(received)s)."), - {'registered': portinfo.datapath_id, - 'received': datapath_id}) - continue - ndb.del_portinfo(session, id) - port = self._get_port(rpc_context, id) - if port: - self.plugin.deactivate_port(rpc_context, port, raise_exc=False) - self.plugin.deactivate_packet_filters_by_port( - rpc_context, id, raise_exc=False) - - def _get_port(self, context, port_id): - try: - return self.plugin.get_port(context, port_id) - except n_exc.PortNotFound: - return None diff --git a/neutron/plugins/nec/nec_router.py b/neutron/plugins/nec/nec_router.py deleted file mode 100644 index e1a6ef6c9..000000000 --- a/neutron/plugins/nec/nec_router.py +++ /dev/null @@ -1,358 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2013 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Akihiro Motoki - -from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api -from neutron.api.v2 import attributes as attr -from neutron.common import exceptions as n_exc -from neutron.db import db_base_plugin_v2 -from neutron.db import extraroute_db -from neutron.db import l3_agentschedulers_db -from neutron.db import l3_db -from neutron.db import l3_gwmode_db -from neutron.db import models_v2 -from neutron.extensions import l3 -from neutron.openstack.common import excutils -from neutron.openstack.common import importutils -from neutron.openstack.common import log as logging -from neutron.plugins.nec.common import config -from neutron.plugins.nec.common import constants as nconst -from neutron.plugins.nec.common import exceptions as nexc -from neutron.plugins.nec.db import router as rdb -from neutron.plugins.nec.extensions import router_provider as ext_provider - -LOG = logging.getLogger(__name__) - -PROVIDER_L3AGENT = nconst.ROUTER_PROVIDER_L3AGENT -PROVIDER_OPENFLOW = nconst.ROUTER_PROVIDER_OPENFLOW - -ROUTER_DRIVER_PATH = 'neutron.plugins.nec.router_drivers.' 
-ROUTER_DRIVER_MAP = { - PROVIDER_L3AGENT: ROUTER_DRIVER_PATH + 'RouterL3AgentDriver', - PROVIDER_OPENFLOW: ROUTER_DRIVER_PATH + 'RouterOpenFlowDriver' -} - -ROUTER_DRIVERS = {} - -STATUS_ACTIVE = nconst.ROUTER_STATUS_ACTIVE -STATUS_ERROR = nconst.ROUTER_STATUS_ERROR - - -class RouterMixin(extraroute_db.ExtraRoute_db_mixin, - l3_gwmode_db.L3_NAT_db_mixin): - - def create_router(self, context, router): - """Create a new router entry on DB, and create it on OFC.""" - LOG.debug(_("RouterMixin.create_router() called, " - "router=%s ."), router) - tenant_id = self._get_tenant_id_for_create(context, router['router']) - - provider = get_provider_with_default( - router['router'].get(ext_provider.ROUTER_PROVIDER)) - driver = get_driver_by_provider(provider) - - with context.session.begin(subtransactions=True): - new_router = super(RouterMixin, self).create_router(context, - router) - new_router['gw_port'] = self._get_gw_port_detail( - context, driver, new_router['gw_port_id']) - rdb.add_router_provider_binding(context.session, - provider, str(new_router['id'])) - self._extend_router_dict_provider(new_router, provider) - - # create router on the network controller - try: - return driver.create_router(context, tenant_id, new_router) - except nexc.RouterOverLimit: - with excutils.save_and_reraise_exception(): - super(RouterMixin, self).delete_router(context, - new_router['id']) - - def update_router(self, context, router_id, router): - LOG.debug(_("RouterMixin.update_router() called, " - "id=%(id)s, router=%(router)s ."), - {'id': router_id, 'router': router}) - - with context.session.begin(subtransactions=True): - old_rtr = super(RouterMixin, self).get_router(context, router_id) - provider = old_rtr[ext_provider.ROUTER_PROVIDER] - driver = get_driver_by_provider(provider) - old_rtr['gw_port'] = self._get_gw_port_detail( - context, driver, old_rtr['gw_port_id']) - new_rtr = super(RouterMixin, self).update_router( - context, router_id, router) - new_rtr['gw_port'] = 
self._get_gw_port_detail( - context, driver, new_rtr['gw_port_id']) - driver.update_router(context, router_id, old_rtr, new_rtr) - return new_rtr - - def delete_router(self, context, router_id): - LOG.debug(_("RouterMixin.delete_router() called, id=%s."), router_id) - - router = super(RouterMixin, self).get_router(context, router_id) - tenant_id = router['tenant_id'] - # Since l3_db.delete_router() has no interaction with the plugin layer, - # we need to check if the router can be deleted first. - self._check_router_in_use(context, router_id) - driver = self._get_router_driver_by_id(context, router_id) - # If gw_port exists, remove it. - gw_port = self._get_gw_port(context, router_id) - if gw_port: - driver.delete_interface(context, router_id, gw_port) - driver.delete_router(context, router_id, router) - - super(RouterMixin, self).delete_router(context, router_id) - - self._cleanup_ofc_tenant(context, tenant_id) - - def add_router_interface(self, context, router_id, interface_info): - LOG.debug(_("RouterMixin.add_router_interface() called, " - "id=%(id)s, interface=%(interface)s."), - {'id': router_id, 'interface': interface_info}) - return super(RouterMixin, self).add_router_interface( - context, router_id, interface_info) - - def remove_router_interface(self, context, router_id, interface_info): - LOG.debug(_("RouterMixin.remove_router_interface() called, " - "id=%(id)s, interface=%(interface)s."), - {'id': router_id, 'interface': interface_info}) - return super(RouterMixin, self).remove_router_interface( - context, router_id, interface_info) - - def create_router_port(self, context, port): - # This method is called from plugin.create_port() - router_id = port['device_id'] - driver = self._get_router_driver_by_id(context, router_id) - port = driver.add_interface(context, router_id, port) - return port - - def delete_router_port(self, context, port): - # This method is called from plugin.delete_port() - router_id = port['device_id'] - driver = 
self._get_router_driver_by_id(context, router_id) - return driver.delete_interface(context, router_id, port) - - def _get_gw_port_detail(self, context, driver, gw_port_id): - if not gw_port_id or not driver.need_gw_info: - return - ctx_elevated = context.elevated() - gw_port = self._get_port(ctx_elevated, gw_port_id) - # At this moment gw_port has been created, so it is guaranteed - # that fixed_ip is assigned for the gw_port. - ext_subnet_id = gw_port['fixed_ips'][0]['subnet_id'] - ext_subnet = self._get_subnet(ctx_elevated, ext_subnet_id) - gw_info = {'network_id': gw_port['network_id'], - 'ip_address': gw_port['fixed_ips'][0]['ip_address'], - 'mac_address': gw_port['mac_address'], - 'cidr': ext_subnet['cidr'], - 'gateway_ip': ext_subnet['gateway_ip']} - return gw_info - - def _get_gw_port(self, context, router_id): - device_filter = {'device_id': [router_id], - 'device_owner': [l3_db.DEVICE_OWNER_ROUTER_GW]} - ports = self.get_ports(context.elevated(), filters=device_filter) - if ports: - return ports[0] - - def _check_router_in_use(self, context, router_id): - with context.session.begin(subtransactions=True): - # Ensure that the router is not used - router_filter = {'router_id': [router_id]} - fips = self.get_floatingips_count(context.elevated(), - filters=router_filter) - if fips: - raise l3.RouterInUse(router_id=router_id) - - device_filter = {'device_id': [router_id], - 'device_owner': [l3_db.DEVICE_OWNER_ROUTER_INTF]} - ports = self.get_ports_count(context.elevated(), - filters=device_filter) - if ports: - raise l3.RouterInUse(router_id=router_id) - - def _get_router_for_floatingip(self, context, internal_port, - internal_subnet_id, - external_network_id): - """Get a router for a requested floating IP. - - OpenFlow vrouter does not support NAT, so we need to exclude them - from candidate routers for floating IP association. - This method is called in l3_db.get_assoc_data(). 
- """ - subnet_db = self._get_subnet(context, internal_subnet_id) - if not subnet_db['gateway_ip']: - msg = (_('Cannot add floating IP to port on subnet %s ' - 'which has no gateway_ip') % internal_subnet_id) - raise n_exc.BadRequest(resource='floatingip', msg=msg) - - # find router interface ports on this network - router_intf_qry = context.session.query(models_v2.Port) - router_intf_ports = router_intf_qry.filter_by( - network_id=internal_port['network_id'], - device_owner=l3_db.DEVICE_OWNER_ROUTER_INTF) - - for intf_p in router_intf_ports: - if intf_p['fixed_ips'][0]['subnet_id'] == internal_subnet_id: - router_id = intf_p['device_id'] - router_gw_qry = context.session.query(models_v2.Port) - has_gw_port = router_gw_qry.filter_by( - network_id=external_network_id, - device_id=router_id, - device_owner=l3_db.DEVICE_OWNER_ROUTER_GW).count() - driver = self._get_router_driver_by_id(context, router_id) - if (has_gw_port and driver.floating_ip_support()): - return router_id - - raise l3.ExternalGatewayForFloatingIPNotFound( - subnet_id=internal_subnet_id, - external_network_id=external_network_id, - port_id=internal_port['id']) - - def _get_sync_routers(self, context, router_ids=None, active=None): - """Query routers and their gw ports for l3 agent. - - The difference from the superclass in l3_db is that this method - only lists routers hosted on l3-agents. 
- """ - router_list = super(RouterMixin, self)._get_sync_routers( - context, router_ids, active) - if router_list: - _router_ids = [r['id'] for r in router_list] - agent_routers = rdb.get_routers_by_provider( - context.session, 'l3-agent', - router_ids=_router_ids) - router_list = [r for r in router_list - if r['id'] in agent_routers] - return router_list - - def _get_router_driver_by_id(self, context, router_id): - provider = self._get_provider_by_router_id(context, router_id) - return get_driver_by_provider(provider) - - def _get_provider_by_router_id(self, context, router_id): - return rdb.get_provider_by_router(context.session, router_id) - - def _extend_router_dict_provider(self, router_res, provider): - router_res[ext_provider.ROUTER_PROVIDER] = provider - - def extend_router_dict_provider(self, router_res, router_db): - # NOTE: router_db.provider is None just after creating a router, - # so we need to skip setting router_provider here. - if not router_db.provider: - return - self._extend_router_dict_provider(router_res, - router_db.provider['provider']) - - db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( - l3.ROUTERS, [extend_router_dict_provider]) - - -class L3AgentSchedulerDbMixin(l3_agentschedulers_db.L3AgentSchedulerDbMixin): - - def auto_schedule_routers(self, context, host, router_ids): - router_ids = rdb.get_routers_by_provider( - context.session, nconst.ROUTER_PROVIDER_L3AGENT, router_ids) - # If no l3-agent hosted router, there is no need to schedule. 
- if not router_ids: - return - return super(L3AgentSchedulerDbMixin, self).auto_schedule_routers( - context, host, router_ids) - - def schedule_router(self, context, router): - if (self._get_provider_by_router_id(context, router) == - nconst.ROUTER_PROVIDER_L3AGENT): - return super(L3AgentSchedulerDbMixin, self).schedule_router( - context, router) - - def add_router_to_l3_agent(self, context, id, router_id): - provider = self._get_provider_by_router_id(context, router_id) - if provider != nconst.ROUTER_PROVIDER_L3AGENT: - raise nexc.RouterProviderMismatch( - router_id=router_id, provider=provider, - expected_provider=nconst.ROUTER_PROVIDER_L3AGENT) - return super(L3AgentSchedulerDbMixin, self).add_router_to_l3_agent( - context, id, router_id) - - -class L3AgentNotifyAPI(l3_rpc_agent_api.L3AgentNotifyAPI): - - def _notification(self, context, method, router_ids, operation, data): - """Notify all the agents that are hosting the routers. - - _notification() is called in L3 db plugin for all routers regardless - the routers are hosted on l3 agents or not. When the routers are - not hosted on l3 agents, there is no need to notify. - This method filters routers not hosted by l3 agents. - """ - router_ids = rdb.get_routers_by_provider( - context.session, nconst.ROUTER_PROVIDER_L3AGENT, router_ids) - super(L3AgentNotifyAPI, self)._notification( - context, method, router_ids, operation, data) - - -def load_driver(plugin, ofc_manager): - - if (PROVIDER_OPENFLOW in ROUTER_DRIVER_MAP and - not ofc_manager.driver.router_supported): - LOG.warning( - _('OFC does not support router with provider=%(provider)s, ' - 'so removed it from supported provider ' - '(new router driver map=%(driver_map)s)'), - {'provider': PROVIDER_OPENFLOW, - 'driver_map': ROUTER_DRIVER_MAP}) - del ROUTER_DRIVER_MAP[PROVIDER_OPENFLOW] - - if config.PROVIDER.default_router_provider not in ROUTER_DRIVER_MAP: - LOG.error(_('default_router_provider %(default)s is supported! 
' - 'Please specify one of %(supported)s'), - {'default': config.PROVIDER.default_router_provider, - 'supported': ROUTER_DRIVER_MAP.keys()}) - raise SystemExit(1) - - enabled_providers = (set(config.PROVIDER.router_providers + - [config.PROVIDER.default_router_provider]) & - set(ROUTER_DRIVER_MAP.keys())) - - for driver in enabled_providers: - driver_klass = importutils.import_class(ROUTER_DRIVER_MAP[driver]) - ROUTER_DRIVERS[driver] = driver_klass(plugin, ofc_manager) - - LOG.info(_('Enabled router drivers: %s'), ROUTER_DRIVERS.keys()) - - if not ROUTER_DRIVERS: - LOG.error(_('No router provider is enabled. neutron-server terminated!' - ' (supported=%(supported)s, configured=%(config)s)'), - {'supported': ROUTER_DRIVER_MAP.keys(), - 'config': config.PROVIDER.router_providers}) - raise SystemExit(1) - - -def get_provider_with_default(provider): - if not attr.is_attr_set(provider): - provider = config.PROVIDER.default_router_provider - elif provider not in ROUTER_DRIVERS: - raise nexc.ProviderNotFound(provider=provider) - return provider - - -def get_driver_by_provider(provider): - if provider is None: - provider = config.PROVIDER.default_router_provider - elif provider not in ROUTER_DRIVERS: - raise nexc.ProviderNotFound(provider=provider) - return ROUTER_DRIVERS[provider] diff --git a/neutron/plugins/nec/ofc_driver_base.py b/neutron/plugins/nec/ofc_driver_base.py deleted file mode 100644 index cde69c36d..000000000 --- a/neutron/plugins/nec/ofc_driver_base.py +++ /dev/null @@ -1,105 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2012 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# @author: Ryota MIBU -# @author: Akihiro MOTOKI - -import abc -import six - - -@six.add_metaclass(abc.ABCMeta) -class OFCDriverBase(object): - """OpenFlow Controller (OFC) Driver Specification. - - OFCDriverBase defines the minimum set of methods required by this plugin. - It would be better that other methods like update_* are implemented. - """ - - @abc.abstractmethod - def create_tenant(self, description, tenant_id=None): - """Create a new tenant at OpenFlow Controller. - - :param description: A description of this tenant. - :param tenant_id: A hint of OFC tenant ID. - A driver could use this id as a OFC id or ignore it. - :returns: ID of the tenant created at OpenFlow Controller. - :raises: neutron.plugin.nec.common.exceptions.OFCException - """ - pass - - @abc.abstractmethod - def delete_tenant(self, ofc_tenant_id): - """Delete a tenant at OpenFlow Controller. - - :raises: neutron.plugin.nec.common.exceptions.OFCException - """ - pass - - @abc.abstractmethod - def create_network(self, ofc_tenant_id, description, network_id=None): - """Create a new network on specified OFC tenant at OpenFlow Controller. - - :param ofc_tenant_id: a OFC tenant ID in which a new network belongs. - :param description: A description of this network. - :param network_id: A hint of an ID of OFC network. - :returns: ID of the network created at OpenFlow Controller. - ID returned must be unique in the OpenFlow Controller. - If a network is identified in conjunction with other information - such as a tenant ID, such information should be included in the ID. 
- :raises: neutron.plugin.nec.common.exceptions.OFCException - """ - pass - - @abc.abstractmethod - def delete_network(self, ofc_network_id): - """Delete a netwrok at OpenFlow Controller. - - :raises: neutron.plugin.nec.common.exceptions.OFCException - """ - pass - - @abc.abstractmethod - def create_port(self, ofc_network_id, portinfo, - port_id=None, filters=None): - """Create a new port on specified network at OFC. - - :param ofc_network_id: a OFC tenant ID in which a new port belongs. - :param portinfo: An OpenFlow information of this port. - {'datapath_id': Switch ID that a port connected. - 'port_no': Port Number that a port connected on a Swtich. - 'vlan_id': VLAN ID that a port tagging. - 'mac': Mac address. - } - :param port_id: A hint of an ID of OFC port. - ID returned must be unique in the OpenFlow Controller. - - If a port is identified in combination with a network or - a tenant, such information should be included in the ID. - :param filters: A list of packet filter associated with the port. - Each element is a tuple (neutron ID, OFC ID) - - :returns: ID of the port created at OpenFlow Controller. - :raises: neutron.plugin.nec.common.exceptions.OFCException - """ - pass - - @abc.abstractmethod - def delete_port(self, ofc_port_id): - """Delete a port at OpenFlow Controller. - - :raises: neutron.plugin.nec.common.exceptions.OFCException - """ - pass diff --git a/neutron/plugins/nec/ofc_manager.py b/neutron/plugins/nec/ofc_manager.py deleted file mode 100644 index a448a5445..000000000 --- a/neutron/plugins/nec/ofc_manager.py +++ /dev/null @@ -1,201 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2012 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# @author: Ryota MIBU -# @author: Akihiro MOTOKI - -import netaddr - -from neutron.common import utils -from neutron.openstack.common import log as logging -from neutron.plugins.nec.common import config -from neutron.plugins.nec.common import exceptions as nexc -from neutron.plugins.nec.db import api as ndb -from neutron.plugins.nec import drivers - - -LOG = logging.getLogger(__name__) - - -class OFCManager(object): - """This class manages an OpenFlow Controller and map resources. - - This class manage an OpenFlow Controller (OFC) with a driver specified in - a configuration of this plugin. This keeps mappings between IDs on Neutron - and OFC for various entities such as Tenant, Network and Filter. A Port on - OFC is identified by a switch ID 'datapath_id' and a port number 'port_no' - of the switch. An ID named as 'ofc_*' is used to identify resource on OFC. 
- """ - - def __init__(self, plugin): - self.driver = drivers.get_driver(config.OFC.driver)(config.OFC) - self.plugin = plugin - - def _get_ofc_id(self, context, resource, neutron_id): - return ndb.get_ofc_id(context.session, resource, neutron_id) - - def _exists_ofc_item(self, context, resource, neutron_id): - return ndb.exists_ofc_item(context.session, resource, neutron_id) - - def _add_ofc_item(self, context, resource, neutron_id, ofc_id): - # Ensure a new item is added to the new mapping table - ndb.add_ofc_item(context.session, resource, neutron_id, ofc_id) - - def _del_ofc_item(self, context, resource, neutron_id): - ndb.del_ofc_item(context.session, resource, neutron_id) - - def ensure_ofc_tenant(self, context, tenant_id): - if not self.exists_ofc_tenant(context, tenant_id): - self.create_ofc_tenant(context, tenant_id) - - def create_ofc_tenant(self, context, tenant_id): - desc = "ID=%s at OpenStack." % tenant_id - ofc_tenant_id = self.driver.create_tenant(desc, tenant_id) - self._add_ofc_item(context, "ofc_tenant", tenant_id, ofc_tenant_id) - - def exists_ofc_tenant(self, context, tenant_id): - return self._exists_ofc_item(context, "ofc_tenant", tenant_id) - - def delete_ofc_tenant(self, context, tenant_id): - ofc_tenant_id = self._get_ofc_id(context, "ofc_tenant", tenant_id) - self.driver.delete_tenant(ofc_tenant_id) - self._del_ofc_item(context, "ofc_tenant", tenant_id) - - def create_ofc_network(self, context, tenant_id, network_id, - network_name=None): - ofc_tenant_id = self._get_ofc_id(context, "ofc_tenant", tenant_id) - desc = "ID=%s Name=%s at Neutron." 
% (network_id, network_name) - ofc_net_id = self.driver.create_network(ofc_tenant_id, desc, - network_id) - self._add_ofc_item(context, "ofc_network", network_id, ofc_net_id) - - def exists_ofc_network(self, context, network_id): - return self._exists_ofc_item(context, "ofc_network", network_id) - - def delete_ofc_network(self, context, network_id, network): - ofc_net_id = self._get_ofc_id(context, "ofc_network", network_id) - self.driver.delete_network(ofc_net_id) - self._del_ofc_item(context, "ofc_network", network_id) - - def create_ofc_port(self, context, port_id, port): - ofc_net_id = self._get_ofc_id(context, "ofc_network", - port['network_id']) - portinfo = ndb.get_portinfo(context.session, port_id) - if not portinfo: - raise nexc.PortInfoNotFound(id=port_id) - - # Associate packet filters - filters = self.plugin.get_packet_filters_for_port(context, port) - if filters is not None: - params = {'filters': filters} - else: - params = {} - - ofc_port_id = self.driver.create_port(ofc_net_id, portinfo, port_id, - **params) - self._add_ofc_item(context, "ofc_port", port_id, ofc_port_id) - - def exists_ofc_port(self, context, port_id): - return self._exists_ofc_item(context, "ofc_port", port_id) - - def delete_ofc_port(self, context, port_id, port): - ofc_port_id = self._get_ofc_id(context, "ofc_port", port_id) - self.driver.delete_port(ofc_port_id) - self._del_ofc_item(context, "ofc_port", port_id) - - def create_ofc_packet_filter(self, context, filter_id, filter_dict): - ofc_net_id = self._get_ofc_id(context, "ofc_network", - filter_dict['network_id']) - in_port_id = filter_dict.get('in_port') - portinfo = None - if in_port_id: - portinfo = ndb.get_portinfo(context.session, in_port_id) - if not portinfo: - raise nexc.PortInfoNotFound(id=in_port_id) - - # Collect ports to be associated with the filter - apply_ports = ndb.get_active_ports_on_ofc( - context, filter_dict['network_id'], in_port_id) - ofc_pf_id = self.driver.create_filter(ofc_net_id, - filter_dict, 
portinfo, filter_id, - apply_ports) - self._add_ofc_item(context, "ofc_packet_filter", filter_id, ofc_pf_id) - - def update_ofc_packet_filter(self, context, filter_id, filter_dict): - ofc_pf_id = self._get_ofc_id(context, "ofc_packet_filter", filter_id) - ofc_pf_id = self.driver.convert_ofc_filter_id(context, ofc_pf_id) - self.driver.update_filter(ofc_pf_id, filter_dict) - - def exists_ofc_packet_filter(self, context, filter_id): - return self._exists_ofc_item(context, "ofc_packet_filter", filter_id) - - def delete_ofc_packet_filter(self, context, filter_id): - ofc_pf_id = self._get_ofc_id(context, "ofc_packet_filter", filter_id) - self.driver.delete_filter(ofc_pf_id) - self._del_ofc_item(context, "ofc_packet_filter", filter_id) - - def create_ofc_router(self, context, tenant_id, router_id, name=None): - ofc_tenant_id = self._get_ofc_id(context, "ofc_tenant", tenant_id) - desc = "ID=%s Name=%s at Neutron." % (router_id, name) - ofc_router_id = self.driver.create_router(ofc_tenant_id, router_id, - desc) - self._add_ofc_item(context, "ofc_router", router_id, ofc_router_id) - - def exists_ofc_router(self, context, router_id): - return self._exists_ofc_item(context, "ofc_router", router_id) - - def delete_ofc_router(self, context, router_id, router): - ofc_router_id = self._get_ofc_id(context, "ofc_router", router_id) - self.driver.delete_router(ofc_router_id) - self._del_ofc_item(context, "ofc_router", router_id) - - def add_ofc_router_interface(self, context, router_id, port_id, port): - # port must have the following fields: - # network_id, cidr, ip_address, mac_address - ofc_router_id = self._get_ofc_id(context, "ofc_router", router_id) - ofc_net_id = self._get_ofc_id(context, "ofc_network", - port['network_id']) - ip_address = '%s/%s' % (port['ip_address'], - netaddr.IPNetwork(port['cidr']).prefixlen) - mac_address = port['mac_address'] - ofc_inf_id = self.driver.add_router_interface( - ofc_router_id, ofc_net_id, ip_address, mac_address) - # Use port mapping table 
to maintain an interface of OFC router - self._add_ofc_item(context, "ofc_port", port_id, ofc_inf_id) - - def delete_ofc_router_interface(self, context, router_id, port_id): - # Use port mapping table to maintain an interface of OFC router - ofc_inf_id = self._get_ofc_id(context, "ofc_port", port_id) - self.driver.delete_router_interface(ofc_inf_id) - self._del_ofc_item(context, "ofc_port", port_id) - - def update_ofc_router_route(self, context, router_id, new_routes): - ofc_router_id = self._get_ofc_id(context, "ofc_router", router_id) - ofc_routes = self.driver.list_router_routes(ofc_router_id) - route_dict = {} - cur_routes = [] - for r in ofc_routes: - key = ','.join((r['destination'], r['nexthop'])) - route_dict[key] = r['id'] - del r['id'] - cur_routes.append(r) - added, removed = utils.diff_list_of_dict(cur_routes, new_routes) - for r in removed: - key = ','.join((r['destination'], r['nexthop'])) - route_id = route_dict[key] - self.driver.delete_router_route(route_id) - for r in added: - self.driver.add_router_route(ofc_router_id, r['destination'], - r['nexthop']) diff --git a/neutron/plugins/nec/packet_filter.py b/neutron/plugins/nec/packet_filter.py deleted file mode 100644 index df48ebff8..000000000 --- a/neutron/plugins/nec/packet_filter.py +++ /dev/null @@ -1,258 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2012-2013 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# @author: Ryota MIBU - -from neutron.openstack.common import excutils -from neutron.openstack.common import log as logging -from neutron.plugins.nec.common import config -from neutron.plugins.nec.common import exceptions as nexc -from neutron.plugins.nec.db import api as ndb -from neutron.plugins.nec.db import packetfilter as pf_db - - -LOG = logging.getLogger(__name__) - - -class PacketFilterMixin(pf_db.PacketFilterDbMixin): - """Mixin class to add packet filter to NECPluginV2.""" - - @property - def packet_filter_enabled(self): - if not hasattr(self, '_packet_filter_enabled'): - self._packet_filter_enabled = ( - config.OFC.enable_packet_filter and - self.ofc.driver.filter_supported()) - return self._packet_filter_enabled - - def remove_packet_filter_extension_if_disabled(self, aliases): - if not self.packet_filter_enabled: - LOG.debug(_('Disabled packet-filter extension.')) - aliases.remove('packet-filter') - - def create_packet_filter(self, context, packet_filter): - """Create a new packet_filter entry on DB, then try to activate it.""" - LOG.debug(_("create_packet_filter() called, packet_filter=%s ."), - packet_filter) - - if hasattr(self.ofc.driver, 'validate_filter_create'): - pf = packet_filter['packet_filter'] - self.ofc.driver.validate_filter_create(context, pf) - pf = super(PacketFilterMixin, self).create_packet_filter( - context, packet_filter) - - return self.activate_packet_filter_if_ready(context, pf) - - def update_packet_filter(self, context, id, packet_filter): - """Update packet_filter entry on DB, and recreate it if changed. - - If any rule of the packet_filter was changed, recreate it on OFC. 
- """ - LOG.debug(_("update_packet_filter() called, " - "id=%(id)s packet_filter=%(packet_filter)s ."), - {'id': id, 'packet_filter': packet_filter}) - - pf_data = packet_filter['packet_filter'] - if hasattr(self.ofc.driver, 'validate_filter_update'): - self.ofc.driver.validate_filter_update(context, pf_data) - - # validate ownership - pf_old = self.get_packet_filter(context, id) - - pf = super(PacketFilterMixin, self).update_packet_filter( - context, id, packet_filter) - - def _packet_filter_changed(old_pf, new_pf): - LOG.debug('old_pf=%(old_pf)s, new_pf=%(new_pf)s', - {'old_pf': old_pf, 'new_pf': new_pf}) - # When the status is ERROR, force sync to OFC. - if old_pf['status'] == pf_db.PF_STATUS_ERROR: - LOG.debug('update_packet_filter: Force filter update ' - 'because the previous status is ERROR.') - return True - for key in new_pf: - if key in ('id', 'name', 'tenant_id', 'network_id', - 'in_port', 'status'): - continue - if old_pf[key] != new_pf[key]: - return True - return False - - if _packet_filter_changed(pf_old, pf): - if hasattr(self.ofc.driver, 'update_filter'): - # admin_state is changed - if pf_old['admin_state_up'] != pf['admin_state_up']: - LOG.debug('update_packet_filter: admin_state ' - 'is changed to %s', pf['admin_state_up']) - if pf['admin_state_up']: - self.activate_packet_filter_if_ready(context, pf) - else: - self.deactivate_packet_filter(context, pf) - elif pf['admin_state_up']: - LOG.debug('update_packet_filter: admin_state is ' - 'unchanged (True)') - if self.ofc.exists_ofc_packet_filter(context, id): - pf = self._update_packet_filter(context, pf, pf_data) - else: - pf = self.activate_packet_filter_if_ready(context, pf) - else: - LOG.debug('update_packet_filter: admin_state is unchanged ' - '(False). 
No need to update OFC filter.') - else: - pf = self.deactivate_packet_filter(context, pf) - pf = self.activate_packet_filter_if_ready(context, pf) - - return pf - - def _update_packet_filter(self, context, new_pf, pf_data): - pf_id = new_pf['id'] - prev_status = new_pf['status'] - try: - # If previous status is ERROR, try to sync all attributes. - pf = new_pf if prev_status == pf_db.PF_STATUS_ERROR else pf_data - self.ofc.update_ofc_packet_filter(context, pf_id, pf) - new_status = pf_db.PF_STATUS_ACTIVE - if new_status != prev_status: - self._update_resource_status(context, "packet_filter", - pf_id, new_status) - new_pf['status'] = new_status - return new_pf - except Exception as exc: - with excutils.save_and_reraise_exception(): - if (isinstance(exc, nexc.OFCException) or - isinstance(exc, nexc.OFCConsistencyBroken)): - LOG.error(_("Failed to create packet_filter id=%(id)s on " - "OFC: %(exc)s"), - {'id': pf_id, 'exc': exc}) - new_status = pf_db.PF_STATUS_ERROR - if new_status != prev_status: - self._update_resource_status(context, "packet_filter", - pf_id, new_status) - - def delete_packet_filter(self, context, id): - """Deactivate and delete packet_filter.""" - LOG.debug(_("delete_packet_filter() called, id=%s ."), id) - - # validate ownership - pf = self.get_packet_filter(context, id) - - # deactivate_packet_filter() raises an exception - # if an error occurs during processing. - pf = self.deactivate_packet_filter(context, pf) - - super(PacketFilterMixin, self).delete_packet_filter(context, id) - - def activate_packet_filter_if_ready(self, context, packet_filter): - """Activate packet_filter by creating filter on OFC if ready. 
- - Conditions to create packet_filter on OFC are: - * packet_filter admin_state is UP - * (if 'in_port' is specified) portinfo is available - """ - LOG.debug(_("activate_packet_filter_if_ready() called, " - "packet_filter=%s."), packet_filter) - - pf_id = packet_filter['id'] - in_port_id = packet_filter.get('in_port') - current = packet_filter['status'] - - pf_status = current - if not packet_filter['admin_state_up']: - LOG.debug(_("activate_packet_filter_if_ready(): skip pf_id=%s, " - "packet_filter.admin_state_up is False."), pf_id) - elif in_port_id and not ndb.get_portinfo(context.session, in_port_id): - LOG.debug(_("activate_packet_filter_if_ready(): skip " - "pf_id=%s, no portinfo for the in_port."), pf_id) - elif self.ofc.exists_ofc_packet_filter(context, packet_filter['id']): - LOG.debug(_("_activate_packet_filter_if_ready(): skip, " - "ofc_packet_filter already exists.")) - else: - LOG.debug(_("activate_packet_filter_if_ready(): create " - "packet_filter id=%s on OFC."), pf_id) - try: - self.ofc.create_ofc_packet_filter(context, pf_id, - packet_filter) - pf_status = pf_db.PF_STATUS_ACTIVE - except (nexc.OFCException, nexc.OFCMappingNotFound) as exc: - LOG.error(_("Failed to create packet_filter id=%(id)s on " - "OFC: %(exc)s"), {'id': pf_id, 'exc': exc}) - pf_status = pf_db.PF_STATUS_ERROR - - if pf_status != current: - self._update_resource_status(context, "packet_filter", pf_id, - pf_status) - packet_filter.update({'status': pf_status}) - - return packet_filter - - def deactivate_packet_filter(self, context, packet_filter): - """Deactivate packet_filter by deleting filter from OFC if exixts.""" - LOG.debug(_("deactivate_packet_filter_if_ready() called, " - "packet_filter=%s."), packet_filter) - pf_id = packet_filter['id'] - - if not self.ofc.exists_ofc_packet_filter(context, pf_id): - LOG.debug(_("deactivate_packet_filter(): skip, " - "Not found OFC Mapping for packet_filter id=%s."), - pf_id) - return packet_filter - - 
LOG.debug(_("deactivate_packet_filter(): " - "deleting packet_filter id=%s from OFC."), pf_id) - try: - self.ofc.delete_ofc_packet_filter(context, pf_id) - self._update_resource_status_if_changed( - context, "packet_filter", packet_filter, pf_db.PF_STATUS_DOWN) - return packet_filter - except (nexc.OFCException, nexc.OFCMappingNotFound) as exc: - with excutils.save_and_reraise_exception(): - LOG.error(_("Failed to delete packet_filter id=%(id)s " - "from OFC: %(exc)s"), - {'id': pf_id, 'exc': str(exc)}) - self._update_resource_status_if_changed( - context, "packet_filter", packet_filter, - pf_db.PF_STATUS_ERROR) - - def activate_packet_filters_by_port(self, context, port_id): - if not self.packet_filter_enabled: - return - - filters = {'in_port': [port_id], 'admin_state_up': [True], - 'status': [pf_db.PF_STATUS_DOWN]} - pfs = self.get_packet_filters(context, filters=filters) - for pf in pfs: - self.activate_packet_filter_if_ready(context, pf) - - def deactivate_packet_filters_by_port(self, context, port_id, - raise_exc=True): - if not self.packet_filter_enabled: - return - - filters = {'in_port': [port_id], 'status': [pf_db.PF_STATUS_ACTIVE]} - pfs = self.get_packet_filters(context, filters=filters) - error = False - for pf in pfs: - try: - self.deactivate_packet_filter(context, pf) - except (nexc.OFCException, nexc.OFCMappingNotFound): - error = True - if raise_exc and error: - raise nexc.OFCException(_('Error occurred while disabling packet ' - 'filter(s) for port %s'), port_id) - - def get_packet_filters_for_port(self, context, port): - if self.packet_filter_enabled: - return super(PacketFilterMixin, - self).get_packet_filters_for_port(context, port) diff --git a/neutron/plugins/nec/router_drivers.py b/neutron/plugins/nec/router_drivers.py deleted file mode 100644 index 407ea5365..000000000 --- a/neutron/plugins/nec/router_drivers.py +++ /dev/null @@ -1,224 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2013 NEC Corporation. 
All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Akihiro Motoki - -import abc -import httplib - -import six - -from neutron.common import log as call_log -from neutron.common import utils -from neutron.openstack.common import excutils -from neutron.openstack.common import log as logging -from neutron.plugins.nec.common import constants as nconst -from neutron.plugins.nec.common import exceptions as nexc - -LOG = logging.getLogger(__name__) - -PROVIDER_OPENFLOW = nconst.ROUTER_PROVIDER_OPENFLOW - - -@six.add_metaclass(abc.ABCMeta) -class RouterDriverBase(object): - - def __init__(self, plugin, ofc_manager): - self.plugin = plugin - self.ofc = ofc_manager - - def floating_ip_support(self): - return True - - @abc.abstractmethod - def create_router(self, context, tenant_id, router): - pass - - @abc.abstractmethod - def update_router(self, context, router_id, old_router, new_router): - pass - - @abc.abstractmethod - def delete_router(self, context, router_id, router): - pass - - @abc.abstractmethod - def add_interface(self, context, router_id, port): - pass - - @abc.abstractmethod - def delete_interface(self, context, router_id, port): - pass - - -class RouterL3AgentDriver(RouterDriverBase): - - need_gw_info = False - - @call_log.log - def create_router(self, context, tenant_id, router): - return router - - @call_log.log - def update_router(self, context, router_id, old_router, new_router): - return new_router - - @call_log.log - def 
delete_router(self, context, router_id, router): - pass - - @call_log.log - def add_interface(self, context, router_id, port): - return self.plugin.activate_port_if_ready(context, port) - - @call_log.log - def delete_interface(self, context, router_id, port): - return self.plugin.deactivate_port(context, port) - - -class RouterOpenFlowDriver(RouterDriverBase): - - need_gw_info = True - - def floating_ip_support(self): - return self.ofc.driver.router_nat_supported - - def _process_gw_port(self, gw_info, routes): - if gw_info and gw_info['gateway_ip']: - routes.append({'destination': '0.0.0.0/0', - 'nexthop': gw_info['gateway_ip']}) - - @call_log.log - def create_router(self, context, tenant_id, router): - try: - router_id = router['id'] - added_routes = [] - self.ofc.ensure_ofc_tenant(context, tenant_id) - self.ofc.create_ofc_router(context, tenant_id, router_id, - router['name']) - self._process_gw_port(router['gw_port'], added_routes) - if added_routes: - self.ofc.update_ofc_router_route(context, router_id, - added_routes, []) - new_status = nconst.ROUTER_STATUS_ACTIVE - self.plugin._update_resource_status(context, "router", - router['id'], - new_status) - router['status'] = new_status - return router - except (nexc.OFCException, nexc.OFCMappingNotFound) as exc: - with excutils.save_and_reraise_exception(): - if (isinstance(exc, nexc.OFCException) and - exc.status == httplib.CONFLICT): - raise nexc.RouterOverLimit(provider=PROVIDER_OPENFLOW) - reason = _("create_router() failed due to %s") % exc - LOG.error(reason) - new_status = nconst.ROUTER_STATUS_ERROR - self._update_resource_status(context, "router", - router['id'], - new_status) - - @call_log.log - def update_router(self, context, router_id, old_router, new_router): - old_routes = old_router['routes'][:] - new_routes = new_router['routes'][:] - self._process_gw_port(old_router['gw_port'], old_routes) - self._process_gw_port(new_router['gw_port'], new_routes) - added, removed = 
utils.diff_list_of_dict(old_routes, new_routes) - if added or removed: - try: - # NOTE(amotoki): PFC supports one-by-one route update at now. - # It means there may be a case where some route is updated but - # some not. To allow the next call of failures to sync routes - # with Neutron side, we pass the whole new routes here. - # PFC should support atomic route update in the future. - self.ofc.update_ofc_router_route(context, router_id, - new_routes) - new_status = nconst.ROUTER_STATUS_ACTIVE - self.plugin._update_resource_status( - context, "router", router_id, new_status) - new_router['status'] = new_status - except (nexc.OFCException, nexc.OFCMappingNotFound) as exc: - with excutils.save_and_reraise_exception(): - reason = _("_update_ofc_routes() failed due to %s") % exc - LOG.error(reason) - new_status = nconst.ROUTER_STATUS_ERROR - self.plugin._update_resource_status( - context, "router", router_id, new_status) - return new_router - - @call_log.log - def delete_router(self, context, router_id, router): - if not self.ofc.exists_ofc_router(context, router_id): - return - try: - self.ofc.delete_ofc_router(context, router_id, router) - except (nexc.OFCException, nexc.OFCMappingNotFound) as exc: - with excutils.save_and_reraise_exception(): - LOG.error(_("delete_router() failed due to %s"), exc) - self.plugin._update_resource_status( - context, "router", router_id, nconst.ROUTER_STATUS_ERROR) - - @call_log.log - def add_interface(self, context, router_id, port): - port_id = port['id'] - # port['fixed_ips'] may be empty if ext_net has no subnet. - # Such port is invalid for a router port and we don't create a port - # on OFC. The port is removed in l3_db._create_router_gw_port. - if not port['fixed_ips']: - msg = _('RouterOpenFlowDriver.add_interface(): the requested port ' - 'has no subnet. add_interface() is skipped. 
' - 'router_id=%(id)s, port=%(port)s)') - LOG.warning(msg, {'id': router_id, 'port': port}) - return port - fixed_ip = port['fixed_ips'][0] - subnet = self.plugin._get_subnet(context, fixed_ip['subnet_id']) - port_info = {'network_id': port['network_id'], - 'ip_address': fixed_ip['ip_address'], - 'cidr': subnet['cidr'], - 'mac_address': port['mac_address']} - try: - self.ofc.add_ofc_router_interface(context, router_id, - port_id, port_info) - new_status = nconst.ROUTER_STATUS_ACTIVE - self.plugin._update_resource_status( - context, "port", port_id, new_status) - return port - except (nexc.OFCException, nexc.OFCMappingNotFound) as exc: - with excutils.save_and_reraise_exception(): - reason = _("add_router_interface() failed due to %s") % exc - LOG.error(reason) - new_status = nconst.ROUTER_STATUS_ERROR - self.plugin._update_resource_status( - context, "port", port_id, new_status) - - @call_log.log - def delete_interface(self, context, router_id, port): - port_id = port['id'] - try: - self.ofc.delete_ofc_router_interface(context, router_id, port_id) - new_status = nconst.ROUTER_STATUS_ACTIVE - self.plugin._update_resource_status(context, "port", port_id, - new_status) - port['status'] = new_status - return port - except (nexc.OFCException, nexc.OFCMappingNotFound) as exc: - with excutils.save_and_reraise_exception(): - reason = _("delete_router_interface() failed due to %s") % exc - LOG.error(reason) - new_status = nconst.ROUTER_STATUS_ERROR - self.plugin._update_resource_status(context, "port", port_id, - new_status) diff --git a/neutron/plugins/nuage/__init__.py b/neutron/plugins/nuage/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/neutron/plugins/nuage/common/__init__.py b/neutron/plugins/nuage/common/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/neutron/plugins/nuage/common/config.py b/neutron/plugins/nuage/common/config.py deleted file mode 100644 index cd5a8a80a..000000000 --- 
a/neutron/plugins/nuage/common/config.py +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright 2014 Alcatel-Lucent USA Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Ronak Shah, Nuage Networks, Alcatel-Lucent USA Inc. - -from oslo.config import cfg - - -restproxy_opts = [ - cfg.StrOpt('server', default='localhost:8800', - help=_("IP Address and Port of Nuage's VSD server")), - cfg.StrOpt('serverauth', default='username:password', - secret=True, - help=_("Username and password for authentication")), - cfg.BoolOpt('serverssl', default=False, - help=_("Boolean for SSL connection with VSD server")), - cfg.StrOpt('base_uri', default='/', - help=_("Nuage provided base uri to reach out to VSD")), - cfg.StrOpt('organization', default='system', - help=_("Organization name in which VSD will orchestrate " - "network resources using openstack")), - cfg.StrOpt('auth_resource', default='', - help=_("Nuage provided uri for initial authorization to " - "access VSD")), - cfg.StrOpt('default_net_partition_name', - default='OpenStackDefaultNetPartition', - help=_("Default Network partition in which VSD will " - "orchestrate network resources using openstack")), - cfg.IntOpt('default_floatingip_quota', - default=254, - help=_("Per Net Partition quota of floating ips")), -] - - -def nuage_register_cfg_opts(): - cfg.CONF.register_opts(restproxy_opts, "RESTPROXY") diff --git a/neutron/plugins/nuage/common/constants.py b/neutron/plugins/nuage/common/constants.py deleted file mode 
100644 index ff2680bf7..000000000 --- a/neutron/plugins/nuage/common/constants.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright 2014 Alcatel-Lucent USA Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Ronak Shah, Nuage Networks, Alcatel-Lucent USA Inc. - -from neutron.common import constants - -AUTO_CREATE_PORT_OWNERS = [ - constants.DEVICE_OWNER_DHCP, - constants.DEVICE_OWNER_ROUTER_INTF, - constants.DEVICE_OWNER_ROUTER_GW, - constants.DEVICE_OWNER_FLOATINGIP -] - -NOVA_PORT_OWNER_PREF = 'compute:' - -SR_TYPE_FLOATING = "FLOATING" diff --git a/neutron/plugins/nuage/common/exceptions.py b/neutron/plugins/nuage/common/exceptions.py deleted file mode 100644 index 2e1158896..000000000 --- a/neutron/plugins/nuage/common/exceptions.py +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright 2014 Alcatel-Lucent USA Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Ronak Shah, Nuage Networks, Alcatel-Lucent USA Inc. 
- - -''' Nuage specific exceptions ''' - -from neutron.common import exceptions as n_exc - - -class OperationNotSupported(n_exc.InvalidConfigurationOption): - message = _("Nuage Plugin does not support this operation: %(msg)s") diff --git a/neutron/plugins/nuage/extensions/__init__.py b/neutron/plugins/nuage/extensions/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/neutron/plugins/nuage/extensions/netpartition.py b/neutron/plugins/nuage/extensions/netpartition.py deleted file mode 100644 index c731e1ded..000000000 --- a/neutron/plugins/nuage/extensions/netpartition.py +++ /dev/null @@ -1,107 +0,0 @@ -# Copyright 2014 Alcatel-Lucent USA Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Ronak Shah, Nuage Networks, Alcatel-Lucent USA Inc. 
- -import abc - -from neutron.api import extensions -from neutron.api.v2 import base -from neutron import manager -from neutron import quota - - -# Attribute Map -RESOURCE_ATTRIBUTE_MAP = { - 'net_partitions': { - 'id': {'allow_post': False, 'allow_put': False, - 'validate': {'type:uuid': None}, - 'is_visible': True}, - 'name': {'allow_post': True, 'allow_put': False, - 'is_visible': True, 'default': '', - 'validate': {'type:name_not_default': None}}, - 'description': {'allow_post': True, 'allow_put': False, - 'is_visible': True, 'default': '', - 'validate': {'type:string_or_none': None}}, - 'tenant_id': {'allow_post': True, 'allow_put': False, - 'required_by_policy': True, - 'is_visible': True}, - }, -} - - -class Netpartition(object): - """Extension class supporting net_partition. - """ - - @classmethod - def get_name(cls): - return "NetPartition" - - @classmethod - def get_alias(cls): - return "net-partition" - - @classmethod - def get_description(cls): - return "NetPartition" - - @classmethod - def get_namespace(cls): - return "http://nuagenetworks.net/ext/net_partition/api/v1.0" - - @classmethod - def get_updated(cls): - return "2014-01-01T10:00:00-00:00" - - @classmethod - def get_resources(cls): - """Returns Ext Resources.""" - exts = [] - plugin = manager.NeutronManager.get_plugin() - resource_name = 'net_partition' - collection_name = resource_name.replace('_', '-') + "s" - params = RESOURCE_ATTRIBUTE_MAP.get(resource_name + "s", dict()) - quota.QUOTAS.register_resource_by_name(resource_name) - controller = base.create_resource(collection_name, - resource_name, - plugin, params, allow_bulk=True) - ex = extensions.ResourceExtension(collection_name, - controller) - exts.append(ex) - - return exts - - -class NetPartitionPluginBase(object): - - @abc.abstractmethod - def create_net_partition(self, context, router): - pass - - @abc.abstractmethod - def update_net_partition(self, context, id, router): - pass - - @abc.abstractmethod - def get_net_partition(self, 
context, id, fields=None): - pass - - @abc.abstractmethod - def delete_net_partition(self, context, id): - pass - - @abc.abstractmethod - def get_net_partitions(self, context, filters=None, fields=None): - pass diff --git a/neutron/plugins/nuage/extensions/nuage_router.py b/neutron/plugins/nuage/extensions/nuage_router.py deleted file mode 100644 index 55d4e58d3..000000000 --- a/neutron/plugins/nuage/extensions/nuage_router.py +++ /dev/null @@ -1,73 +0,0 @@ -# Copyright 2014 Alcatel-Lucent USA Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Ronak Shah, Nuage Networks, Alcatel-Lucent USA Inc. - - -EXTENDED_ATTRIBUTES_2_0 = { - 'routers': { - 'net_partition': { - 'allow_post': True, - 'allow_put': True, - 'is_visible': True, - 'default': None, - 'validate': {'type:string_or_none': None} - }, - 'rd': { - 'allow_post': True, - 'allow_put': True, - 'is_visible': True, - 'default': None, - 'validate': {'type:string_or_none': None} - }, - 'rt': { - 'allow_post': True, - 'allow_put': True, - 'is_visible': True, - 'default': None, - 'validate': {'type:string_or_none': None} - }, - }, -} - - -class Nuage_router(object): - """Extension class supporting nuage router. 
- """ - - @classmethod - def get_name(cls): - return "Nuage router" - - @classmethod - def get_alias(cls): - return "nuage-router" - - @classmethod - def get_description(cls): - return "Nuage Router" - - @classmethod - def get_namespace(cls): - return "http://nuagenetworks.net/ext/routers/api/v1.0" - - @classmethod - def get_updated(cls): - return "2014-01-01T10:00:00-00:00" - - def get_extended_resources(self, version): - if version == "2.0": - return EXTENDED_ATTRIBUTES_2_0 - else: - return {} diff --git a/neutron/plugins/nuage/extensions/nuage_subnet.py b/neutron/plugins/nuage/extensions/nuage_subnet.py deleted file mode 100644 index b3705d5f6..000000000 --- a/neutron/plugins/nuage/extensions/nuage_subnet.py +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright 2014 Alcatel-Lucent USA Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Ronak Shah, Nuage Networks, Alcatel-Lucent USA Inc. - - -EXTENDED_ATTRIBUTES_2_0 = { - 'subnets': { - 'net_partition': { - 'allow_post': True, - 'allow_put': True, - 'is_visible': True, - 'default': None, - 'validate': {'type:string_or_none': None} - }, - }, -} - - -class Nuage_subnet(object): - """Extension class supporting Nuage subnet. 
- """ - - @classmethod - def get_name(cls): - return "Nuage subnet" - - @classmethod - def get_alias(cls): - return "nuage-subnet" - - @classmethod - def get_description(cls): - return "Nuage subnet" - - @classmethod - def get_namespace(cls): - return "http://nuagenetworks.net/ext/subnets/api/v1.0" - - @classmethod - def get_updated(cls): - return "2014-01-01T10:00:00-00:00" - - def get_extended_resources(self, version): - if version == "2.0": - return EXTENDED_ATTRIBUTES_2_0 - else: - return {} diff --git a/neutron/plugins/nuage/nuage_models.py b/neutron/plugins/nuage/nuage_models.py deleted file mode 100644 index f3ebcffa1..000000000 --- a/neutron/plugins/nuage/nuage_models.py +++ /dev/null @@ -1,102 +0,0 @@ -# Copyright 2014 Alcatel-Lucent USA Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Ronak Shah, Nuage Networks, Alcatel-Lucent USA Inc. 
- -from sqlalchemy import Boolean, Column, ForeignKey, String - -from neutron.db import model_base -from neutron.db import models_v2 - - -class NetPartition(model_base.BASEV2, models_v2.HasId): - __tablename__ = 'nuage_net_partitions' - name = Column(String(64)) - l3dom_tmplt_id = Column(String(36)) - l2dom_tmplt_id = Column(String(36)) - - -class NetPartitionRouter(model_base.BASEV2): - __tablename__ = "nuage_net_partition_router_mapping" - net_partition_id = Column(String(36), - ForeignKey('nuage_net_partitions.id', - ondelete="CASCADE"), - primary_key=True) - router_id = Column(String(36), - ForeignKey('routers.id', ondelete="CASCADE"), - primary_key=True) - nuage_router_id = Column(String(36)) - - -class RouterZone(model_base.BASEV2): - __tablename__ = "nuage_router_zone_mapping" - router_id = Column(String(36), - ForeignKey('routers.id', ondelete="CASCADE"), - primary_key=True) - nuage_zone_id = Column(String(36)) - nuage_user_id = Column(String(36)) - nuage_group_id = Column(String(36)) - - -class SubnetL2Domain(model_base.BASEV2): - __tablename__ = 'nuage_subnet_l2dom_mapping' - subnet_id = Column(String(36), - ForeignKey('subnets.id', ondelete="CASCADE"), - primary_key=True) - net_partition_id = Column(String(36), - ForeignKey('nuage_net_partitions.id', - ondelete="CASCADE")) - nuage_subnet_id = Column(String(36)) - nuage_l2dom_tmplt_id = Column(String(36)) - nuage_user_id = Column(String(36)) - nuage_group_id = Column(String(36)) - - -class PortVPortMapping(model_base.BASEV2): - __tablename__ = 'nuage_port_mapping' - port_id = Column(String(36), - ForeignKey('ports.id', ondelete="CASCADE"), - primary_key=True) - nuage_vport_id = Column(String(36)) - nuage_vif_id = Column(String(36)) - static_ip = Column(Boolean()) - - -class RouterRoutesMapping(model_base.BASEV2, models_v2.Route): - __tablename__ = 'nuage_routerroutes_mapping' - router_id = Column(String(36), - ForeignKey('routers.id', - ondelete="CASCADE"), - primary_key=True, - nullable=False) - 
nuage_route_id = Column(String(36)) - - -class FloatingIPPoolMapping(model_base.BASEV2): - __tablename__ = "nuage_floatingip_pool_mapping" - fip_pool_id = Column(String(36), primary_key=True) - net_id = Column(String(36), - ForeignKey('networks.id', ondelete="CASCADE")) - router_id = Column(String(36)) - - -class FloatingIPMapping(model_base.BASEV2): - __tablename__ = 'nuage_floatingip_mapping' - fip_id = Column(String(36), - ForeignKey('floatingips.id', - ondelete="CASCADE"), - primary_key=True) - router_id = Column(String(36)) - nuage_fip_id = Column(String(36)) diff --git a/neutron/plugins/nuage/nuagedb.py b/neutron/plugins/nuage/nuagedb.py deleted file mode 100644 index bd1b2f3d2..000000000 --- a/neutron/plugins/nuage/nuagedb.py +++ /dev/null @@ -1,202 +0,0 @@ -# Copyright 2014 Alcatel-Lucent USA Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Ronak Shah, Nuage Networks, Alcatel-Lucent USA Inc. 
- -from neutron.db import db_base_plugin_v2 -from neutron.plugins.nuage import nuage_models - - -def add_entrouter_mapping(session, np_id, - router_id, - n_l3id): - ent_rtr_mapping = nuage_models.NetPartitionRouter(net_partition_id=np_id, - router_id=router_id, - nuage_router_id=n_l3id) - session.add(ent_rtr_mapping) - - -def add_rtrzone_mapping(session, neutron_router_id, - nuage_zone_id, - nuage_user_id=None, - nuage_group_id=None): - rtr_zone_mapping = nuage_models.RouterZone(router_id=neutron_router_id, - nuage_zone_id=nuage_zone_id, - nuage_user_id=nuage_user_id, - nuage_group_id=nuage_group_id) - session.add(rtr_zone_mapping) - - -def add_subnetl2dom_mapping(session, neutron_subnet_id, - nuage_sub_id, - np_id, - l2dom_id=None, - nuage_user_id=None, - nuage_group_id=None): - subnet_l2dom = nuage_models.SubnetL2Domain(subnet_id=neutron_subnet_id, - nuage_subnet_id=nuage_sub_id, - net_partition_id=np_id, - nuage_l2dom_tmplt_id=l2dom_id, - nuage_user_id=nuage_user_id, - nuage_group_id=nuage_group_id) - session.add(subnet_l2dom) - - -def update_subnetl2dom_mapping(subnet_l2dom, - new_dict): - subnet_l2dom.update(new_dict) - - -def delete_subnetl2dom_mapping(session, subnet_l2dom): - session.delete(subnet_l2dom) - - -def add_port_vport_mapping(session, port_id, nuage_vport_id, - nuage_vif_id, static_ip): - port_mapping = nuage_models.PortVPortMapping(port_id=port_id, - nuage_vport_id=nuage_vport_id, - nuage_vif_id=nuage_vif_id, - static_ip=static_ip) - session.add(port_mapping) - return port_mapping - - -def update_port_vport_mapping(port_mapping, - new_dict): - port_mapping.update(new_dict) - - -def get_port_mapping_by_id(session, id): - query = session.query(nuage_models.PortVPortMapping) - return query.filter_by(port_id=id).first() - - -def get_ent_rtr_mapping_by_rtrid(session, rtrid): - query = session.query(nuage_models.NetPartitionRouter) - return query.filter_by(router_id=rtrid).first() - - -def get_rtr_zone_mapping(session, router_id): - query = 
session.query(nuage_models.RouterZone) - return query.filter_by(router_id=router_id).first() - - -def get_subnet_l2dom_by_id(session, id): - query = session.query(nuage_models.SubnetL2Domain) - return query.filter_by(subnet_id=id).first() - - -def add_net_partition(session, netpart_id, - l3dom_id, l2dom_id, - ent_name): - net_partitioninst = nuage_models.NetPartition(id=netpart_id, - name=ent_name, - l3dom_tmplt_id=l3dom_id, - l2dom_tmplt_id=l2dom_id) - session.add(net_partitioninst) - return net_partitioninst - - -def delete_net_partition(session, net_partition): - session.delete(net_partition) - - -def get_ent_rtr_mapping_by_entid(session, - entid): - query = session.query(nuage_models.NetPartitionRouter) - return query.filter_by(net_partition_id=entid).all() - - -def get_net_partition_by_name(session, name): - query = session.query(nuage_models.NetPartition) - return query.filter_by(name=name).first() - - -def get_net_partition_by_id(session, id): - query = session.query(nuage_models.NetPartition) - return query.filter_by(id=id).first() - - -def get_net_partitions(session, filters=None, fields=None): - query = session.query(nuage_models.NetPartition) - common_db = db_base_plugin_v2.CommonDbMixin() - query = common_db._apply_filters_to_query(query, - nuage_models.NetPartition, - filters) - return query - - -def delete_static_route(session, static_route): - session.delete(static_route) - - -def get_router_route_mapping(session, id, route): - qry = session.query(nuage_models.RouterRoutesMapping) - return qry.filter_by(router_id=id, - destination=route['destination'], - nexthop=route['nexthop']).one() - - -def add_static_route(session, router_id, nuage_rtr_id, - destination, nexthop): - staticrt = nuage_models.RouterRoutesMapping(router_id=router_id, - nuage_route_id=nuage_rtr_id, - destination=destination, - nexthop=nexthop) - session.add(staticrt) - return staticrt - - -def add_fip_mapping(session, neutron_fip_id, router_id, nuage_fip_id): - fip = 
nuage_models.FloatingIPMapping(fip_id=neutron_fip_id, - router_id=router_id, - nuage_fip_id=nuage_fip_id) - session.add(fip) - return fip - - -def delete_fip_mapping(session, fip_mapping): - session.delete(fip_mapping) - - -def add_fip_pool_mapping(session, fip_pool_id, net_id, router_id=None): - fip_pool_mapping = nuage_models.FloatingIPPoolMapping( - fip_pool_id=fip_pool_id, - net_id=net_id, - router_id=router_id) - session.add(fip_pool_mapping) - return fip_pool_mapping - - -def delete_fip_pool_mapping(session, fip_pool_mapping): - session.delete(fip_pool_mapping) - - -def get_fip_pool_by_id(session, id): - query = session.query(nuage_models.FloatingIPPoolMapping) - return query.filter_by(fip_pool_id=id).first() - - -def get_fip_pool_from_netid(session, net_id): - query = session.query(nuage_models.FloatingIPPoolMapping) - return query.filter_by(net_id=net_id).first() - - -def get_fip_mapping_by_id(session, id): - qry = session.query(nuage_models.FloatingIPMapping) - return qry.filter_by(fip_id=id).first() - - -def update_fip_pool_mapping(fip_pool_mapping, new_dict): - fip_pool_mapping.update(new_dict) diff --git a/neutron/plugins/nuage/plugin.py b/neutron/plugins/nuage/plugin.py deleted file mode 100644 index bf95c1eec..000000000 --- a/neutron/plugins/nuage/plugin.py +++ /dev/null @@ -1,1006 +0,0 @@ -# Copyright 2014 Alcatel-Lucent USA Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Ronak Shah, Nuage Networks, Alcatel-Lucent USA Inc. 
- - -import re - -import netaddr -from oslo.config import cfg -from sqlalchemy.orm import exc - -from neutron.api import extensions as neutron_extensions -from neutron.api.v2 import attributes -from neutron.common import constants as os_constants -from neutron.common import exceptions as n_exc -from neutron.common import utils -from neutron.db import api as db -from neutron.db import db_base_plugin_v2 -from neutron.db import external_net_db -from neutron.db import extraroute_db -from neutron.db import l3_db -from neutron.db import models_v2 -from neutron.db import quota_db # noqa -from neutron.extensions import external_net -from neutron.extensions import l3 -from neutron.extensions import portbindings -from neutron.openstack.common import excutils -from neutron.openstack.common import importutils -from neutron.plugins.nuage.common import config -from neutron.plugins.nuage.common import constants -from neutron.plugins.nuage.common import exceptions as nuage_exc -from neutron.plugins.nuage import extensions -from neutron.plugins.nuage.extensions import netpartition -from neutron.plugins.nuage import nuagedb -from neutron import policy - - -class NuagePlugin(db_base_plugin_v2.NeutronDbPluginV2, - external_net_db.External_net_db_mixin, - extraroute_db.ExtraRoute_db_mixin, - l3_db.L3_NAT_db_mixin, - netpartition.NetPartitionPluginBase): - """Class that implements Nuage Networks' plugin functionality.""" - supported_extension_aliases = ["router", "binding", "external-net", - "net-partition", "nuage-router", - "nuage-subnet", "quotas", "extraroute"] - - binding_view = "extension:port_binding:view" - - def __init__(self): - super(NuagePlugin, self).__init__() - neutron_extensions.append_api_extensions_path(extensions.__path__) - config.nuage_register_cfg_opts() - self.nuageclient_init() - net_partition = cfg.CONF.RESTPROXY.default_net_partition_name - self._create_default_net_partition(net_partition) - - def nuageclient_init(self): - server = cfg.CONF.RESTPROXY.server - 
serverauth = cfg.CONF.RESTPROXY.serverauth - serverssl = cfg.CONF.RESTPROXY.serverssl - base_uri = cfg.CONF.RESTPROXY.base_uri - auth_resource = cfg.CONF.RESTPROXY.auth_resource - organization = cfg.CONF.RESTPROXY.organization - nuageclient = importutils.import_module('nuagenetlib.nuageclient') - self.nuageclient = nuageclient.NuageClient(server, base_uri, - serverssl, serverauth, - auth_resource, - organization) - - def _resource_finder(self, context, for_resource, resource, user_req): - match = re.match(attributes.UUID_PATTERN, user_req[resource]) - if match: - obj_lister = getattr(self, "get_%s" % resource) - found_resource = obj_lister(context, user_req[resource]) - if not found_resource: - msg = (_("%(resource)s with id %(resource_id)s does not " - "exist") % {'resource': resource, - 'resource_id': user_req[resource]}) - raise n_exc.BadRequest(resource=for_resource, msg=msg) - else: - filter = {'name': [user_req[resource]]} - obj_lister = getattr(self, "get_%ss" % resource) - found_resource = obj_lister(context, filters=filter) - if not found_resource: - msg = (_("Either %(resource)s %(req_resource)s not found " - "or you dont have credential to access it") - % {'resource': resource, - 'req_resource': user_req[resource]}) - raise n_exc.BadRequest(resource=for_resource, msg=msg) - if len(found_resource) > 1: - msg = (_("More than one entry found for %(resource)s " - "%(req_resource)s. 
Use id instead") - % {'resource': resource, - 'req_resource': user_req[resource]}) - raise n_exc.BadRequest(resource=for_resource, msg=msg) - found_resource = found_resource[0] - return found_resource - - def _update_port_ip(self, context, port, new_ip): - subid = port['fixed_ips'][0]['subnet_id'] - new_fixed_ips = {} - new_fixed_ips['subnet_id'] = subid - new_fixed_ips['ip_address'] = new_ip - ips, prev_ips = self._update_ips_for_port(context, - port["network_id"], - port['id'], - port["fixed_ips"], - [new_fixed_ips]) - - # Update ips if necessary - for ip in ips: - allocated = models_v2.IPAllocation( - network_id=port['network_id'], port_id=port['id'], - ip_address=ip['ip_address'], subnet_id=ip['subnet_id']) - context.session.add(allocated) - - def _create_update_port(self, context, port, - port_mapping, subnet_mapping): - filters = {'device_id': [port['device_id']]} - ports = self.get_ports(context, filters) - netpart_id = subnet_mapping['net_partition_id'] - net_partition = nuagedb.get_net_partition_by_id(context.session, - netpart_id) - params = { - 'id': port['device_id'], - 'mac': port['mac_address'], - 'parent_id': subnet_mapping['nuage_subnet_id'], - 'net_partition': net_partition, - 'ip': None, - 'no_of_ports': len(ports), - 'tenant': port['tenant_id'] - } - if port_mapping['static_ip']: - params['ip'] = port['fixed_ips'][0]['ip_address'] - - nuage_vm = self.nuageclient.create_vms(params) - if nuage_vm: - if port['fixed_ips'][0]['ip_address'] != str(nuage_vm['ip']): - self._update_port_ip(context, port, nuage_vm['ip']) - port_dict = { - 'nuage_vport_id': nuage_vm['vport_id'], - 'nuage_vif_id': nuage_vm['vif_id'] - } - nuagedb.update_port_vport_mapping(port_mapping, - port_dict) - - def create_port(self, context, port): - session = context.session - with session.begin(subtransactions=True): - p = port['port'] - port = super(NuagePlugin, self).create_port(context, port) - device_owner = port.get('device_owner', None) - if (device_owner and - device_owner 
not in constants.AUTO_CREATE_PORT_OWNERS): - if 'fixed_ips' not in port or len(port['fixed_ips']) == 0: - return self._extend_port_dict_binding(context, port) - subnet_id = port['fixed_ips'][0]['subnet_id'] - subnet_mapping = nuagedb.get_subnet_l2dom_by_id(session, - subnet_id) - if subnet_mapping: - static_ip = False - if (attributes.is_attr_set(p['fixed_ips']) and - 'ip_address' in p['fixed_ips'][0]): - static_ip = True - nuage_vport_id = None - nuage_vif_id = None - port_mapping = nuagedb.add_port_vport_mapping( - session, - port['id'], - nuage_vport_id, - nuage_vif_id, - static_ip) - port_prefix = constants.NOVA_PORT_OWNER_PREF - if port['device_owner'].startswith(port_prefix): - #This request is coming from nova - try: - self._create_update_port(context, port, - port_mapping, - subnet_mapping) - except Exception: - with excutils.save_and_reraise_exception(): - super(NuagePlugin, self).delete_port( - context, - port['id']) - return self._extend_port_dict_binding(context, port) - - def update_port(self, context, id, port): - p = port['port'] - if p.get('device_owner', '').startswith( - constants.NOVA_PORT_OWNER_PREF): - session = context.session - with session.begin(subtransactions=True): - port = self._get_port(context, id) - port.update(p) - if 'fixed_ips' not in port or len(port['fixed_ips']) == 0: - return self._make_port_dict(port) - subnet_id = port['fixed_ips'][0]['subnet_id'] - subnet_mapping = nuagedb.get_subnet_l2dom_by_id(session, - subnet_id) - if not subnet_mapping: - msg = (_("Subnet %s not found on VSD") % subnet_id) - raise n_exc.BadRequest(resource='port', msg=msg) - port_mapping = nuagedb.get_port_mapping_by_id(session, - id) - if not port_mapping: - msg = (_("Port-Mapping for port %s not " - " found on VSD") % id) - raise n_exc.BadRequest(resource='port', msg=msg) - if not port_mapping['nuage_vport_id']: - self._create_update_port(context, port, - port_mapping, subnet_mapping) - updated_port = self._make_port_dict(port) - else: - updated_port 
= super(NuagePlugin, self).update_port(context, id, - port) - return updated_port - - def delete_port(self, context, id, l3_port_check=True): - if l3_port_check: - self.prevent_l3_port_deletion(context, id) - port = self._get_port(context, id) - port_mapping = nuagedb.get_port_mapping_by_id(context.session, - id) - # This is required for to pass ut test_floatingip_port_delete - self.disassociate_floatingips(context, id) - if not port['fixed_ips']: - return super(NuagePlugin, self).delete_port(context, id) - - sub_id = port['fixed_ips'][0]['subnet_id'] - subnet_mapping = nuagedb.get_subnet_l2dom_by_id(context.session, - sub_id) - if not subnet_mapping: - return super(NuagePlugin, self).delete_port(context, id) - - netpart_id = subnet_mapping['net_partition_id'] - net_partition = nuagedb.get_net_partition_by_id(context.session, - netpart_id) - # Need to call this explicitly to delete vport_vporttag_mapping - if constants.NOVA_PORT_OWNER_PREF in port['device_owner']: - # This was a VM Port - filters = {'device_id': [port['device_id']]} - ports = self.get_ports(context, filters) - params = { - 'no_of_ports': len(ports), - 'net_partition': net_partition, - 'tenant': port['tenant_id'], - 'mac': port['mac_address'], - 'nuage_vif_id': port_mapping['nuage_vif_id'], - 'id': port['device_id'] - } - self.nuageclient.delete_vms(params) - super(NuagePlugin, self).delete_port(context, id) - - def _check_view_auth(self, context, resource, action): - return policy.check(context, action, resource) - - def _extend_port_dict_binding(self, context, port): - if self._check_view_auth(context, port, self.binding_view): - port[portbindings.VIF_TYPE] = portbindings.VIF_TYPE_OVS - port[portbindings.VIF_DETAILS] = { - portbindings.CAP_PORT_FILTER: False - } - return port - - def get_port(self, context, id, fields=None): - port = super(NuagePlugin, self).get_port(context, id, fields) - return self._fields(self._extend_port_dict_binding(context, port), - fields) - - def get_ports(self, context, 
filters=None, fields=None): - ports = super(NuagePlugin, self).get_ports(context, filters, fields) - return [self._fields(self._extend_port_dict_binding(context, port), - fields) for port in ports] - - def _check_router_subnet_for_tenant(self, context): - # Search router and subnet tables. - # If no entry left delete user and group from VSD - filters = {'tenant_id': [context.tenant]} - routers = self.get_routers(context, filters=filters) - subnets = self.get_subnets(context, filters=filters) - return bool(routers or subnets) - - def create_network(self, context, network): - net = network['network'] - with context.session.begin(subtransactions=True): - net = super(NuagePlugin, self).create_network(context, - network) - self._process_l3_create(context, net, network['network']) - return net - - def _validate_update_network(self, context, id, network): - req_data = network['network'] - is_external_set = req_data.get(external_net.EXTERNAL) - if not attributes.is_attr_set(is_external_set): - return (None, None) - neutron_net = self.get_network(context, id) - if neutron_net.get(external_net.EXTERNAL) == is_external_set: - return (None, None) - subnet = self._validate_nuage_sharedresource(context, 'network', id) - if subnet and not is_external_set: - msg = _('External network with subnets can not be ' - 'changed to non-external network') - raise nuage_exc.OperationNotSupported(msg=msg) - return (is_external_set, subnet) - - def update_network(self, context, id, network): - with context.session.begin(subtransactions=True): - is_external_set, subnet = self._validate_update_network(context, - id, - network) - net = super(NuagePlugin, self).update_network(context, id, - network) - self._process_l3_update(context, net, network['network']) - if subnet and is_external_set: - subn = subnet[0] - subnet_l2dom = nuagedb.get_subnet_l2dom_by_id(context.session, - subn['id']) - if subnet_l2dom: - nuage_subnet_id = subnet_l2dom['nuage_subnet_id'] - nuage_l2dom_tid = 
subnet_l2dom['nuage_l2dom_tmplt_id'] - user_id = subnet_l2dom['nuage_user_id'] - group_id = subnet_l2dom['nuage_group_id'] - self.nuageclient.delete_subnet(nuage_subnet_id, - nuage_l2dom_tid) - self.nuageclient.delete_user(user_id) - self.nuageclient.delete_group(group_id) - nuagedb.delete_subnetl2dom_mapping(context.session, - subnet_l2dom) - self._add_nuage_sharedresource(context, - subnet[0], - id, - constants.SR_TYPE_FLOATING) - return net - - def delete_network(self, context, id): - with context.session.begin(subtransactions=True): - self._process_l3_delete(context, id) - filter = {'network_id': [id]} - subnets = self.get_subnets(context, filters=filter) - for subnet in subnets: - self.delete_subnet(context, subnet['id']) - super(NuagePlugin, self).delete_network(context, id) - - def _get_net_partition_for_subnet(self, context, subnet): - subn = subnet['subnet'] - ent = subn.get('net_partition', None) - if not ent: - def_net_part = cfg.CONF.RESTPROXY.default_net_partition_name - net_partition = nuagedb.get_net_partition_by_name(context.session, - def_net_part) - else: - net_partition = self._resource_finder(context, 'subnet', - 'net_partition', subn) - if not net_partition: - msg = _('Either net_partition is not provided with subnet OR ' - 'default net_partition is not created at the start') - raise n_exc.BadRequest(resource='subnet', msg=msg) - return net_partition - - def _validate_create_subnet(self, subnet): - if ('host_routes' in subnet and - attributes.is_attr_set(subnet['host_routes'])): - msg = 'host_routes extensions not supported for subnets' - raise nuage_exc.OperationNotSupported(msg=msg) - if subnet['gateway_ip'] is None: - msg = "no-gateway option not supported with subnets" - raise nuage_exc.OperationNotSupported(msg=msg) - - def _delete_nuage_sharedresource(self, context, net_id): - sharedresource_id = self.nuageclient.delete_nuage_sharedresource( - net_id) - if sharedresource_id: - fip_pool_mapping = nuagedb.get_fip_pool_by_id(context.session, 
- sharedresource_id) - if fip_pool_mapping: - with context.session.begin(subtransactions=True): - nuagedb.delete_fip_pool_mapping(context.session, - fip_pool_mapping) - - def _validate_nuage_sharedresource(self, context, resource, net_id): - filter = {'network_id': [net_id]} - existing_subn = self.get_subnets(context, filters=filter) - if len(existing_subn) > 1: - msg = _('Only one subnet is allowed per ' - 'external network %s') % net_id - raise nuage_exc.OperationNotSupported(msg=msg) - return existing_subn - - def _add_nuage_sharedresource(self, context, subnet, net_id, type): - net = netaddr.IPNetwork(subnet['cidr']) - params = { - 'neutron_subnet': subnet, - 'net': net, - 'type': type - } - fip_pool_id = self.nuageclient.create_nuage_sharedresource(params) - nuagedb.add_fip_pool_mapping(context.session, fip_pool_id, net_id) - - def _create_nuage_sharedresource(self, context, subnet, type): - subn = subnet['subnet'] - net_id = subn['network_id'] - self._validate_nuage_sharedresource(context, 'subnet', net_id) - with context.session.begin(subtransactions=True): - subn = super(NuagePlugin, self).create_subnet(context, subnet) - self._add_nuage_sharedresource(context, subn, net_id, type) - return subn - - def _create_nuage_subnet(self, context, neutron_subnet, net_partition): - net = netaddr.IPNetwork(neutron_subnet['cidr']) - params = { - 'net_partition': net_partition, - 'tenant_id': neutron_subnet['tenant_id'], - 'net': net - } - try: - nuage_subnet = self.nuageclient.create_subnet(neutron_subnet, - params) - except Exception: - with excutils.save_and_reraise_exception(): - super(NuagePlugin, self).delete_subnet(context, - neutron_subnet['id']) - - if nuage_subnet: - l2dom_id = str(nuage_subnet['nuage_l2template_id']) - user_id = nuage_subnet['nuage_userid'] - group_id = nuage_subnet['nuage_groupid'] - id = nuage_subnet['nuage_l2domain_id'] - with context.session.begin(subtransactions=True): - nuagedb.add_subnetl2dom_mapping(context.session, - 
neutron_subnet['id'], - id, - net_partition['id'], - l2dom_id=l2dom_id, - nuage_user_id=user_id, - nuage_group_id=group_id) - - def create_subnet(self, context, subnet): - subn = subnet['subnet'] - net_id = subn['network_id'] - - if self._network_is_external(context, net_id): - return self._create_nuage_sharedresource( - context, subnet, constants.SR_TYPE_FLOATING) - - self._validate_create_subnet(subn) - - net_partition = self._get_net_partition_for_subnet(context, subnet) - neutron_subnet = super(NuagePlugin, self).create_subnet(context, - subnet) - self._create_nuage_subnet(context, neutron_subnet, net_partition) - return neutron_subnet - - def delete_subnet(self, context, id): - subnet = self.get_subnet(context, id) - if self._network_is_external(context, subnet['network_id']): - super(NuagePlugin, self).delete_subnet(context, id) - return self._delete_nuage_sharedresource(context, id) - - subnet_l2dom = nuagedb.get_subnet_l2dom_by_id(context.session, id) - if subnet_l2dom: - template_id = subnet_l2dom['nuage_l2dom_tmplt_id'] - try: - self.nuageclient.delete_subnet(subnet_l2dom['nuage_subnet_id'], - template_id) - except Exception: - msg = (_('Unable to complete operation on subnet %s.' 
- 'One or more ports have an IP allocation ' - 'from this subnet.') % id) - raise n_exc.BadRequest(resource='subnet', msg=msg) - super(NuagePlugin, self).delete_subnet(context, id) - if subnet_l2dom and not self._check_router_subnet_for_tenant(context): - self.nuageclient.delete_user(subnet_l2dom['nuage_user_id']) - self.nuageclient.delete_group(subnet_l2dom['nuage_group_id']) - - def add_router_interface(self, context, router_id, interface_info): - session = context.session - with session.begin(subtransactions=True): - rtr_if_info = super(NuagePlugin, - self).add_router_interface(context, - router_id, - interface_info) - subnet_id = rtr_if_info['subnet_id'] - subn = self.get_subnet(context, subnet_id) - - rtr_zone_mapping = nuagedb.get_rtr_zone_mapping(session, - router_id) - ent_rtr_mapping = nuagedb.get_ent_rtr_mapping_by_rtrid(session, - router_id) - subnet_l2dom = nuagedb.get_subnet_l2dom_by_id(session, - subnet_id) - if not rtr_zone_mapping or not ent_rtr_mapping: - super(NuagePlugin, - self).remove_router_interface(context, - router_id, - interface_info) - msg = (_("Router %s does not hold default zone OR " - "net_partition mapping. Router-IF add failed") - % router_id) - raise n_exc.BadRequest(resource='router', msg=msg) - - if not subnet_l2dom: - super(NuagePlugin, - self).remove_router_interface(context, - router_id, - interface_info) - msg = (_("Subnet %s does not hold Nuage VSD reference. 
" - "Router-IF add failed") % subnet_id) - raise n_exc.BadRequest(resource='subnet', msg=msg) - - if (subnet_l2dom['net_partition_id'] != - ent_rtr_mapping['net_partition_id']): - super(NuagePlugin, - self).remove_router_interface(context, - router_id, - interface_info) - msg = (_("Subnet %(subnet)s and Router %(router)s belong to " - "different net_partition Router-IF add " - "not permitted") % {'subnet': subnet_id, - 'router': router_id}) - raise n_exc.BadRequest(resource='subnet', msg=msg) - nuage_subnet_id = subnet_l2dom['nuage_subnet_id'] - nuage_l2dom_tmplt_id = subnet_l2dom['nuage_l2dom_tmplt_id'] - if self.nuageclient.vms_on_l2domain(nuage_subnet_id): - super(NuagePlugin, - self).remove_router_interface(context, - router_id, - interface_info) - msg = (_("Subnet %s has one or more active VMs " - "Router-IF add not permitted") % subnet_id) - raise n_exc.BadRequest(resource='subnet', msg=msg) - self.nuageclient.delete_subnet(nuage_subnet_id, - nuage_l2dom_tmplt_id) - net = netaddr.IPNetwork(subn['cidr']) - params = { - 'net': net, - 'zone_id': rtr_zone_mapping['nuage_zone_id'] - } - if not attributes.is_attr_set(subn['gateway_ip']): - subn['gateway_ip'] = str(netaddr.IPAddress(net.first + 1)) - try: - nuage_subnet = self.nuageclient.create_domain_subnet(subn, - params) - except Exception: - with excutils.save_and_reraise_exception(): - super(NuagePlugin, - self).remove_router_interface(context, - router_id, - interface_info) - if nuage_subnet: - ns_dict = {} - ns_dict['nuage_subnet_id'] = nuage_subnet['nuage_subnetid'] - ns_dict['nuage_l2dom_tmplt_id'] = None - nuagedb.update_subnetl2dom_mapping(subnet_l2dom, - ns_dict) - return rtr_if_info - - def remove_router_interface(self, context, router_id, interface_info): - if 'subnet_id' in interface_info: - subnet_id = interface_info['subnet_id'] - subnet = self.get_subnet(context, subnet_id) - found = False - try: - filters = {'device_id': [router_id], - 'device_owner': - [os_constants.DEVICE_OWNER_ROUTER_INTF], - 
'network_id': [subnet['network_id']]} - ports = self.get_ports(context, filters) - - for p in ports: - if p['fixed_ips'][0]['subnet_id'] == subnet_id: - found = True - break - except exc.NoResultFound: - msg = (_("No router interface found for Router %s. " - "Router-IF delete failed") % router_id) - raise n_exc.BadRequest(resource='router', msg=msg) - - if not found: - msg = (_("No router interface found for Router %s. " - "Router-IF delete failed") % router_id) - raise n_exc.BadRequest(resource='router', msg=msg) - elif 'port_id' in interface_info: - port_db = self._get_port(context, interface_info['port_id']) - if not port_db: - msg = (_("No router interface found for Router %s. " - "Router-IF delete failed") % router_id) - raise n_exc.BadRequest(resource='router', msg=msg) - subnet_id = port_db['fixed_ips'][0]['subnet_id'] - - session = context.session - with session.begin(subtransactions=True): - subnet_l2dom = nuagedb.get_subnet_l2dom_by_id(session, - subnet_id) - if not subnet_l2dom: - return super(NuagePlugin, - self).remove_router_interface(context, - router_id, - interface_info) - nuage_subn_id = subnet_l2dom['nuage_subnet_id'] - if self.nuageclient.vms_on_l2domain(nuage_subn_id): - msg = (_("Subnet %s has one or more active VMs " - "Router-IF delete not permitted") % subnet_id) - raise n_exc.BadRequest(resource='subnet', msg=msg) - - neutron_subnet = self.get_subnet(context, subnet_id) - ent_rtr_mapping = nuagedb.get_ent_rtr_mapping_by_rtrid( - context.session, - router_id) - if not ent_rtr_mapping: - msg = (_("Router %s does not hold net_partition " - "assoc on Nuage VSD. 
Router-IF delete failed") - % router_id) - raise n_exc.BadRequest(resource='router', msg=msg) - net = netaddr.IPNetwork(neutron_subnet['cidr']) - net_part_id = ent_rtr_mapping['net_partition_id'] - net_partition = self.get_net_partition(context, - net_part_id) - params = { - 'net_partition': net_partition, - 'tenant_id': neutron_subnet['tenant_id'], - 'net': net - } - nuage_subnet = self.nuageclient.create_subnet(neutron_subnet, - params) - self.nuageclient.delete_domain_subnet(nuage_subn_id) - info = super(NuagePlugin, - self).remove_router_interface(context, router_id, - interface_info) - if nuage_subnet: - tmplt_id = str(nuage_subnet['nuage_l2template_id']) - ns_dict = {} - ns_dict['nuage_subnet_id'] = nuage_subnet['nuage_l2domain_id'] - ns_dict['nuage_l2dom_tmplt_id'] = tmplt_id - nuagedb.update_subnetl2dom_mapping(subnet_l2dom, - ns_dict) - return info - - def _get_net_partition_for_router(self, context, router): - rtr = router['router'] - ent = rtr.get('net_partition', None) - if not ent: - def_net_part = cfg.CONF.RESTPROXY.default_net_partition_name - net_partition = nuagedb.get_net_partition_by_name(context.session, - def_net_part) - else: - net_partition = self._resource_finder(context, 'router', - 'net_partition', rtr) - if not net_partition: - msg = _("Either net_partition is not provided with router OR " - "default net_partition is not created at the start") - raise n_exc.BadRequest(resource='router', msg=msg) - return net_partition - - def create_router(self, context, router): - net_partition = self._get_net_partition_for_router(context, router) - neutron_router = super(NuagePlugin, self).create_router(context, - router) - params = { - 'net_partition': net_partition, - 'tenant_id': neutron_router['tenant_id'] - } - try: - nuage_router = self.nuageclient.create_router(neutron_router, - router['router'], - params) - except Exception: - with excutils.save_and_reraise_exception(): - super(NuagePlugin, self).delete_router(context, - neutron_router['id']) - 
if nuage_router: - user_id = nuage_router['nuage_userid'] - group_id = nuage_router['nuage_groupid'] - with context.session.begin(subtransactions=True): - nuagedb.add_entrouter_mapping(context.session, - net_partition['id'], - neutron_router['id'], - nuage_router['nuage_domain_id']) - nuagedb.add_rtrzone_mapping(context.session, - neutron_router['id'], - nuage_router['nuage_def_zone_id'], - nuage_user_id=user_id, - nuage_group_id=group_id) - return neutron_router - - def _validate_nuage_staticroutes(self, old_routes, added, removed): - cidrs = [] - for old in old_routes: - if old not in removed: - ip = netaddr.IPNetwork(old['destination']) - cidrs.append(ip) - for route in added: - ip = netaddr.IPNetwork(route['destination']) - matching = netaddr.all_matching_cidrs(ip.ip, cidrs) - if matching: - msg = _('for same subnet, multiple static routes not allowed') - raise n_exc.BadRequest(resource='router', msg=msg) - cidrs.append(ip) - - def update_router(self, context, id, router): - r = router['router'] - with context.session.begin(subtransactions=True): - if 'routes' in r: - old_routes = self._get_extra_routes_by_router_id(context, - id) - added, removed = utils.diff_list_of_dict(old_routes, - r['routes']) - self._validate_nuage_staticroutes(old_routes, added, removed) - ent_rtr_mapping = nuagedb.get_ent_rtr_mapping_by_rtrid( - context.session, id) - if not ent_rtr_mapping: - msg = (_("Router %s does not hold net-partition " - "assoc on VSD. extra-route failed") % id) - raise n_exc.BadRequest(resource='router', msg=msg) - # Let it do internal checks first and verify it. 
- router_updated = super(NuagePlugin, - self).update_router(context, - id, - router) - for route in removed: - rtr_rt_mapping = nuagedb.get_router_route_mapping( - context.session, id, route) - if rtr_rt_mapping: - self.nuageclient.delete_nuage_staticroute( - rtr_rt_mapping['nuage_route_id']) - nuagedb.delete_static_route(context.session, - rtr_rt_mapping) - for route in added: - params = { - 'parent_id': ent_rtr_mapping['nuage_router_id'], - 'net': netaddr.IPNetwork(route['destination']), - 'nexthop': route['nexthop'] - } - nuage_rt_id = self.nuageclient.create_nuage_staticroute( - params) - nuagedb.add_static_route(context.session, - id, nuage_rt_id, - route['destination'], - route['nexthop']) - else: - router_updated = super(NuagePlugin, self).update_router( - context, id, router) - return router_updated - - def delete_router(self, context, id): - session = context.session - ent_rtr_mapping = nuagedb.get_ent_rtr_mapping_by_rtrid(session, - id) - if ent_rtr_mapping: - filters = { - 'device_id': [id], - 'device_owner': [os_constants.DEVICE_OWNER_ROUTER_INTF] - } - ports = self.get_ports(context, filters) - if ports: - raise l3.RouterInUse(router_id=id) - nuage_router_id = ent_rtr_mapping['nuage_router_id'] - self.nuageclient.delete_router(nuage_router_id) - router_zone = nuagedb.get_rtr_zone_mapping(session, id) - super(NuagePlugin, self).delete_router(context, id) - if router_zone and not self._check_router_subnet_for_tenant(context): - self.nuageclient.delete_user(router_zone['nuage_user_id']) - self.nuageclient.delete_group(router_zone['nuage_group_id']) - - def _make_net_partition_dict(self, net_partition, fields=None): - res = { - 'id': net_partition['id'], - 'name': net_partition['name'], - 'l3dom_tmplt_id': net_partition['l3dom_tmplt_id'], - 'l2dom_tmplt_id': net_partition['l2dom_tmplt_id'], - } - return self._fields(res, fields) - - def _create_net_partition(self, session, net_part_name): - fip_quota = cfg.CONF.RESTPROXY.default_floatingip_quota - params = 
{ - "name": net_part_name, - "fp_quota": str(fip_quota) - } - nuage_net_partition = self.nuageclient.create_net_partition(params) - net_partitioninst = None - if nuage_net_partition: - nuage_entid = nuage_net_partition['nuage_entid'] - l3dom_id = nuage_net_partition['l3dom_id'] - l2dom_id = nuage_net_partition['l2dom_id'] - with session.begin(): - net_partitioninst = nuagedb.add_net_partition(session, - nuage_entid, - l3dom_id, - l2dom_id, - net_part_name) - if not net_partitioninst: - return {} - return self._make_net_partition_dict(net_partitioninst) - - def _create_default_net_partition(self, default_net_part): - def_netpart = self.nuageclient.get_def_netpartition_data( - default_net_part) - session = db.get_session() - if def_netpart: - net_partition = nuagedb.get_net_partition_by_name( - session, default_net_part) - with session.begin(subtransactions=True): - if net_partition: - nuagedb.delete_net_partition(session, net_partition) - net_part = nuagedb.add_net_partition(session, - def_netpart['np_id'], - def_netpart['l3dom_tid'], - def_netpart['l2dom_tid'], - default_net_part) - return self._make_net_partition_dict(net_part) - else: - return self._create_net_partition(session, default_net_part) - - def create_net_partition(self, context, net_partition): - ent = net_partition['net_partition'] - session = context.session - return self._create_net_partition(session, ent["name"]) - - def delete_net_partition(self, context, id): - ent_rtr_mapping = nuagedb.get_ent_rtr_mapping_by_entid( - context.session, - id) - if ent_rtr_mapping: - msg = (_("One or more router still attached to " - "net_partition %s.") % id) - raise n_exc.BadRequest(resource='net_partition', msg=msg) - net_partition = nuagedb.get_net_partition_by_id(context.session, id) - if not net_partition: - msg = (_("NetPartition with %s does not exist") % id) - raise n_exc.BadRequest(resource='net_partition', msg=msg) - l3dom_tmplt_id = net_partition['l3dom_tmplt_id'] - l2dom_tmplt_id = 
net_partition['l2dom_tmplt_id'] - self.nuageclient.delete_net_partition(net_partition['id'], - l3dom_id=l3dom_tmplt_id, - l2dom_id=l2dom_tmplt_id) - with context.session.begin(subtransactions=True): - nuagedb.delete_net_partition(context.session, - net_partition) - - def get_net_partition(self, context, id, fields=None): - net_partition = nuagedb.get_net_partition_by_id(context.session, - id) - return self._make_net_partition_dict(net_partition) - - def get_net_partitions(self, context, filters=None, fields=None): - net_partitions = nuagedb.get_net_partitions(context.session, - filters=filters, - fields=fields) - return [self._make_net_partition_dict(net_partition, fields) - for net_partition in net_partitions] - - def _check_floatingip_update(self, context, port): - filter = {'fixed_port_id': [port['id']]} - local_fip = self.get_floatingips(context, - filters=filter) - if local_fip: - fip = local_fip[0] - self._create_update_floatingip(context, - fip, port['id']) - - def _create_update_floatingip(self, context, - neutron_fip, port_id): - rtr_id = neutron_fip['router_id'] - net_id = neutron_fip['floating_network_id'] - - fip_pool_mapping = nuagedb.get_fip_pool_from_netid(context.session, - net_id) - fip_mapping = nuagedb.get_fip_mapping_by_id(context.session, - neutron_fip['id']) - - if not fip_mapping: - ent_rtr_mapping = nuagedb.get_ent_rtr_mapping_by_rtrid( - context.session, rtr_id) - if not ent_rtr_mapping: - msg = _('router %s is not associated with ' - 'any net-partition') % rtr_id - raise n_exc.BadRequest(resource='floatingip', - msg=msg) - params = { - 'nuage_rtr_id': ent_rtr_mapping['nuage_router_id'], - 'nuage_fippool_id': fip_pool_mapping['fip_pool_id'], - 'neutron_fip_ip': neutron_fip['floating_ip_address'] - } - nuage_fip_id = self.nuageclient.create_nuage_floatingip(params) - nuagedb.add_fip_mapping(context.session, - neutron_fip['id'], - rtr_id, nuage_fip_id) - else: - if rtr_id != fip_mapping['router_id']: - msg = _('Floating IP can not be 
associated to VM in ' - 'different router context') - raise nuage_exc.OperationNotSupported(msg=msg) - nuage_fip_id = fip_mapping['nuage_fip_id'] - - fip_pool_dict = {'router_id': neutron_fip['router_id']} - nuagedb.update_fip_pool_mapping(fip_pool_mapping, - fip_pool_dict) - - # Update VM if required - port_mapping = nuagedb.get_port_mapping_by_id(context.session, - port_id) - if port_mapping: - params = { - 'nuage_vport_id': port_mapping['nuage_vport_id'], - 'nuage_fip_id': nuage_fip_id - } - self.nuageclient.update_nuage_vm_vport(params) - - def create_floatingip(self, context, floatingip): - fip = floatingip['floatingip'] - with context.session.begin(subtransactions=True): - neutron_fip = super(NuagePlugin, self).create_floatingip( - context, floatingip) - if not neutron_fip['router_id']: - return neutron_fip - try: - self._create_update_floatingip(context, neutron_fip, - fip['port_id']) - except (nuage_exc.OperationNotSupported, n_exc.BadRequest): - with excutils.save_and_reraise_exception(): - super(NuagePlugin, self).delete_floatingip( - context, neutron_fip['id']) - return neutron_fip - - def disassociate_floatingips(self, context, port_id): - super(NuagePlugin, self).disassociate_floatingips(context, port_id) - port_mapping = nuagedb.get_port_mapping_by_id(context.session, - port_id) - if port_mapping: - params = { - 'nuage_vport_id': port_mapping['nuage_vport_id'], - 'nuage_fip_id': None - } - self.nuageclient.update_nuage_vm_vport(params) - - def update_floatingip(self, context, id, floatingip): - fip = floatingip['floatingip'] - orig_fip = self._get_floatingip(context, id) - port_id = orig_fip['fixed_port_id'] - with context.session.begin(subtransactions=True): - neutron_fip = super(NuagePlugin, self).update_floatingip( - context, id, floatingip) - if fip['port_id'] is not None: - if not neutron_fip['router_id']: - ret_msg = 'floating-ip is not associated yet' - raise n_exc.BadRequest(resource='floatingip', - msg=ret_msg) - - try: - 
self._create_update_floatingip(context, - neutron_fip, - fip['port_id']) - except nuage_exc.OperationNotSupported: - with excutils.save_and_reraise_exception(): - super(NuagePlugin, - self).disassociate_floatingips(context, - fip['port_id']) - except n_exc.BadRequest: - with excutils.save_and_reraise_exception(): - super(NuagePlugin, self).delete_floatingip(context, - id) - else: - port_mapping = nuagedb.get_port_mapping_by_id(context.session, - port_id) - if port_mapping: - params = { - 'nuage_vport_id': port_mapping['nuage_vport_id'], - 'nuage_fip_id': None - } - self.nuageclient.update_nuage_vm_vport(params) - return neutron_fip - - def delete_floatingip(self, context, id): - fip = self._get_floatingip(context, id) - port_id = fip['fixed_port_id'] - with context.session.begin(subtransactions=True): - if port_id: - port_mapping = nuagedb.get_port_mapping_by_id(context.session, - port_id) - if (port_mapping and - port_mapping['nuage_vport_id'] is not None): - params = { - 'nuage_vport_id': port_mapping['nuage_vport_id'], - 'nuage_fip_id': None - } - self.nuageclient.update_nuage_vm_vport(params) - fip_mapping = nuagedb.get_fip_mapping_by_id(context.session, - id) - if fip_mapping: - self.nuageclient.delete_nuage_floatingip( - fip_mapping['nuage_fip_id']) - nuagedb.delete_fip_mapping(context.session, fip_mapping) - super(NuagePlugin, self).delete_floatingip(context, id) diff --git a/neutron/plugins/ofagent/README b/neutron/plugins/ofagent/README deleted file mode 100644 index a43b0dd07..000000000 --- a/neutron/plugins/ofagent/README +++ /dev/null @@ -1,21 +0,0 @@ -This directory includes agent for OpenFlow Agent mechanism driver. 
- -# -- Installation - -For how to install/set up ML2 mechanism driver for OpenFlow Agent, please refer to -https://github.com/osrg/ryu/wiki/OpenStack - -# -- Ryu General - -For general Ryu stuff, please refer to -http://www.osrg.net/ryu/ - -Ryu is available at github -git://github.com/osrg/ryu.git -https://github.com/osrg/ryu - -The mailing is at -ryu-devel@lists.sourceforge.net -https://lists.sourceforge.net/lists/listinfo/ryu-devel - -Enjoy! diff --git a/neutron/plugins/ofagent/__init__.py b/neutron/plugins/ofagent/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/neutron/plugins/ofagent/agent/__init__.py b/neutron/plugins/ofagent/agent/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/neutron/plugins/ofagent/agent/ofa_neutron_agent.py b/neutron/plugins/ofagent/agent/ofa_neutron_agent.py deleted file mode 100644 index 6e6cd84d7..000000000 --- a/neutron/plugins/ofagent/agent/ofa_neutron_agent.py +++ /dev/null @@ -1,1418 +0,0 @@ -# Copyright (C) 2014 VA Linux Systems Japan K.K. -# Based on openvswitch agent. -# -# Copyright 2011 VMware, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# @author: Fumihiko Kakuma, VA Linux Systems Japan K.K. 
- -import time - -import netaddr -from oslo.config import cfg -from ryu.app.ofctl import api as ryu_api -from ryu.base import app_manager -from ryu.lib import hub -from ryu.ofproto import ofproto_v1_3 as ryu_ofp13 - -from neutron.agent.linux import ip_lib -from neutron.agent.linux import ovs_lib -from neutron.agent.linux import polling -from neutron.agent.linux import utils -from neutron.agent import rpc as agent_rpc -from neutron.agent import securitygroups_rpc as sg_rpc -from neutron.common import constants as n_const -from neutron.common import rpc_compat -from neutron.common import topics -from neutron.common import utils as n_utils -from neutron import context -from neutron.openstack.common import log as logging -from neutron.openstack.common import loopingcall -from neutron.plugins.common import constants as p_const -from neutron.plugins.ofagent.common import config # noqa -from neutron.plugins.openvswitch.common import constants - - -LOG = logging.getLogger(__name__) - -# A placeholder for dead vlans. -DEAD_VLAN_TAG = str(n_const.MAX_VLAN_TAG + 1) - - -# A class to represent a VIF (i.e., a port that has 'iface-id' and 'vif-mac' -# attributes set). 
-class LocalVLANMapping: - def __init__(self, vlan, network_type, physical_network, segmentation_id, - vif_ports=None): - if vif_ports is None: - vif_ports = {} - self.vlan = vlan - self.network_type = network_type - self.physical_network = physical_network - self.segmentation_id = segmentation_id - self.vif_ports = vif_ports - # set of tunnel ports on which packets should be flooded - self.tun_ofports = set() - - def __str__(self): - return ("lv-id = %s type = %s phys-net = %s phys-id = %s" % - (self.vlan, self.network_type, self.physical_network, - self.segmentation_id)) - - -class OVSBridge(ovs_lib.OVSBridge): - def __init__(self, br_name, root_helper, ryuapp): - super(OVSBridge, self).__init__(br_name, root_helper) - self.datapath_id = None - self.datapath = None - self.ofparser = None - self.ryuapp = ryuapp - - def find_datapath_id(self): - self.datapath_id = self.get_datapath_id() - - def get_datapath(self, retry_max=cfg.CONF.AGENT.get_datapath_retry_times): - retry = 0 - while self.datapath is None: - self.datapath = ryu_api.get_datapath(self.ryuapp, - int(self.datapath_id, 16)) - retry += 1 - if retry >= retry_max: - LOG.error(_('Agent terminated!: Failed to get a datapath.')) - raise SystemExit(1) - time.sleep(1) - self.ofparser = self.datapath.ofproto_parser - - def setup_ofp(self, controller_names=None, - protocols='OpenFlow13', - retry_max=cfg.CONF.AGENT.get_datapath_retry_times): - if not controller_names: - host = cfg.CONF.ofp_listen_host - if not host: - # 127.0.0.1 is a default for agent style of controller - host = '127.0.0.1' - controller_names = ["tcp:%s:%d" % (host, - cfg.CONF.ofp_tcp_listen_port)] - try: - self.set_protocols(protocols) - self.set_controller(controller_names) - except RuntimeError: - LOG.exception(_("Agent terminated")) - raise SystemExit(1) - self.find_datapath_id() - self.get_datapath(retry_max) - - -class OFAPluginApi(agent_rpc.PluginApi, - sg_rpc.SecurityGroupServerRpcApiMixin): - pass - - -class 
OFASecurityGroupAgent(sg_rpc.SecurityGroupAgentRpcMixin): - def __init__(self, context, plugin_rpc, root_helper): - self.context = context - self.plugin_rpc = plugin_rpc - self.root_helper = root_helper - self.init_firewall(defer_refresh_firewall=True) - - -class OFANeutronAgentRyuApp(app_manager.RyuApp): - OFP_VERSIONS = [ryu_ofp13.OFP_VERSION] - - def start(self): - - super(OFANeutronAgentRyuApp, self).start() - return hub.spawn(self._agent_main, self) - - def _agent_main(self, ryuapp): - cfg.CONF.register_opts(ip_lib.OPTS) - n_utils.log_opt_values(LOG) - - try: - agent_config = create_agent_config_map(cfg.CONF) - except ValueError: - LOG.exception(_("Agent failed to create agent config map")) - raise SystemExit(1) - - is_xen_compute_host = ('rootwrap-xen-dom0' in - agent_config['root_helper']) - if is_xen_compute_host: - # Force ip_lib to always use the root helper to ensure that ip - # commands target xen dom0 rather than domU. - cfg.CONF.set_default('ip_lib_force_root', True) - - agent = OFANeutronAgent(ryuapp, **agent_config) - - # Start everything. - LOG.info(_("Agent initialized successfully, now running... ")) - agent.daemon_loop() - - -class OFANeutronAgent(rpc_compat.RpcCallback, - sg_rpc.SecurityGroupAgentRpcCallbackMixin): - """A agent for OpenFlow Agent ML2 mechanism driver. - - OFANeutronAgent is a OpenFlow Agent agent for a ML2 plugin. - This is as a ryu application thread. - - An agent acts as an OpenFlow controller on each compute nodes. - - OpenFlow 1.3 (vendor agnostic unlike OVS extensions). - """ - - # history - # 1.0 Initial version - # 1.1 Support Security Group RPC - RPC_API_VERSION = '1.1' - - def __init__(self, ryuapp, integ_br, tun_br, local_ip, - bridge_mappings, root_helper, - polling_interval, tunnel_types=None, - veth_mtu=None, l2_population=False, - minimize_polling=False, - ovsdb_monitor_respawn_interval=( - constants.DEFAULT_OVSDBMON_RESPAWN)): - """Constructor. - - :param ryuapp: object of the ryu app. 
- :param integ_br: name of the integration bridge. - :param tun_br: name of the tunnel bridge. - :param local_ip: local IP address of this hypervisor. - :param bridge_mappings: mappings from physical network name to bridge. - :param root_helper: utility to use when running shell cmds. - :param polling_interval: interval (secs) to poll DB. - :param tunnel_types: A list of tunnel types to enable support for in - the agent. If set, will automatically set enable_tunneling to - True. - :param veth_mtu: MTU size for veth interfaces. - :param minimize_polling: Optional, whether to minimize polling by - monitoring ovsdb for interface changes. - :param ovsdb_monitor_respawn_interval: Optional, when using polling - minimization, the number of seconds to wait before respawning - the ovsdb monitor. - """ - super(OFANeutronAgent, self).__init__() - self.ryuapp = ryuapp - self.veth_mtu = veth_mtu - self.root_helper = root_helper - self.available_local_vlans = set(xrange(n_const.MIN_VLAN_TAG, - n_const.MAX_VLAN_TAG)) - self.tunnel_types = tunnel_types or [] - self.l2_pop = l2_population - self.agent_state = { - 'binary': 'neutron-ofa-agent', - 'host': cfg.CONF.host, - 'topic': n_const.L2_AGENT_TOPIC, - 'configurations': {'bridge_mappings': bridge_mappings, - 'tunnel_types': self.tunnel_types, - 'tunneling_ip': local_ip, - 'l2_population': self.l2_pop}, - 'agent_type': n_const.AGENT_TYPE_OFA, - 'start_flag': True} - - # Keep track of int_br's device count for use by _report_state() - self.int_br_device_count = 0 - - self.int_br = OVSBridge(integ_br, self.root_helper, self.ryuapp) - # Stores port update notifications for processing in main loop - self.updated_ports = set() - self.setup_rpc() - self.setup_integration_br() - self.setup_physical_bridges(bridge_mappings) - self.local_vlan_map = {} - self.tun_br_ofports = {p_const.TYPE_GRE: {}, - p_const.TYPE_VXLAN: {}} - - self.polling_interval = polling_interval - self.minimize_polling = minimize_polling - 
self.ovsdb_monitor_respawn_interval = ovsdb_monitor_respawn_interval - - self.enable_tunneling = bool(self.tunnel_types) - self.local_ip = local_ip - self.tunnel_count = 0 - self.vxlan_udp_port = cfg.CONF.AGENT.vxlan_udp_port - self.dont_fragment = cfg.CONF.AGENT.dont_fragment - if self.enable_tunneling: - self.setup_tunnel_br(tun_br) - # Collect additional bridges to monitor - self.ancillary_brs = self.setup_ancillary_bridges(integ_br, tun_br) - - # Security group agent support - self.sg_agent = OFASecurityGroupAgent(self.context, - self.plugin_rpc, - self.root_helper) - # Initialize iteration counter - self.iter_num = 0 - - def _report_state(self): - # How many devices are likely used by a VM - self.agent_state.get('configurations')['devices'] = ( - self.int_br_device_count) - try: - self.state_rpc.report_state(self.context, - self.agent_state) - self.agent_state.pop('start_flag', None) - except Exception: - LOG.exception(_("Failed reporting state!")) - - def _create_tunnel_port_name(self, tunnel_type, ip_address): - try: - ip_hex = '%08x' % netaddr.IPAddress(ip_address, version=4) - return '%s-%s' % (tunnel_type, ip_hex) - except Exception: - LOG.warn(_("Unable to create tunnel port. 
Invalid remote IP: %s"), - ip_address) - - def ryu_send_msg(self, msg): - result = ryu_api.send_msg(self.ryuapp, msg) - LOG.info(_("ryu send_msg() result: %s"), result) - - def setup_rpc(self): - mac = self.int_br.get_local_port_mac() - self.agent_id = '%s%s' % ('ovs', (mac.replace(":", ""))) - self.topic = topics.AGENT - self.plugin_rpc = OFAPluginApi(topics.PLUGIN) - self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN) - - # RPC network init - self.context = context.get_admin_context_without_session() - # Handle updates from service - self.endpoints = [self] - # Define the listening consumers for the agent - consumers = [[topics.PORT, topics.UPDATE], - [topics.NETWORK, topics.DELETE], - [constants.TUNNEL, topics.UPDATE], - [topics.SECURITY_GROUP, topics.UPDATE]] - self.connection = agent_rpc.create_consumers(self.endpoints, - self.topic, - consumers) - report_interval = cfg.CONF.AGENT.report_interval - if report_interval: - heartbeat = loopingcall.FixedIntervalLoopingCall( - self._report_state) - heartbeat.start(interval=report_interval) - - def get_net_uuid(self, vif_id): - for network_id, vlan_mapping in self.local_vlan_map.iteritems(): - if vif_id in vlan_mapping.vif_ports: - return network_id - - def network_delete(self, context, **kwargs): - network_id = kwargs.get('network_id') - LOG.debug(_("network_delete received network %s"), network_id) - # The network may not be defined on this agent - lvm = self.local_vlan_map.get(network_id) - if lvm: - self.reclaim_local_vlan(network_id) - else: - LOG.debug(_("Network %s not used on agent."), network_id) - - def port_update(self, context, **kwargs): - port = kwargs.get('port') - # Put the port identifier in the updated_ports set. 
- # Even if full port details might be provided to this call, - # they are not used since there is no guarantee the notifications - # are processed in the same order as the relevant API requests - self.updated_ports.add(port['id']) - LOG.debug(_("port_update received port %s"), port['id']) - - def tunnel_update(self, context, **kwargs): - LOG.debug(_("tunnel_update received")) - if not self.enable_tunneling: - return - tunnel_ip = kwargs.get('tunnel_ip') - tunnel_type = kwargs.get('tunnel_type') - if not tunnel_type: - LOG.error(_("No tunnel_type specified, cannot create tunnels")) - return - if tunnel_type not in self.tunnel_types: - LOG.error(_("tunnel_type %s not supported by agent"), tunnel_type) - return - if tunnel_ip == self.local_ip: - return - tun_name = self._create_tunnel_port_name(tunnel_type, tunnel_ip) - if not tun_name: - return - self.setup_tunnel_port(tun_name, tunnel_ip, tunnel_type) - - def _provision_local_vlan_outbound_for_tunnel(self, lvid, - segmentation_id, ofports): - br = self.tun_br - match = br.ofparser.OFPMatch( - vlan_vid=int(lvid) | ryu_ofp13.OFPVID_PRESENT) - actions = [br.ofparser.OFPActionPopVlan(), - br.ofparser.OFPActionSetField( - tunnel_id=int(segmentation_id))] - for ofport in ofports: - actions.append(br.ofparser.OFPActionOutput(ofport, 0)) - instructions = [br.ofparser.OFPInstructionActions( - ryu_ofp13.OFPIT_APPLY_ACTIONS, actions)] - msg = br.ofparser.OFPFlowMod( - br.datapath, - table_id=constants.FLOOD_TO_TUN, - priority=1, - match=match, instructions=instructions) - self.ryu_send_msg(msg) - - def _provision_local_vlan_inbound_for_tunnel(self, lvid, network_type, - segmentation_id): - br = self.tun_br - match = br.ofparser.OFPMatch( - tunnel_id=int(segmentation_id)) - actions = [ - br.ofparser.OFPActionPushVlan(), - br.ofparser.OFPActionSetField( - vlan_vid=int(lvid) | ryu_ofp13.OFPVID_PRESENT)] - instructions = [ - br.ofparser.OFPInstructionActions( - ryu_ofp13.OFPIT_APPLY_ACTIONS, actions), - 
br.ofparser.OFPInstructionGotoTable( - table_id=constants.LEARN_FROM_TUN)] - msg = br.ofparser.OFPFlowMod( - br.datapath, - table_id=constants.TUN_TABLE[network_type], - priority=1, - match=match, - instructions=instructions) - self.ryu_send_msg(msg) - - def _local_vlan_for_tunnel(self, lvid, network_type, segmentation_id): - ofports = [int(ofport) for ofport in - self.tun_br_ofports[network_type].values()] - if ofports: - self._provision_local_vlan_outbound_for_tunnel( - lvid, segmentation_id, ofports) - self._provision_local_vlan_inbound_for_tunnel(lvid, network_type, - segmentation_id) - - def _provision_local_vlan_outbound(self, lvid, vlan_vid, physical_network): - br = self.phys_brs[physical_network] - datapath = br.datapath - ofp = datapath.ofproto - ofpp = datapath.ofproto_parser - match = ofpp.OFPMatch(in_port=int(self.phys_ofports[physical_network]), - vlan_vid=int(lvid) | ofp.OFPVID_PRESENT) - if vlan_vid == ofp.OFPVID_NONE: - actions = [ofpp.OFPActionPopVlan()] - else: - actions = [ofpp.OFPActionSetField(vlan_vid=vlan_vid)] - actions += [ofpp.OFPActionOutput(ofp.OFPP_NORMAL, 0)] - instructions = [ - ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions), - ] - msg = ofpp.OFPFlowMod(datapath, priority=4, match=match, - instructions=instructions) - self.ryu_send_msg(msg) - - def _provision_local_vlan_inbound(self, lvid, vlan_vid, physical_network): - datapath = self.int_br.datapath - ofp = datapath.ofproto - ofpp = datapath.ofproto_parser - match = ofpp.OFPMatch(in_port=int(self.int_ofports[physical_network]), - vlan_vid=vlan_vid) - if vlan_vid == ofp.OFPVID_NONE: - actions = [ofpp.OFPActionPushVlan()] - else: - actions = [] - actions += [ - ofpp.OFPActionSetField(vlan_vid=int(lvid) | ofp.OFPVID_PRESENT), - ofpp.OFPActionOutput(ofp.OFPP_NORMAL, 0), - ] - instructions = [ - ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions), - ] - msg = ofpp.OFPFlowMod(datapath, priority=3, match=match, - instructions=instructions) - self.ryu_send_msg(msg) - 
- def _local_vlan_for_flat(self, lvid, physical_network): - vlan_vid = ryu_ofp13.OFPVID_NONE - self._provision_local_vlan_outbound(lvid, vlan_vid, physical_network) - self._provision_local_vlan_inbound(lvid, vlan_vid, physical_network) - - def _local_vlan_for_vlan(self, lvid, physical_network, segmentation_id): - vlan_vid = int(segmentation_id) | ryu_ofp13.OFPVID_PRESENT - self._provision_local_vlan_outbound(lvid, vlan_vid, physical_network) - self._provision_local_vlan_inbound(lvid, vlan_vid, physical_network) - - def provision_local_vlan(self, net_uuid, network_type, physical_network, - segmentation_id): - """Provisions a local VLAN. - - :param net_uuid: the uuid of the network associated with this vlan. - :param network_type: the network type ('gre', 'vxlan', 'vlan', 'flat', - 'local') - :param physical_network: the physical network for 'vlan' or 'flat' - :param segmentation_id: the VID for 'vlan' or tunnel ID for 'tunnel' - """ - - if not self.available_local_vlans: - LOG.error(_("No local VLAN available for net-id=%s"), net_uuid) - return - lvid = self.available_local_vlans.pop() - LOG.info(_("Assigning %(vlan_id)s as local vlan for " - "net-id=%(net_uuid)s"), - {'vlan_id': lvid, 'net_uuid': net_uuid}) - self.local_vlan_map[net_uuid] = LocalVLANMapping(lvid, network_type, - physical_network, - segmentation_id) - - if network_type in constants.TUNNEL_NETWORK_TYPES: - if self.enable_tunneling: - self._local_vlan_for_tunnel(lvid, network_type, - segmentation_id) - else: - LOG.error(_("Cannot provision %(network_type)s network for " - "net-id=%(net_uuid)s - tunneling disabled"), - {'network_type': network_type, - 'net_uuid': net_uuid}) - elif network_type == p_const.TYPE_FLAT: - if physical_network in self.phys_brs: - self._local_vlan_for_flat(lvid, physical_network) - else: - LOG.error(_("Cannot provision flat network for " - "net-id=%(net_uuid)s - no bridge for " - "physical_network %(physical_network)s"), - {'net_uuid': net_uuid, - 'physical_network': 
physical_network}) - elif network_type == p_const.TYPE_VLAN: - if physical_network in self.phys_brs: - self._local_vlan_for_vlan(lvid, physical_network, - segmentation_id) - else: - LOG.error(_("Cannot provision VLAN network for " - "net-id=%(net_uuid)s - no bridge for " - "physical_network %(physical_network)s"), - {'net_uuid': net_uuid, - 'physical_network': physical_network}) - elif network_type == p_const.TYPE_LOCAL: - # no flows needed for local networks - pass - else: - LOG.error(_("Cannot provision unknown network type " - "%(network_type)s for net-id=%(net_uuid)s"), - {'network_type': network_type, - 'net_uuid': net_uuid}) - - def _reclaim_local_vlan_outbound(self, lvm): - br = self.phys_brs[lvm.physical_network] - datapath = br.datapath - ofp = datapath.ofproto - ofpp = datapath.ofproto_parser - match = ofpp.OFPMatch( - in_port=int(self.phys_ofports[lvm.physical_network]), - vlan_vid=int(lvm.vlan) | ofp.OFPVID_PRESENT) - msg = ofpp.OFPFlowMod(datapath, table_id=ofp.OFPTT_ALL, - command=ofp.OFPFC_DELETE, out_group=ofp.OFPG_ANY, - out_port=ofp.OFPP_ANY, match=match) - self.ryu_send_msg(msg) - - def _reclaim_local_vlan_inbound(self, lvm): - datapath = self.int_br.datapath - ofp = datapath.ofproto - ofpp = datapath.ofproto_parser - if lvm.network_type == p_const.TYPE_FLAT: - vid = ofp.OFPVID_NONE - else: # p_const.TYPE_VLAN - vid = lvm.segmentation_id | ofp.OFPVID_PRESENT - match = ofpp.OFPMatch( - in_port=int(self.int_ofports[lvm.physical_network]), - vlan_vid=vid) - msg = ofpp.OFPFlowMod(datapath, table_id=ofp.OFPTT_ALL, - command=ofp.OFPFC_DELETE, out_group=ofp.OFPG_ANY, - out_port=ofp.OFPP_ANY, match=match) - self.ryu_send_msg(msg) - - def reclaim_local_vlan(self, net_uuid): - """Reclaim a local VLAN. - - :param net_uuid: the network uuid associated with this vlan. - :param lvm: a LocalVLANMapping object that tracks (vlan, lsw_id, - vif_ids) mapping. 
- """ - lvm = self.local_vlan_map.pop(net_uuid, None) - if lvm is None: - LOG.debug(_("Network %s not used on agent."), net_uuid) - return - - LOG.info(_("Reclaiming vlan = %(vlan_id)s from net-id = %(net_uuid)s"), - {'vlan_id': lvm.vlan, - 'net_uuid': net_uuid}) - - if lvm.network_type in constants.TUNNEL_NETWORK_TYPES: - if self.enable_tunneling: - match = self.tun_br.ofparser.OFPMatch( - tunnel_id=int(lvm.segmentation_id)) - msg = self.tun_br.ofparser.OFPFlowMod( - self.tun_br.datapath, - table_id=constants.TUN_TABLE[lvm.network_type], - command=ryu_ofp13.OFPFC_DELETE, - out_group=ryu_ofp13.OFPG_ANY, - out_port=ryu_ofp13.OFPP_ANY, - match=match) - self.ryu_send_msg(msg) - match = self.tun_br.ofparser.OFPMatch( - vlan_vid=int(lvm.vlan) | ryu_ofp13.OFPVID_PRESENT) - msg = self.tun_br.ofparser.OFPFlowMod( - self.tun_br.datapath, - table_id=ryu_ofp13.OFPTT_ALL, - command=ryu_ofp13.OFPFC_DELETE, - out_group=ryu_ofp13.OFPG_ANY, - out_port=ryu_ofp13.OFPP_ANY, - match=match) - self.ryu_send_msg(msg) - elif lvm.network_type in (p_const.TYPE_FLAT, p_const.TYPE_VLAN): - if lvm.physical_network in self.phys_brs: - self._reclaim_local_vlan_outbound(lvm) - self._reclaim_local_vlan_inbound(lvm) - elif lvm.network_type == p_const.TYPE_LOCAL: - # no flows needed for local networks - pass - else: - LOG.error(_("Cannot reclaim unknown network type " - "%(network_type)s for net-id=%(net_uuid)s"), - {'network_type': lvm.network_type, - 'net_uuid': net_uuid}) - - self.available_local_vlans.add(lvm.vlan) - - def port_bound(self, port, net_uuid, - network_type, physical_network, segmentation_id): - """Bind port to net_uuid/lsw_id and install flow for inbound traffic - to vm. - - :param port: a ovs_lib.VifPort object. - :param net_uuid: the net_uuid this port is to be associated with. 
- :param network_type: the network type ('gre', 'vlan', 'flat', 'local') - :param physical_network: the physical network for 'vlan' or 'flat' - :param segmentation_id: the VID for 'vlan' or tunnel ID for 'tunnel' - """ - if net_uuid not in self.local_vlan_map: - self.provision_local_vlan(net_uuid, network_type, - physical_network, segmentation_id) - lvm = self.local_vlan_map[net_uuid] - lvm.vif_ports[port.vif_id] = port - # Do not bind a port if it's already bound - cur_tag = self.int_br.db_get_val("Port", port.port_name, "tag") - if cur_tag != str(lvm.vlan): - self.int_br.set_db_attribute("Port", port.port_name, "tag", - str(lvm.vlan)) - if port.ofport != -1: - match = self.int_br.ofparser.OFPMatch(in_port=port.ofport) - msg = self.int_br.ofparser.OFPFlowMod( - self.int_br.datapath, - table_id=ryu_ofp13.OFPTT_ALL, - command=ryu_ofp13.OFPFC_DELETE, - out_group=ryu_ofp13.OFPG_ANY, - out_port=ryu_ofp13.OFPP_ANY, - match=match) - self.ryu_send_msg(msg) - - def port_unbound(self, vif_id, net_uuid=None): - """Unbind port. - - Removes corresponding local vlan mapping object if this is its last - VIF. - - :param vif_id: the id of the vif - :param net_uuid: the net_uuid this port is associated with. - """ - net_uuid = net_uuid or self.get_net_uuid(vif_id) - - if not self.local_vlan_map.get(net_uuid): - LOG.info(_('port_unbound() net_uuid %s not in local_vlan_map'), - net_uuid) - return - - lvm = self.local_vlan_map[net_uuid] - lvm.vif_ports.pop(vif_id, None) - - if not lvm.vif_ports: - self.reclaim_local_vlan(net_uuid) - - def port_dead(self, port): - """Once a port has no binding, put it on the "dead vlan". - - :param port: a ovs_lib.VifPort object. 
- """ - # Don't kill a port if it's already dead - cur_tag = self.int_br.db_get_val("Port", port.port_name, "tag") - if cur_tag != DEAD_VLAN_TAG: - self.int_br.set_db_attribute("Port", port.port_name, "tag", - DEAD_VLAN_TAG) - match = self.int_br.ofparser.OFPMatch(in_port=port.ofport) - msg = self.int_br.ofparser.OFPFlowMod(self.int_br.datapath, - priority=2, match=match) - self.ryu_send_msg(msg) - - def setup_integration_br(self): - """Setup the integration bridge. - - Create patch ports and remove all existing flows. - - :param bridge_name: the name of the integration bridge. - :returns: the integration bridge - """ - self.int_br.setup_ofp() - self.int_br.delete_port(cfg.CONF.OVS.int_peer_patch_port) - msg = self.int_br.ofparser.OFPFlowMod(self.int_br.datapath, - table_id=ryu_ofp13.OFPTT_ALL, - command=ryu_ofp13.OFPFC_DELETE, - out_group=ryu_ofp13.OFPG_ANY, - out_port=ryu_ofp13.OFPP_ANY) - self.ryu_send_msg(msg) - # switch all traffic using L2 learning - actions = [self.int_br.ofparser.OFPActionOutput( - ryu_ofp13.OFPP_NORMAL, 0)] - instructions = [self.int_br.ofparser.OFPInstructionActions( - ryu_ofp13.OFPIT_APPLY_ACTIONS, - actions)] - msg = self.int_br.ofparser.OFPFlowMod(self.int_br.datapath, - priority=1, - instructions=instructions) - self.ryu_send_msg(msg) - - def setup_ancillary_bridges(self, integ_br, tun_br): - """Setup ancillary bridges - for example br-ex.""" - ovs_bridges = set(ovs_lib.get_bridges(self.root_helper)) - # Remove all known bridges - ovs_bridges.remove(integ_br) - if self.enable_tunneling: - ovs_bridges.remove(tun_br) - br_names = [self.phys_brs[physical_network].br_name for - physical_network in self.phys_brs] - ovs_bridges.difference_update(br_names) - # Filter list of bridges to those that have external - # bridge-id's configured - br_names = [ - bridge for bridge in ovs_bridges - if bridge != ovs_lib.get_bridge_external_bridge_id( - self.root_helper, bridge) - ] - ovs_bridges.difference_update(br_names) - ancillary_bridges = [] - for 
bridge in ovs_bridges: - br = OVSBridge(bridge, self.root_helper, self.ryuapp) - ancillary_bridges.append(br) - LOG.info(_('ancillary bridge list: %s.'), ancillary_bridges) - return ancillary_bridges - - def _tun_br_sort_incoming_traffic_depend_in_port(self, br): - match = br.ofparser.OFPMatch( - in_port=int(self.patch_int_ofport)) - instructions = [br.ofparser.OFPInstructionGotoTable( - table_id=constants.PATCH_LV_TO_TUN)] - msg = br.ofparser.OFPFlowMod(br.datapath, - priority=1, - match=match, - instructions=instructions) - self.ryu_send_msg(msg) - msg = br.ofparser.OFPFlowMod(br.datapath, priority=0) - self.ryu_send_msg(msg) - - def _tun_br_goto_table_ucast_unicast(self, br): - match = br.ofparser.OFPMatch(eth_dst=('00:00:00:00:00:00', - '01:00:00:00:00:00')) - instructions = [br.ofparser.OFPInstructionGotoTable( - table_id=constants.UCAST_TO_TUN)] - msg = br.ofparser.OFPFlowMod(br.datapath, - table_id=constants.PATCH_LV_TO_TUN, - match=match, - instructions=instructions) - self.ryu_send_msg(msg) - - def _tun_br_goto_table_flood_broad_multi_cast(self, br): - match = br.ofparser.OFPMatch(eth_dst=('01:00:00:00:00:00', - '01:00:00:00:00:00')) - instructions = [br.ofparser.OFPInstructionGotoTable( - table_id=constants.FLOOD_TO_TUN)] - msg = br.ofparser.OFPFlowMod(br.datapath, - table_id=constants.PATCH_LV_TO_TUN, - match=match, - instructions=instructions) - self.ryu_send_msg(msg) - - def _tun_br_set_table_tun_by_tunnel_type(self, br): - for tunnel_type in constants.TUNNEL_NETWORK_TYPES: - msg = br.ofparser.OFPFlowMod( - br.datapath, - table_id=constants.TUN_TABLE[tunnel_type], - priority=0) - self.ryu_send_msg(msg) - - def _tun_br_output_patch_int(self, br): - actions = [br.ofparser.OFPActionOutput( - int(self.patch_int_ofport), 0)] - instructions = [br.ofparser.OFPInstructionActions( - ryu_ofp13.OFPIT_APPLY_ACTIONS, - actions)] - msg = br.ofparser.OFPFlowMod(br.datapath, - table_id=constants.LEARN_FROM_TUN, - priority=1, - instructions=instructions) - 
self.ryu_send_msg(msg) - - def _tun_br_goto_table_flood_unknown_unicast(self, br): - instructions = [br.ofparser.OFPInstructionGotoTable( - table_id=constants.FLOOD_TO_TUN)] - msg = br.ofparser.OFPFlowMod(br.datapath, - table_id=constants.UCAST_TO_TUN, - priority=0, - instructions=instructions) - self.ryu_send_msg(msg) - - def _tun_br_default_drop(self, br): - msg = br.ofparser.OFPFlowMod( - br.datapath, - table_id=constants.FLOOD_TO_TUN, - priority=0) - self.ryu_send_msg(msg) - - def setup_tunnel_br(self, tun_br): - """Setup the tunnel bridge. - - Creates tunnel bridge, and links it to the integration bridge - using a patch port. - - :param tun_br: the name of the tunnel bridge. - """ - self.tun_br = OVSBridge(tun_br, self.root_helper, self.ryuapp) - self.tun_br.reset_bridge() - self.tun_br.setup_ofp() - self.patch_tun_ofport = self.int_br.add_patch_port( - cfg.CONF.OVS.int_peer_patch_port, cfg.CONF.OVS.tun_peer_patch_port) - self.patch_int_ofport = self.tun_br.add_patch_port( - cfg.CONF.OVS.tun_peer_patch_port, cfg.CONF.OVS.int_peer_patch_port) - if int(self.patch_tun_ofport) < 0 or int(self.patch_int_ofport) < 0: - LOG.error(_("Failed to create OVS patch port. Cannot have " - "tunneling enabled on this agent, since this version " - "of OVS does not support tunnels or patch ports. 
" - "Agent terminated!")) - raise SystemExit(1) - msg = self.tun_br.ofparser.OFPFlowMod(self.tun_br.datapath, - table_id=ryu_ofp13.OFPTT_ALL, - command=ryu_ofp13.OFPFC_DELETE, - out_group=ryu_ofp13.OFPG_ANY, - out_port=ryu_ofp13.OFPP_ANY) - self.ryu_send_msg(msg) - - self._tun_br_sort_incoming_traffic_depend_in_port(self.tun_br) - self._tun_br_goto_table_ucast_unicast(self.tun_br) - self._tun_br_goto_table_flood_broad_multi_cast(self.tun_br) - self._tun_br_set_table_tun_by_tunnel_type(self.tun_br) - self._tun_br_output_patch_int(self.tun_br) - self._tun_br_goto_table_flood_unknown_unicast(self.tun_br) - self._tun_br_default_drop(self.tun_br) - - def _phys_br_prepare_create_veth(self, br, int_veth_name, phys_veth_name): - self.int_br.delete_port(int_veth_name) - br.delete_port(phys_veth_name) - if ip_lib.device_exists(int_veth_name, self.root_helper): - ip_lib.IPDevice(int_veth_name, self.root_helper).link.delete() - # Give udev a chance to process its rules here, to avoid - # race conditions between commands launched by udev rules - # and the subsequent call to ip_wrapper.add_veth - utils.execute(['/sbin/udevadm', 'settle', '--timeout=10']) - - def _phys_br_create_veth(self, br, int_veth_name, - phys_veth_name, physical_network, ip_wrapper): - int_veth, phys_veth = ip_wrapper.add_veth(int_veth_name, - phys_veth_name) - self.int_ofports[physical_network] = self.int_br.add_port(int_veth) - self.phys_ofports[physical_network] = br.add_port(phys_veth) - return (int_veth, phys_veth) - - def _phys_br_block_untranslated_traffic(self, br, physical_network): - match = self.int_br.ofparser.OFPMatch(in_port=int( - self.int_ofports[physical_network])) - msg = self.int_br.ofparser.OFPFlowMod(self.int_br.datapath, - priority=2, match=match) - self.ryu_send_msg(msg) - match = br.ofparser.OFPMatch(in_port=int( - self.phys_ofports[physical_network])) - msg = br.ofparser.OFPFlowMod(br.datapath, priority=2, match=match) - self.ryu_send_msg(msg) - - def 
_phys_br_enable_veth_to_pass_traffic(self, int_veth, phys_veth): - # enable veth to pass traffic - int_veth.link.set_up() - phys_veth.link.set_up() - - if self.veth_mtu: - # set up mtu size for veth interfaces - int_veth.link.set_mtu(self.veth_mtu) - phys_veth.link.set_mtu(self.veth_mtu) - - def _phys_br_patch_physical_bridge_with_integration_bridge( - self, br, physical_network, bridge, ip_wrapper): - int_veth_name = constants.VETH_INTEGRATION_PREFIX + bridge - phys_veth_name = constants.VETH_PHYSICAL_PREFIX + bridge - self._phys_br_prepare_create_veth(br, int_veth_name, phys_veth_name) - int_veth, phys_veth = self._phys_br_create_veth(br, int_veth_name, - phys_veth_name, - physical_network, - ip_wrapper) - self._phys_br_block_untranslated_traffic(br, physical_network) - self._phys_br_enable_veth_to_pass_traffic(int_veth, phys_veth) - - def setup_physical_bridges(self, bridge_mappings): - """Setup the physical network bridges. - - Creates physical network bridges and links them to the - integration bridge using veths. - - :param bridge_mappings: map physical network names to bridge names. - """ - self.phys_brs = {} - self.int_ofports = {} - self.phys_ofports = {} - ip_wrapper = ip_lib.IPWrapper(self.root_helper) - for physical_network, bridge in bridge_mappings.iteritems(): - LOG.info(_("Mapping physical network %(physical_network)s to " - "bridge %(bridge)s"), - {'physical_network': physical_network, - 'bridge': bridge}) - # setup physical bridge - if not ip_lib.device_exists(bridge, self.root_helper): - LOG.error(_("Bridge %(bridge)s for physical network " - "%(physical_network)s does not exist. 
Agent " - "terminated!"), - {'physical_network': physical_network, - 'bridge': bridge}) - raise SystemExit(1) - br = OVSBridge(bridge, self.root_helper, self.ryuapp) - br.setup_ofp() - msg = br.ofparser.OFPFlowMod(br.datapath, - table_id=ryu_ofp13.OFPTT_ALL, - command=ryu_ofp13.OFPFC_DELETE, - out_group=ryu_ofp13.OFPG_ANY, - out_port=ryu_ofp13.OFPP_ANY) - self.ryu_send_msg(msg) - actions = [br.ofparser.OFPActionOutput(ryu_ofp13.OFPP_NORMAL, 0)] - instructions = [br.ofparser.OFPInstructionActions( - ryu_ofp13.OFPIT_APPLY_ACTIONS, - actions)] - msg = br.ofparser.OFPFlowMod(br.datapath, - priority=1, - instructions=instructions) - self.ryu_send_msg(msg) - self.phys_brs[physical_network] = br - - self._phys_br_patch_physical_bridge_with_integration_bridge( - br, physical_network, bridge, ip_wrapper) - - def scan_ports(self, registered_ports, updated_ports=None): - cur_ports = self.int_br.get_vif_port_set() - self.int_br_device_count = len(cur_ports) - port_info = {'current': cur_ports} - if updated_ports is None: - updated_ports = set() - updated_ports.update(self._find_lost_vlan_port(registered_ports)) - if updated_ports: - # Some updated ports might have been removed in the - # meanwhile, and therefore should not be processed. - # In this case the updated port won't be found among - # current ports. - updated_ports &= cur_ports - if updated_ports: - port_info['updated'] = updated_ports - - if cur_ports == registered_ports: - # No added or removed ports to set, just return here - return port_info - - port_info['added'] = cur_ports - registered_ports - # Remove all the known ports not found on the integration bridge - port_info['removed'] = registered_ports - cur_ports - return port_info - - def _find_lost_vlan_port(self, registered_ports): - """Return ports which have lost their vlan tag. - - The returned value is a set of port ids of the ports concerned by a - vlan tag loss. 
- """ - port_tags = self.int_br.get_port_tag_dict() - changed_ports = set() - for lvm in self.local_vlan_map.values(): - for port in registered_ports: - if ( - port in lvm.vif_ports - and lvm.vif_ports[port].port_name in port_tags - and port_tags[lvm.vif_ports[port].port_name] != lvm.vlan - ): - LOG.info( - _("Port '%(port_name)s' has lost " - "its vlan tag '%(vlan_tag)d'!"), - {'port_name': lvm.vif_ports[port].port_name, - 'vlan_tag': lvm.vlan} - ) - changed_ports.add(port) - return changed_ports - - def update_ancillary_ports(self, registered_ports): - ports = set() - for bridge in self.ancillary_brs: - ports |= bridge.get_vif_port_set() - - if ports == registered_ports: - return - added = ports - registered_ports - removed = registered_ports - ports - return {'current': ports, - 'added': added, - 'removed': removed} - - def treat_vif_port(self, vif_port, port_id, network_id, network_type, - physical_network, segmentation_id, admin_state_up): - if vif_port: - # When this function is called for a port, the port should have - # an OVS ofport configured, as only these ports were considered - # for being treated. 
If that does not happen, it is a potential - # error condition of which operators should be aware - if not vif_port.ofport: - LOG.warn(_("VIF port: %s has no ofport configured, and might " - "not be able to transmit"), vif_port.vif_id) - if admin_state_up: - self.port_bound(vif_port, network_id, network_type, - physical_network, segmentation_id) - else: - self.port_dead(vif_port) - else: - LOG.debug(_("No VIF port for port %s defined on agent."), port_id) - - def setup_tunnel_port(self, port_name, remote_ip, tunnel_type): - ofport = self.tun_br.add_tunnel_port(port_name, - remote_ip, - self.local_ip, - tunnel_type, - self.vxlan_udp_port, - self.dont_fragment) - ofport_int = -1 - try: - ofport_int = int(ofport) - except (TypeError, ValueError): - LOG.exception(_("ofport should have a value that can be " - "interpreted as an integer")) - if ofport_int < 0: - LOG.error(_("Failed to set-up %(type)s tunnel port to %(ip)s"), - {'type': tunnel_type, 'ip': remote_ip}) - return 0 - - self.tun_br_ofports[tunnel_type][remote_ip] = ofport - # Add flow in default table to resubmit to the right - # tunelling table (lvid will be set in the latter) - match = self.tun_br.ofparser.OFPMatch(in_port=int(ofport)) - instructions = [self.tun_br.ofparser.OFPInstructionGotoTable( - table_id=constants.TUN_TABLE[tunnel_type])] - msg = self.tun_br.ofparser.OFPFlowMod(self.tun_br.datapath, - priority=1, - match=match, - instructions=instructions) - self.ryu_send_msg(msg) - - ofports = [int(p) for p in self.tun_br_ofports[tunnel_type].values()] - if ofports: - # Update flooding flows to include the new tunnel - for network_id, vlan_mapping in self.local_vlan_map.iteritems(): - if vlan_mapping.network_type == tunnel_type: - match = self.tun_br.ofparser.OFPMatch( - vlan_vid=int(vlan_mapping.vlan) | - ryu_ofp13.OFPVID_PRESENT) - actions = [ - self.tun_br.ofparser.OFPActionPopVlan(), - self.tun_br.ofparser.OFPActionSetField( - tunnel_id=int(vlan_mapping.segmentation_id))] - actions.extend( - 
self.tun_br.ofparser.OFPActionOutput(p, 0) - for p in ofports - ) - instructions = [ - self.tun_br.ofparser.OFPInstructionActions( - ryu_ofp13.OFPIT_APPLY_ACTIONS, - actions)] - msg = self.tun_br.ofparser.OFPFlowMod( - self.tun_br.datapath, - table_id=constants.FLOOD_TO_TUN, - priority=1, - match=match, - instructions=instructions) - self.ryu_send_msg(msg) - return ofport - - def treat_devices_added_or_updated(self, devices): - resync = False - for device in devices: - LOG.debug(_("Processing port %s"), device) - port = self.int_br.get_vif_port_by_id(device) - if not port: - # The port has disappeared and should not be processed - # There is no need to put the port DOWN in the plugin as - # it never went up in the first place - LOG.info(_("Port %s was not found on the integration bridge " - "and will therefore not be processed"), device) - continue - try: - details = self.plugin_rpc.get_device_details(self.context, - device, - self.agent_id) - except Exception as e: - LOG.debug(_("Unable to get port details for " - "%(device)s: %(e)s"), - {'device': device, 'e': e}) - resync = True - continue - if 'port_id' in details: - LOG.info(_("Port %(device)s updated. 
Details: %(details)s"), - {'device': device, 'details': details}) - self.treat_vif_port(port, details['port_id'], - details['network_id'], - details['network_type'], - details['physical_network'], - details['segmentation_id'], - details['admin_state_up']) - - # update plugin about port status - if details.get('admin_state_up'): - LOG.debug(_("Setting status for %s to UP"), device) - self.plugin_rpc.update_device_up( - self.context, device, self.agent_id, cfg.CONF.host) - else: - LOG.debug(_("Setting status for %s to DOWN"), device) - self.plugin_rpc.update_device_down( - self.context, device, self.agent_id, cfg.CONF.host) - LOG.info(_("Configuration for device %s completed."), device) - else: - LOG.warn(_("Device %s not defined on plugin"), device) - if (port and port.ofport != -1): - self.port_dead(port) - return resync - - def treat_ancillary_devices_added(self, devices): - resync = False - for device in devices: - LOG.info(_("Ancillary Port %s added"), device) - try: - self.plugin_rpc.get_device_details(self.context, device, - self.agent_id) - except Exception as e: - LOG.debug(_("Unable to get port details for " - "%(device)s: %(e)s"), - {'device': device, 'e': e}) - resync = True - continue - - # update plugin about port status - self.plugin_rpc.update_device_up(self.context, - device, - self.agent_id, - cfg.CONF.host) - return resync - - def treat_devices_removed(self, devices): - resync = False - self.sg_agent.remove_devices_filter(devices) - for device in devices: - LOG.info(_("Attachment %s removed"), device) - try: - self.plugin_rpc.update_device_down(self.context, - device, - self.agent_id, - cfg.CONF.host) - except Exception as e: - LOG.debug(_("port_removed failed for %(device)s: %(e)s"), - {'device': device, 'e': e}) - resync = True - continue - self.port_unbound(device) - return resync - - def treat_ancillary_devices_removed(self, devices): - resync = False - for device in devices: - LOG.info(_("Attachment %s removed"), device) - try: - details = 
self.plugin_rpc.update_device_down(self.context, - device, - self.agent_id, - cfg.CONF.host) - except Exception as e: - LOG.debug(_("port_removed failed for %(device)s: %(e)s"), - {'device': device, 'e': e}) - resync = True - continue - if details['exists']: - LOG.info(_("Port %s updated."), device) - # Nothing to do regarding local networking - else: - LOG.debug(_("Device %s not defined on plugin"), device) - return resync - - def process_network_ports(self, port_info): - resync_add = False - resync_removed = False - # If there is an exception while processing security groups ports - # will not be wired anyway, and a resync will be triggered - self.sg_agent.setup_port_filters(port_info.get('added', set()), - port_info.get('updated', set())) - # VIF wiring needs to be performed always for 'new' devices. - # For updated ports, re-wiring is not needed in most cases, but needs - # to be performed anyway when the admin state of a device is changed. - # A device might be both in the 'added' and 'updated' - # list at the same time; avoid processing it twice. 
- devices_added_updated = (port_info.get('added', set()) | - port_info.get('updated', set())) - if devices_added_updated: - start = time.time() - resync_add = self.treat_devices_added_or_updated( - devices_added_updated) - LOG.debug(_("process_network_ports - iteration:%(iter_num)d - " - "treat_devices_added_or_updated completed " - "in %(elapsed).3f"), - {'iter_num': self.iter_num, - 'elapsed': time.time() - start}) - if 'removed' in port_info: - start = time.time() - resync_removed = self.treat_devices_removed(port_info['removed']) - LOG.debug(_("process_network_ports - iteration:%(iter_num)d - " - "treat_devices_removed completed in %(elapsed).3f"), - {'iter_num': self.iter_num, - 'elapsed': time.time() - start}) - # If one of the above opertaions fails => resync with plugin - return (resync_add | resync_removed) - - def process_ancillary_network_ports(self, port_info): - resync_add = False - resync_removed = False - if 'added' in port_info: - start = time.time() - resync_add = self.treat_ancillary_devices_added(port_info['added']) - LOG.debug(_("process_ancillary_network_ports - iteration: " - "%(iter_num)d - treat_ancillary_devices_added " - "completed in %(elapsed).3f"), - {'iter_num': self.iter_num, - 'elapsed': time.time() - start}) - if 'removed' in port_info: - start = time.time() - resync_removed = self.treat_ancillary_devices_removed( - port_info['removed']) - LOG.debug(_("process_ancillary_network_ports - iteration: " - "%(iter_num)d - treat_ancillary_devices_removed " - "completed in %(elapsed).3f"), - {'iter_num': self.iter_num, - 'elapsed': time.time() - start}) - - # If one of the above opertaions fails => resync with plugin - return (resync_add | resync_removed) - - def tunnel_sync(self): - resync = False - try: - for tunnel_type in self.tunnel_types: - details = self.plugin_rpc.tunnel_sync(self.context, - self.local_ip, - tunnel_type) - tunnels = details['tunnels'] - for tunnel in tunnels: - if self.local_ip != tunnel['ip_address']: - tun_name = 
self._create_tunnel_port_name( - tunnel_type, tunnel['ip_address']) - if not tun_name: - continue - self.setup_tunnel_port(tun_name, - tunnel['ip_address'], - tunnel_type) - except Exception as e: - LOG.debug(_("Unable to sync tunnel IP %(local_ip)s: %(e)s"), - {'local_ip': self.local_ip, 'e': e}) - resync = True - return resync - - def _agent_has_updates(self, polling_manager): - return (polling_manager.is_polling_required or - self.updated_ports or - self.sg_agent.firewall_refresh_needed()) - - def _port_info_has_changes(self, port_info): - return (port_info.get('added') or - port_info.get('removed') or - port_info.get('updated')) - - def ovsdb_monitor_loop(self, polling_manager=None): - if not polling_manager: - polling_manager = polling.AlwaysPoll() - - sync = True - ports = set() - updated_ports_copy = set() - ancillary_ports = set() - tunnel_sync = True - while True: - start = time.time() - port_stats = {'regular': {'added': 0, 'updated': 0, 'removed': 0}, - 'ancillary': {'added': 0, 'removed': 0}} - LOG.debug(_("Agent ovsdb_monitor_loop - " - "iteration:%d started"), - self.iter_num) - if sync: - LOG.info(_("Agent out of sync with plugin!")) - ports.clear() - ancillary_ports.clear() - sync = False - polling_manager.force_polling() - # Notify the plugin of tunnel IP - if self.enable_tunneling and tunnel_sync: - LOG.info(_("Agent tunnel out of sync with plugin!")) - try: - tunnel_sync = self.tunnel_sync() - except Exception: - LOG.exception(_("Error while synchronizing tunnels")) - tunnel_sync = True - if self._agent_has_updates(polling_manager): - try: - LOG.debug(_("Agent ovsdb_monitor_loop - " - "iteration:%(iter_num)d - " - "starting polling. Elapsed:%(elapsed).3f"), - {'iter_num': self.iter_num, - 'elapsed': time.time() - start}) - # Save updated ports dict to perform rollback in - # case resync would be needed, and then clear - # self.updated_ports. 
As the greenthread should not yield - # between these two statements, this will be thread-safe - updated_ports_copy = self.updated_ports - self.updated_ports = set() - port_info = self.scan_ports(ports, updated_ports_copy) - ports = port_info['current'] - LOG.debug(_("Agent ovsdb_monitor_loop - " - "iteration:%(iter_num)d - " - "port information retrieved. " - "Elapsed:%(elapsed).3f"), - {'iter_num': self.iter_num, - 'elapsed': time.time() - start}) - # Secure and wire/unwire VIFs and update their status - # on Neutron server - if (self._port_info_has_changes(port_info) or - self.sg_agent.firewall_refresh_needed()): - LOG.debug(_("Starting to process devices in:%s"), - port_info) - # If treat devices fails - must resync with plugin - sync = self.process_network_ports(port_info) - LOG.debug(_("Agent ovsdb_monitor_loop - " - "iteration:%(iter_num)d - " - "ports processed. Elapsed:%(elapsed).3f"), - {'iter_num': self.iter_num, - 'elapsed': time.time() - start}) - port_stats['regular']['added'] = ( - len(port_info.get('added', []))) - port_stats['regular']['updated'] = ( - len(port_info.get('updated', []))) - port_stats['regular']['removed'] = ( - len(port_info.get('removed', []))) - # Treat ancillary devices if they exist - if self.ancillary_brs: - port_info = self.update_ancillary_ports( - ancillary_ports) - LOG.debug(_("Agent ovsdb_monitor_loop - " - "iteration:%(iter_num)d - " - "ancillary port info retrieved. " - "Elapsed:%(elapsed).3f"), - {'iter_num': self.iter_num, - 'elapsed': time.time() - start}) - - if port_info: - rc = self.process_ancillary_network_ports( - port_info) - LOG.debug(_("Agent ovsdb_monitor_loop - " - "iteration:" - "%(iter_num)d - ancillary ports " - "processed. 
Elapsed:%(elapsed).3f"), - {'iter_num': self.iter_num, - 'elapsed': time.time() - start}) - ancillary_ports = port_info['current'] - port_stats['ancillary']['added'] = ( - len(port_info.get('added', []))) - port_stats['ancillary']['removed'] = ( - len(port_info.get('removed', []))) - sync = sync | rc - - polling_manager.polling_completed() - except Exception: - LOG.exception(_("Error while processing VIF ports")) - # Put the ports back in self.updated_port - self.updated_ports |= updated_ports_copy - sync = True - - # sleep till end of polling interval - elapsed = (time.time() - start) - LOG.debug(_("Agent ovsdb_monitor_loop - iteration:%(iter_num)d " - "completed. Processed ports statistics:" - "%(port_stats)s. Elapsed:%(elapsed).3f"), - {'iter_num': self.iter_num, - 'port_stats': port_stats, - 'elapsed': elapsed}) - if (elapsed < self.polling_interval): - time.sleep(self.polling_interval - elapsed) - else: - LOG.debug(_("Loop iteration exceeded interval " - "(%(polling_interval)s vs. %(elapsed)s)!"), - {'polling_interval': self.polling_interval, - 'elapsed': elapsed}) - self.iter_num = self.iter_num + 1 - - def daemon_loop(self): - with polling.get_polling_manager( - self.minimize_polling, - self.root_helper, - self.ovsdb_monitor_respawn_interval) as pm: - - self.ovsdb_monitor_loop(polling_manager=pm) - - -def create_agent_config_map(config): - """Create a map of agent config parameters. 
- - :param config: an instance of cfg.CONF - :returns: a map of agent configuration parameters - """ - try: - bridge_mappings = n_utils.parse_mappings(config.OVS.bridge_mappings) - except ValueError as e: - raise ValueError(_("Parsing bridge_mappings failed: %s.") % e) - - kwargs = dict( - integ_br=config.OVS.integration_bridge, - tun_br=config.OVS.tunnel_bridge, - local_ip=config.OVS.local_ip, - bridge_mappings=bridge_mappings, - root_helper=config.AGENT.root_helper, - polling_interval=config.AGENT.polling_interval, - minimize_polling=config.AGENT.minimize_polling, - tunnel_types=config.AGENT.tunnel_types, - veth_mtu=config.AGENT.veth_mtu, - l2_population=False, - ovsdb_monitor_respawn_interval=constants.DEFAULT_OVSDBMON_RESPAWN, - ) - - # If enable_tunneling is TRUE, set tunnel_type to default to GRE - if config.OVS.enable_tunneling and not kwargs['tunnel_types']: - kwargs['tunnel_types'] = [p_const.TYPE_GRE] - - # Verify the tunnel_types specified are valid - for tun in kwargs['tunnel_types']: - if tun not in constants.TUNNEL_NETWORK_TYPES: - msg = _('Invalid tunnel type specificed: %s'), tun - raise ValueError(msg) - if not kwargs['local_ip']: - msg = _('Tunneling cannot be enabled without a valid local_ip.') - raise ValueError(msg) - - return kwargs diff --git a/neutron/plugins/ofagent/common/__init__.py b/neutron/plugins/ofagent/common/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/neutron/plugins/ofagent/common/config.py b/neutron/plugins/ofagent/common/config.py deleted file mode 100644 index 759d3df1d..000000000 --- a/neutron/plugins/ofagent/common/config.py +++ /dev/null @@ -1,33 +0,0 @@ -# Copyright (C) 2014 VA Linux Systems Japan K.K. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# @author: Fumihiko Kakuma, VA Linux Systems Japan K.K. - -from oslo.config import cfg - -from neutron.agent.common import config -from neutron.plugins.openvswitch.common import config as ovs_config - - -agent_opts = [ - cfg.IntOpt('get_datapath_retry_times', default=60, - help=_("Number of seconds to retry acquiring " - "an Open vSwitch datapath")), -] - - -cfg.CONF.register_opts(ovs_config.ovs_opts, 'OVS') -cfg.CONF.register_opts(ovs_config.agent_opts, 'AGENT') -cfg.CONF.register_opts(agent_opts, 'AGENT') -config.register_agent_state_opts_helper(cfg.CONF) -config.register_root_helper(cfg.CONF) diff --git a/neutron/plugins/oneconvergence/README b/neutron/plugins/oneconvergence/README deleted file mode 100644 index 11b26545a..000000000 --- a/neutron/plugins/oneconvergence/README +++ /dev/null @@ -1,32 +0,0 @@ -One Convergence Neutron Plugin to implement the Neutron v2.0 API. The plugin -works with One Convergence NVSD controller to provide network virtualization -functionality. - -The plugin is enabled with the following configuration line in neutron.conf: - -core_plugin = neutron.plugins.oneconvergence.plugin.OneConvergencePluginV2 - -The configuration parameters required for the plugin are specified in the file -etc/neutron/plugins/oneconvergence/nvsdplugin.ini. The configuration file contains -description of the different parameters. 
- -To enable One Convergence Neutron Plugin with devstack and configure the required -parameters, use the following lines in localrc: - -Q_PLUGIN=oneconvergence - -disable_service n-net -enable_service q-agt -enable_service q-dhcp -enable_service q-svc -enable_service q-l3 -enable_service q-meta -enable_service neutron - -NVSD_IP= -NVSD_PORT= -NVSD_USER= -NVSD_PASSWD= - -The NVSD controller configuration should be specified in nvsdplugin.ini before -invoking stack.sh. diff --git a/neutron/plugins/oneconvergence/__init__.py b/neutron/plugins/oneconvergence/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/neutron/plugins/oneconvergence/agent/__init__.py b/neutron/plugins/oneconvergence/agent/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/neutron/plugins/oneconvergence/agent/nvsd_neutron_agent.py b/neutron/plugins/oneconvergence/agent/nvsd_neutron_agent.py deleted file mode 100644 index 377cdda1e..000000000 --- a/neutron/plugins/oneconvergence/agent/nvsd_neutron_agent.py +++ /dev/null @@ -1,176 +0,0 @@ -# Copyright 2014 OneConvergence, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Kedar Kulkarni, One Convergence, Inc. 
- -"""NVSD agent code for security group events.""" - -import socket -import sys -import time - -import eventlet -eventlet.monkey_patch() - -from neutron.agent.linux import ovs_lib -from neutron.agent import rpc as agent_rpc -from neutron.agent import securitygroups_rpc as sg_rpc -from neutron.common import config as common_config -from neutron.common import rpc_compat -from neutron.common import topics -from neutron import context as n_context -from neutron.extensions import securitygroup as ext_sg -from neutron.openstack.common import log as logging -from neutron.plugins.oneconvergence.lib import config - -LOG = logging.getLogger(__name__) - - -class NVSDAgentRpcCallback(rpc_compat.RpcCallback): - - RPC_API_VERSION = '1.0' - - def __init__(self, context, agent, sg_agent): - super(NVSDAgentRpcCallback, self).__init__() - self.context = context - self.agent = agent - self.sg_agent = sg_agent - - def port_update(self, context, **kwargs): - LOG.debug(_("port_update received: %s"), kwargs) - port = kwargs.get('port') - # Validate that port is on OVS - vif_port = self.agent.int_br.get_vif_port_by_id(port['id']) - if not vif_port: - return - - if ext_sg.SECURITYGROUPS in port: - self.sg_agent.refresh_firewall() - - -class SecurityGroupServerRpcApi(rpc_compat.RpcProxy, - sg_rpc.SecurityGroupServerRpcApiMixin): - def __init__(self, topic): - super(SecurityGroupServerRpcApi, self).__init__( - topic=topic, default_version=sg_rpc.SG_RPC_VERSION) - - -class SecurityGroupAgentRpcCallback( - rpc_compat.RpcCallback, - sg_rpc.SecurityGroupAgentRpcCallbackMixin): - - RPC_API_VERSION = sg_rpc.SG_RPC_VERSION - - def __init__(self, context, sg_agent): - super(SecurityGroupAgentRpcCallback, self).__init__() - self.context = context - self.sg_agent = sg_agent - - -class SecurityGroupAgentRpc(sg_rpc.SecurityGroupAgentRpcMixin): - - def __init__(self, context, root_helper): - self.context = context - - self.plugin_rpc = SecurityGroupServerRpcApi(topics.PLUGIN) - self.root_helper = 
root_helper - self.init_firewall() - - -class NVSDNeutronAgent(rpc_compat.RpcCallback): - # history - # 1.0 Initial version - # 1.1 Support Security Group RPC - RPC_API_VERSION = '1.1' - - def __init__(self, integ_br, root_helper, polling_interval): - super(NVSDNeutronAgent, self).__init__() - self.int_br = ovs_lib.OVSBridge(integ_br, root_helper) - self.polling_interval = polling_interval - self.root_helper = root_helper - self.setup_rpc() - self.ports = set() - - def setup_rpc(self): - - self.host = socket.gethostname() - self.agent_id = 'nvsd-q-agent.%s' % self.host - LOG.info(_("RPC agent_id: %s"), self.agent_id) - - self.topic = topics.AGENT - self.context = n_context.get_admin_context_without_session() - self.sg_agent = SecurityGroupAgentRpc(self.context, - self.root_helper) - - # RPC network init - # Handle updates from service - self.callback_oc = NVSDAgentRpcCallback(self.context, - self, self.sg_agent) - self.callback_sg = SecurityGroupAgentRpcCallback(self.context, - self.sg_agent) - self.endpoints = [self.callback_oc, self.callback_sg] - # Define the listening consumer for the agent - consumers = [[topics.PORT, topics.UPDATE], - [topics.SECURITY_GROUP, topics.UPDATE]] - self.connection = agent_rpc.create_consumers(self.endpoints, - self.topic, - consumers) - - def _update_ports(self, registered_ports): - ports = self.int_br.get_vif_port_set() - if ports == registered_ports: - return - added = ports - registered_ports - removed = registered_ports - ports - return {'current': ports, - 'added': added, - 'removed': removed} - - def _process_devices_filter(self, port_info): - if 'added' in port_info: - self.sg_agent.prepare_devices_filter(port_info['added']) - if 'removed' in port_info: - self.sg_agent.remove_devices_filter(port_info['removed']) - - def daemon_loop(self): - """Main processing loop for OC Plugin Agent.""" - - ports = set() - while True: - try: - port_info = self._update_ports(ports) - if port_info: - LOG.debug(_("Port list is updated")) - 
self._process_devices_filter(port_info) - ports = port_info['current'] - self.ports = ports - except Exception: - LOG.exception(_("Error in agent event loop")) - - LOG.debug(_("AGENT looping.....")) - time.sleep(self.polling_interval) - - -def main(): - common_config.init(sys.argv[1:]) - common_config.setup_logging(config.CONF) - - integ_br = config.AGENT.integration_bridge - root_helper = config.AGENT.root_helper - polling_interval = config.AGENT.polling_interval - agent = NVSDNeutronAgent(integ_br, root_helper, polling_interval) - LOG.info(_("NVSD Agent initialized successfully, now running... ")) - - # Start everything. - agent.daemon_loop() diff --git a/neutron/plugins/oneconvergence/lib/__init__.py b/neutron/plugins/oneconvergence/lib/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/neutron/plugins/oneconvergence/lib/config.py b/neutron/plugins/oneconvergence/lib/config.py deleted file mode 100644 index 2bbf086a3..000000000 --- a/neutron/plugins/oneconvergence/lib/config.py +++ /dev/null @@ -1,57 +0,0 @@ -# Copyright 2014 OneConvergence, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -""" Register the configuration options""" - -from oslo.config import cfg - -from neutron.agent.common import config - - -NVSD_OPT = [ - cfg.StrOpt('nvsd_ip', - default='127.0.0.1', - help=_("NVSD Controller IP address")), - cfg.IntOpt('nvsd_port', - default=8082, - help=_("NVSD Controller Port number")), - cfg.StrOpt('nvsd_user', - default='ocplugin', - help=_("NVSD Controller username")), - cfg.StrOpt('nvsd_passwd', - default='oc123', secret=True, - help=_("NVSD Controller password")), - cfg.IntOpt('request_timeout', - default=30, - help=_("NVSD controller REST API request timeout in seconds")), - cfg.IntOpt('nvsd_retries', default=0, - help=_("Number of login retries to NVSD controller")) -] - -agent_opts = [ - cfg.StrOpt('integration_bridge', default='br-int', - help=_("integration bridge")), - cfg.IntOpt('polling_interval', default=2, - help=_("The number of seconds the agent will wait between " - "polling for local device changes.")), -] - -cfg.CONF.register_opts(NVSD_OPT, "nvsd") -cfg.CONF.register_opts(agent_opts, "AGENT") - -config.register_root_helper(cfg.CONF) - -CONF = cfg.CONF -AGENT = cfg.CONF.AGENT diff --git a/neutron/plugins/oneconvergence/lib/exception.py b/neutron/plugins/oneconvergence/lib/exception.py deleted file mode 100644 index b6864b13f..000000000 --- a/neutron/plugins/oneconvergence/lib/exception.py +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright 2014 OneConvergence, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -"""NVSD Exception Definitions.""" - -from neutron.common import exceptions as n_exc - - -class NVSDAPIException(n_exc.NeutronException): - '''Base NVSDplugin Exception.''' - message = _("An unknown nvsd plugin exception occurred: %(reason)s") - - -class RequestTimeout(NVSDAPIException): - message = _("The request has timed out.") - - -class UnAuthorizedException(NVSDAPIException): - message = _("Invalid access credentials to the Server.") - - -class NotFoundException(NVSDAPIException): - message = _("A resource is not found: %(reason)s") - - -class BadRequestException(NVSDAPIException): - message = _("Request sent to server is invalid: %(reason)s") - - -class ServerException(NVSDAPIException): - message = _("Internal Server Error: %(reason)s") - - -class ConnectionClosedException(NVSDAPIException): - message = _("Connection is closed by the server.") - - -class ForbiddenException(NVSDAPIException): - message = _("The request is forbidden access to the resource: %(reason)s") - - -class InternalServerError(NVSDAPIException): - message = _("Internal Server Error from NVSD controller: %(reason)s") diff --git a/neutron/plugins/oneconvergence/lib/nvsd_db.py b/neutron/plugins/oneconvergence/lib/nvsd_db.py deleted file mode 100644 index 00a623a85..000000000 --- a/neutron/plugins/oneconvergence/lib/nvsd_db.py +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright 2014 OneConvergence, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# -# @author: Kedar Kulkarni, One Convergence, Inc. - -from neutron.db import api as db -from neutron.db import models_v2 -from neutron.db import securitygroups_db as sg_db -from neutron.extensions import securitygroup as ext_sg -from neutron import manager - - -def get_port_from_device(port_id): - session = db.get_session() - sg_binding_port = sg_db.SecurityGroupPortBinding.port_id - - query = session.query(models_v2.Port, - sg_db.SecurityGroupPortBinding.security_group_id) - query = query.outerjoin(sg_db.SecurityGroupPortBinding, - models_v2.Port.id == sg_binding_port) - query = query.filter(models_v2.Port.id == port_id) - port_and_sgs = query.all() - if not port_and_sgs: - return None - port = port_and_sgs[0][0] - plugin = manager.NeutronManager.get_plugin() - port_dict = plugin._make_port_dict(port) - port_dict[ext_sg.SECURITYGROUPS] = [ - sg_id for tport, sg_id in port_and_sgs if sg_id] - port_dict['security_group_rules'] = [] - port_dict['security_group_source_groups'] = [] - port_dict['fixed_ips'] = [ip['ip_address'] - for ip in port['fixed_ips']] - return port_dict diff --git a/neutron/plugins/oneconvergence/lib/nvsdlib.py b/neutron/plugins/oneconvergence/lib/nvsdlib.py deleted file mode 100644 index 09e8a5b16..000000000 --- a/neutron/plugins/oneconvergence/lib/nvsdlib.py +++ /dev/null @@ -1,352 +0,0 @@ -# Copyright 2014 OneConvergence, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# -# @author: Kedar Kulkarni, One Convergence, Inc. - -"""Intermidiate NVSD Library.""" - -from neutron.openstack.common import excutils -from neutron.openstack.common import jsonutils as json -from neutron.openstack.common import log as logging -import neutron.plugins.oneconvergence.lib.exception as nvsdexception -from neutron.plugins.oneconvergence.lib import plugin_helper - -LOG = logging.getLogger(__name__) - -NETWORKS_URI = "/pluginhandler/ocplugin/tenant/%s/lnetwork/" -NETWORK_URI = NETWORKS_URI + "%s" -GET_ALL_NETWORKS = "/pluginhandler/ocplugin/tenant/getallnetworks" - -SUBNETS_URI = NETWORK_URI + "/lsubnet/" -SUBNET_URI = SUBNETS_URI + "%s" -GET_ALL_SUBNETS = "/pluginhandler/ocplugin/tenant/getallsubnets" - -PORTS_URI = NETWORK_URI + "/lport/" -PORT_URI = PORTS_URI + "%s" - -EXT_URI = "/pluginhandler/ocplugin/ext/tenant/%s" -FLOATING_IPS_URI = EXT_URI + "/floatingip/" -FLOATING_IP_URI = FLOATING_IPS_URI + "%s" - -ROUTERS_URI = EXT_URI + "/lrouter/" -ROUTER_URI = ROUTERS_URI + "%s" - -METHODS = {"POST": "create", - "PUT": "update", - "DELETE": "delete", - "GET": "get"} - - -class NVSDApi(object): - - def build_error_msg(self, method, resource, tenant_id, resource_id): - if method == "POST": - msg = _("Could not create a %(resource)s under tenant " - "%(tenant_id)s") % {'resource': resource, - 'tenant_id': tenant_id} - elif resource_id: - msg = _("Failed to %(method)s %(resource)s " - "id=%(resource_id)s") % {'method': METHODS[method], - 'resource': resource, - 'resource_id': resource_id - } - else: - msg = _("Failed to %(method)s %(resource)s") % { - 'method': METHODS[method], 'resource': resource} - return msg - - def set_connection(self): - self.nvsdcontroller = plugin_helper.initialize_plugin_helper() - self.nvsdcontroller.login() - - def send_request(self, method, uri, body=None, resource=None, - tenant_id='', resource_id=None): - """Issue a request to NVSD controller.""" - - try: - result = self.nvsdcontroller.request(method, uri, body=body) - except 
nvsdexception.NVSDAPIException as e: - with excutils.save_and_reraise_exception() as ctxt: - msg = self.build_error_msg(method, resource, tenant_id, - resource_id) - LOG.error(msg) - # Modifying the reason message without disturbing the exception - # info - ctxt.value = type(e)(reason=msg) - return result - - def create_network(self, network): - - tenant_id = network['tenant_id'] - router_external = network['router:external'] is True - - network_obj = { - "name": network['name'], - "tenant_id": tenant_id, - "shared": network['shared'], - "admin_state_up": network['admin_state_up'], - "router:external": router_external - } - - uri = NETWORKS_URI % tenant_id - - response = self.send_request("POST", uri, body=json.dumps(network_obj), - resource='network', tenant_id=tenant_id) - - nvsd_net = response.json() - - LOG.debug(_("Network %(id)s created under tenant %(tenant_id)s"), - {'id': nvsd_net['id'], 'tenant_id': tenant_id}) - - return nvsd_net - - def update_network(self, network, network_update): - - tenant_id = network['tenant_id'] - network_id = network['id'] - - uri = NETWORK_URI % (tenant_id, network_id) - - self.send_request("PUT", uri, - body=json.dumps(network_update), - resource='network', tenant_id=tenant_id, - resource_id=network_id) - - LOG.debug(_("Network %(id)s updated under tenant %(tenant_id)s"), - {'id': network_id, 'tenant_id': tenant_id}) - - def delete_network(self, network, subnets=[]): - - tenant_id = network['tenant_id'] - network_id = network['id'] - - ports = self._get_ports(tenant_id, network_id) - - for port in ports: - self.delete_port(port['id'], port) - - for subnet in subnets: - self.delete_subnet(subnet) - - path = NETWORK_URI % (tenant_id, network_id) - - self.send_request("DELETE", path, resource='network', - tenant_id=tenant_id, resource_id=network_id) - - LOG.debug(_("Network %(id)s deleted under tenant %(tenant_id)s"), - {'id': network_id, 'tenant_id': tenant_id}) - - def create_subnet(self, subnet): - - tenant_id = 
subnet['tenant_id'] - network_id = subnet['network_id'] - - uri = SUBNETS_URI % (tenant_id, network_id) - - self.send_request("POST", uri, body=json.dumps(subnet), - resource='subnet', tenant_id=tenant_id) - - LOG.debug(_("Subnet %(id)s created under tenant %(tenant_id)s"), - {'id': subnet['id'], 'tenant_id': tenant_id}) - - def delete_subnet(self, subnet): - - tenant_id = subnet['tenant_id'] - network_id = subnet['network_id'] - subnet_id = subnet['id'] - - uri = SUBNET_URI % (tenant_id, network_id, subnet_id) - - self.send_request("DELETE", uri, resource='subnet', - tenant_id=tenant_id, resource_id=subnet_id) - - LOG.debug(_("Subnet %(id)s deleted under tenant %(tenant_id)s"), - {'id': subnet_id, 'tenant_id': tenant_id}) - - def update_subnet(self, subnet, subnet_update): - - tenant_id = subnet['tenant_id'] - network_id = subnet['network_id'] - subnet_id = subnet['id'] - - uri = SUBNET_URI % (tenant_id, network_id, subnet_id) - - self.send_request("PUT", uri, - body=json.dumps(subnet_update), - resource='subnet', tenant_id=tenant_id, - resource_id=subnet_id) - - LOG.debug(_("Subnet %(id)s updated under tenant %(tenant_id)s"), - {'id': subnet_id, 'tenant_id': tenant_id}) - - def create_port(self, tenant_id, port): - - network_id = port["network_id"] - fixed_ips = port.get("fixed_ips") - ip_address = None - subnet_id = None - - if fixed_ips: - ip_address = fixed_ips[0].get("ip_address") - subnet_id = fixed_ips[0].get("subnet_id") - - lport = { - "id": port["id"], - "name": port["name"], - "device_id": port["device_id"], - "device_owner": port["device_owner"], - "mac_address": port["mac_address"], - "ip_address": ip_address, - "subnet_id": subnet_id, - "admin_state_up": port["admin_state_up"], - "network_id": network_id, - "status": port["status"] - } - - path = PORTS_URI % (tenant_id, network_id) - - self.send_request("POST", path, body=json.dumps(lport), - resource='port', tenant_id=tenant_id) - - LOG.debug(_("Port %(id)s created under tenant %(tenant_id)s"), - 
{'id': port['id'], 'tenant_id': tenant_id}) - - def update_port(self, tenant_id, port, port_update): - - network_id = port['network_id'] - port_id = port['id'] - - lport = {} - for k in ('admin_state_up', 'name', 'device_id', 'device_owner'): - if k in port_update: - lport[k] = port_update[k] - - fixed_ips = port_update.get('fixed_ips', None) - if fixed_ips: - lport["ip_address"] = fixed_ips[0].get("ip_address") - lport["subnet_id"] = fixed_ips[0].get("subnet_id") - - uri = PORT_URI % (tenant_id, network_id, port_id) - - self.send_request("PUT", uri, body=json.dumps(lport), - resource='port', tenant_id=tenant_id, - resource_id=port_id) - - LOG.debug(_("Port %(id)s updated under tenant %(tenant_id)s"), - {'id': port_id, 'tenant_id': tenant_id}) - - def delete_port(self, port_id, port): - - tenant_id = port['tenant_id'] - network_id = port['network_id'] - - uri = PORT_URI % (tenant_id, network_id, port_id) - - self.send_request("DELETE", uri, resource='port', tenant_id=tenant_id, - resource_id=port_id) - - LOG.debug(_("Port %(id)s deleted under tenant %(tenant_id)s"), - {'id': port_id, 'tenant_id': tenant_id}) - - def _get_ports(self, tenant_id, network_id): - - uri = PORTS_URI % (tenant_id, network_id) - - response = self.send_request("GET", uri, resource='ports', - tenant_id=tenant_id) - - return response.json() - - def create_floatingip(self, floating_ip): - - tenant_id = floating_ip['tenant_id'] - - uri = FLOATING_IPS_URI % tenant_id - - self.send_request("POST", uri, body=json.dumps(floating_ip), - resource='floating_ip', - tenant_id=tenant_id) - - LOG.debug(_("Flatingip %(id)s created under tenant %(tenant_id)s"), - {'id': floating_ip['id'], 'tenant_id': tenant_id}) - - def update_floatingip(self, floating_ip, floating_ip_update): - - tenant_id = floating_ip['tenant_id'] - - floating_ip_id = floating_ip['id'] - - uri = FLOATING_IP_URI % (tenant_id, floating_ip_id) - - self.send_request("PUT", uri, - body=json.dumps(floating_ip_update['floatingip']), - 
resource='floating_ip', - tenant_id=tenant_id, - resource_id=floating_ip_id) - - LOG.debug(_("Flatingip %(id)s updated under tenant %(tenant_id)s"), - {'id': floating_ip_id, 'tenant_id': tenant_id}) - - def delete_floatingip(self, floating_ip): - - tenant_id = floating_ip['tenant_id'] - - floating_ip_id = floating_ip['id'] - - uri = FLOATING_IP_URI % (tenant_id, floating_ip_id) - - self.send_request("DELETE", uri, resource='floating_ip', - tenant_id=tenant_id, resource_id=floating_ip_id) - - LOG.debug(_("Flatingip %(id)s deleted under tenant %(tenant_id)s"), - {'id': floating_ip_id, 'tenant_id': tenant_id}) - - def create_router(self, router): - - tenant_id = router['tenant_id'] - - uri = ROUTERS_URI % tenant_id - - self.send_request("POST", uri, body=json.dumps(router), - resource='router', - tenant_id=tenant_id) - - LOG.debug(_("Router %(id)s created under tenant %(tenant_id)s"), - {'id': router['id'], 'tenant_id': tenant_id}) - - def update_router(self, router): - - tenant_id = router['tenant_id'] - - router_id = router['id'] - - uri = ROUTER_URI % (tenant_id, router_id) - - self.send_request("PUT", uri, - body=json.dumps(router), - resource='router', tenant_id=tenant_id, - resource_id=router_id) - - LOG.debug(_("Router %(id)s updated under tenant %(tenant_id)s"), - {'id': router_id, 'tenant_id': tenant_id}) - - def delete_router(self, tenant_id, router_id): - - uri = ROUTER_URI % (tenant_id, router_id) - - self.send_request("DELETE", uri, resource='router', - tenant_id=tenant_id, resource_id=router_id) - - LOG.debug(_("Router %(id)s deleted under tenant %(tenant_id)s"), - {'id': router_id, 'tenant_id': tenant_id}) diff --git a/neutron/plugins/oneconvergence/lib/plugin_helper.py b/neutron/plugins/oneconvergence/lib/plugin_helper.py deleted file mode 100644 index 4158257fd..000000000 --- a/neutron/plugins/oneconvergence/lib/plugin_helper.py +++ /dev/null @@ -1,186 +0,0 @@ -# Copyright 2014 OneConvergence, Inc. All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Kedar Kulkarni, One Convergence, Inc. - -"""Library to talk to NVSD controller.""" - -import httplib -import time - -from oslo.config import cfg -import requests -from six.moves.urllib import parse - -from neutron.openstack.common import jsonutils as json -from neutron.openstack.common import log as logging -import neutron.plugins.oneconvergence.lib.exception as exception - -LOG = logging.getLogger(__name__) - - -def initialize_plugin_helper(): - nvsdcontroller = NVSDController() - return nvsdcontroller - - -class NVSDController(object): - - """Encapsulates the NVSD Controller details.""" - - def __init__(self): - - self._host = cfg.CONF.nvsd.nvsd_ip - self._port = cfg.CONF.nvsd.nvsd_port - self._user = cfg.CONF.nvsd.nvsd_user - self._password = cfg.CONF.nvsd.nvsd_passwd - self._retries = cfg.CONF.nvsd.nvsd_retries - self._request_timeout = float(cfg.CONF.nvsd.request_timeout) - self.api_url = 'http://' + self._host + ':' + str(self._port) - - self.pool = requests.Session() - - self.auth_token = None - - def do_request(self, method, url=None, headers=None, data=None, - timeout=10): - response = self.pool.request(method, url=url, - headers=headers, data=data, - timeout=self._request_timeout) - return response - - def login(self): - """Login to NVSD Controller.""" - - headers = {"Content-Type": "application/json"} - - login_url = parse.urljoin(self.api_url, - "/pluginhandler/ocplugin/authmgmt/login") - - data 
= json.dumps({"user_name": self._user, "passwd": self._password}) - - attempts = 0 - - while True: - if attempts < self._retries: - attempts += 1 - elif self._retries == 0: - attempts = 0 - else: - msg = _("Unable to connect to NVSD controller. Exiting after " - "%(retries)s attempts") % {'retries': self._retries} - LOG.error(msg) - raise exception.ServerException(reason=msg) - try: - response = self.do_request("POST", url=login_url, - headers=headers, data=data, - timeout=self._request_timeout) - break - except Exception as e: - LOG.error(_("Login Failed: %s"), e) - LOG.error(_("Unable to establish connection" - " with Controller %s"), self.api_url) - LOG.error(_("Retrying after 1 second...")) - time.sleep(1) - - if response.status_code == requests.codes.ok: - LOG.debug(_("Login Successful %(uri)s " - "%(status)s"), {'uri': self.api_url, - 'status': response.status_code}) - self.auth_token = json.loads(response.content)["session_uuid"] - LOG.debug(_("AuthToken = %s"), self.auth_token) - else: - LOG.error(_("login failed")) - - return - - def request(self, method, url, body="", content_type="application/json"): - """Issue a request to NVSD controller.""" - - if self.auth_token is None: - LOG.warning(_("No Token, Re-login")) - self.login() - - headers = {"Content-Type": content_type} - - uri = parse.urljoin(url, "?authToken=%s" % self.auth_token) - - url = parse.urljoin(self.api_url, uri) - - request_ok = False - response = None - - try: - response = self.do_request(method, url=url, - headers=headers, data=body, - timeout=self._request_timeout) - - LOG.debug(_("request: %(method)s %(uri)s successful"), - {'method': method, 'uri': self.api_url + uri}) - request_ok = True - except httplib.IncompleteRead as e: - response = e.partial - request_ok = True - except Exception as e: - LOG.error(_("request: Request failed from " - "Controller side :%s"), e) - - if response is None: - # Timeout. 
- LOG.error(_("Response is Null, Request timed out: %(method)s to " - "%(uri)s"), {'method': method, 'uri': uri}) - self.auth_token = None - raise exception.RequestTimeout() - - status = response.status_code - if status == requests.codes.unauthorized: - self.auth_token = None - # Raise an exception to inform that the request failed. - raise exception.UnAuthorizedException() - - if status in self.error_codes: - LOG.error(_("Request %(method)s %(uri)s body = %(body)s failed " - "with status %(status)s"), {'method': method, - 'uri': uri, 'body': body, - 'status': status}) - LOG.error(_("%s"), response.reason) - raise self.error_codes[status]() - elif status not in (requests.codes.ok, requests.codes.created, - requests.codes.no_content): - LOG.error(_("%(method)s to %(url)s, unexpected response code: " - "%(status)d"), {'method': method, 'url': url, - 'status': status}) - return - - if not request_ok: - LOG.error(_("Request failed from Controller side with " - "Status=%s"), status) - raise exception.ServerException() - else: - LOG.debug(_("Success: %(method)s %(url)s status=%(status)s"), - {'method': method, 'url': self.api_url + uri, - 'status': status}) - response.body = response.content - return response - - error_codes = { - 404: exception.NotFoundException, - 409: exception.BadRequestException, - 500: exception.InternalServerError, - 503: exception.ServerException, - 403: exception.ForbiddenException, - 301: exception.NVSDAPIException, - 307: exception.NVSDAPIException, - 400: exception.NVSDAPIException, - } diff --git a/neutron/plugins/oneconvergence/plugin.py b/neutron/plugins/oneconvergence/plugin.py deleted file mode 100644 index 257ab5494..000000000 --- a/neutron/plugins/oneconvergence/plugin.py +++ /dev/null @@ -1,440 +0,0 @@ -# Copyright 2014 OneConvergence, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Kedar Kulkarni, One Convergence, Inc. - -"""Implementation of OneConvergence Neutron Plugin.""" - -from oslo.config import cfg - -from neutron.agent import securitygroups_rpc as sg_rpc -from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api -from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api -from neutron.common import constants as q_const -from neutron.common import exceptions as nexception -from neutron.common import rpc_compat -from neutron.common import topics -from neutron.db import agents_db -from neutron.db import agentschedulers_db -from neutron.db import db_base_plugin_v2 -from neutron.db import dhcp_rpc_base -from neutron.db import external_net_db -from neutron.db import extraroute_db -from neutron.db import l3_agentschedulers_db -from neutron.db import l3_gwmode_db -from neutron.db import l3_rpc_base -from neutron.db import portbindings_base -from neutron.db import quota_db # noqa -from neutron.db import securitygroups_rpc_base as sg_db_rpc -from neutron.extensions import portbindings -from neutron.openstack.common import excutils -from neutron.openstack.common import importutils -from neutron.openstack.common import log as logging -from neutron.plugins.common import constants as svc_constants -import neutron.plugins.oneconvergence.lib.config # noqa -import neutron.plugins.oneconvergence.lib.exception as nvsdexception -import neutron.plugins.oneconvergence.lib.nvsd_db as nvsd_db -from neutron.plugins.oneconvergence.lib import nvsdlib as nvsd_lib - -LOG = logging.getLogger(__name__) -IPv6 = 6 - - -class 
NVSDPluginRpcCallbacks(rpc_compat.RpcCallback, - dhcp_rpc_base.DhcpRpcCallbackMixin, - l3_rpc_base.L3RpcCallbackMixin, - sg_db_rpc.SecurityGroupServerRpcCallbackMixin): - - RPC_API_VERSION = '1.1' - - @staticmethod - def get_port_from_device(device): - port = nvsd_db.get_port_from_device(device) - if port: - port['device'] = device - return port - - -class NVSDPluginV2AgentNotifierApi(rpc_compat.RpcProxy, - sg_rpc.SecurityGroupAgentRpcApiMixin): - - BASE_RPC_API_VERSION = '1.0' - - def __init__(self, topic): - super(NVSDPluginV2AgentNotifierApi, self).__init__( - topic=topic, default_version=self.BASE_RPC_API_VERSION) - self.topic_port_update = topics.get_topic_name(topic, topics.PORT, - topics.UPDATE) - - def port_update(self, context, port): - self.fanout_cast(context, - self.make_msg('port_update', port=port), - topic=self.topic_port_update) - - -class OneConvergencePluginV2(db_base_plugin_v2.NeutronDbPluginV2, - extraroute_db.ExtraRoute_db_mixin, - l3_agentschedulers_db.L3AgentSchedulerDbMixin, - agentschedulers_db.DhcpAgentSchedulerDbMixin, - external_net_db.External_net_db_mixin, - l3_gwmode_db.L3_NAT_db_mixin, - portbindings_base.PortBindingBaseMixin, - sg_db_rpc.SecurityGroupServerRpcMixin): - - """L2 Virtual Network Plugin. - - OneConvergencePluginV2 is a Neutron plugin that provides L2 Virtual Network - functionality. 
- """ - - __native_bulk_support = True - __native_pagination_support = True - __native_sorting_support = True - - _supported_extension_aliases = ['agent', - 'binding', - 'dhcp_agent_scheduler', - 'ext-gw-mode', - 'external-net', - 'extraroute', - 'l3_agent_scheduler', - 'quotas', - 'router', - 'security-group' - ] - - @property - def supported_extension_aliases(self): - if not hasattr(self, '_aliases'): - aliases = self._supported_extension_aliases[:] - sg_rpc.disable_security_group_extension_by_config(aliases) - self._aliases = aliases - return self._aliases - - def __init__(self): - - super(OneConvergencePluginV2, self).__init__() - - self.oneconvergence_init() - - self.base_binding_dict = { - portbindings.VIF_TYPE: portbindings.VIF_TYPE_OVS, - portbindings.VIF_DETAILS: { - portbindings.CAP_PORT_FILTER: - 'security-group' in self.supported_extension_aliases}} - - portbindings_base.register_port_dict_function() - - self.setup_rpc() - - self.network_scheduler = importutils.import_object( - cfg.CONF.network_scheduler_driver) - self.router_scheduler = importutils.import_object( - cfg.CONF.router_scheduler_driver) - - def oneconvergence_init(self): - """Initialize the connections and set the log levels for the plugin.""" - - self.nvsdlib = nvsd_lib.NVSDApi() - self.nvsdlib.set_connection() - - def setup_rpc(self): - # RPC support - self.service_topics = {svc_constants.CORE: topics.PLUGIN, - svc_constants.L3_ROUTER_NAT: topics.L3PLUGIN} - self.conn = rpc_compat.create_connection(new=True) - self.notifier = NVSDPluginV2AgentNotifierApi(topics.AGENT) - self.agent_notifiers[q_const.AGENT_TYPE_DHCP] = ( - dhcp_rpc_agent_api.DhcpAgentNotifyAPI() - ) - self.agent_notifiers[q_const.AGENT_TYPE_L3] = ( - l3_rpc_agent_api.L3AgentNotifyAPI() - ) - self.endpoints = [NVSDPluginRpcCallbacks(), - agents_db.AgentExtRpcCallback()] - for svc_topic in self.service_topics.values(): - self.conn.create_consumer(svc_topic, self.endpoints, fanout=False) - - # Consume from all consumers in 
threads - self.conn.consume_in_threads() - - def create_network(self, context, network): - - tenant_id = self._get_tenant_id_for_create( - context, network['network']) - self._ensure_default_security_group(context, tenant_id) - - net = self.nvsdlib.create_network(network['network']) - - network['network']['id'] = net['id'] - - with context.session.begin(subtransactions=True): - try: - neutron_net = super(OneConvergencePluginV2, - self).create_network(context, network) - - #following call checks whether the network is external or not - #and if it is external then adds this network to - #externalnetworks table of neutron db - self._process_l3_create(context, neutron_net, - network['network']) - except nvsdexception.NVSDAPIException: - with excutils.save_and_reraise_exception(): - self.nvsdlib.delete_network(net) - - return neutron_net - - def update_network(self, context, net_id, network): - - with context.session.begin(subtransactions=True): - - neutron_net = super(OneConvergencePluginV2, - self).update_network(context, net_id, network) - - self.nvsdlib.update_network(neutron_net, network['network']) - # updates neutron database e.g. externalnetworks table. 
- self._process_l3_update(context, neutron_net, network['network']) - - return neutron_net - - def delete_network(self, context, net_id): - - with context.session.begin(subtransactions=True): - network = self._get_network(context, net_id) - #get all the subnets under the network to delete them - subnets = self._get_subnets_by_network(context, net_id) - - self._process_l3_delete(context, net_id) - super(OneConvergencePluginV2, self).delete_network(context, - net_id) - - self.nvsdlib.delete_network(network, subnets) - - def create_subnet(self, context, subnet): - - if subnet['subnet']['ip_version'] == IPv6: - raise nexception.InvalidInput( - error_message="NVSDPlugin doesn't support IPv6.") - - neutron_subnet = super(OneConvergencePluginV2, - self).create_subnet(context, subnet) - - try: - self.nvsdlib.create_subnet(neutron_subnet) - except nvsdexception.NVSDAPIException: - with excutils.save_and_reraise_exception(): - #Log the message and delete the subnet from the neutron - super(OneConvergencePluginV2, - self).delete_subnet(context, neutron_subnet['id']) - LOG.error(_("Failed to create subnet, " - "deleting it from neutron")) - - return neutron_subnet - - def delete_subnet(self, context, subnet_id): - - neutron_subnet = self._get_subnet(context, subnet_id) - - with context.session.begin(subtransactions=True): - - super(OneConvergencePluginV2, self).delete_subnet(context, - subnet_id) - - self.nvsdlib.delete_subnet(neutron_subnet) - - def update_subnet(self, context, subnet_id, subnet): - - with context.session.begin(subtransactions=True): - - neutron_subnet = super(OneConvergencePluginV2, - self).update_subnet(context, subnet_id, - subnet) - - self.nvsdlib.update_subnet(neutron_subnet, subnet) - return neutron_subnet - - def create_port(self, context, port): - - self._ensure_default_security_group_on_port(context, port) - - sgids = self._get_security_groups_on_port(context, port) - - network = {} - - network_id = port['port']['network_id'] - - with 
context.session.begin(subtransactions=True): - - # Invoke the Neutron API for creating port - neutron_port = super(OneConvergencePluginV2, - self).create_port(context, port) - - self._process_portbindings_create_and_update(context, - port['port'], - neutron_port) - - self._process_port_create_security_group(context, neutron_port, - sgids) - if port['port']['device_owner'] in ('network:router_gateway', - 'network:floatingip'): - # for l3 requests, tenant_id will be None/'' - network = self._get_network(context, network_id) - - tenant_id = network['tenant_id'] - else: - tenant_id = port['port']['tenant_id'] - - port_id = neutron_port['id'] - - try: - self.nvsdlib.create_port(tenant_id, neutron_port) - except nvsdexception.NVSDAPIException: - with excutils.save_and_reraise_exception(): - LOG.error(_("Deleting newly created " - "neutron port %s"), port_id) - super(OneConvergencePluginV2, self).delete_port(context, - port_id) - - self.notify_security_groups_member_updated(context, neutron_port) - - return neutron_port - - def update_port(self, context, port_id, port): - - with context.session.begin(subtransactions=True): - - old_port = super(OneConvergencePluginV2, self).get_port(context, - port_id) - - neutron_port = super(OneConvergencePluginV2, - self).update_port(context, port_id, port) - - if neutron_port['tenant_id'] == '': - network = self._get_network(context, - neutron_port['network_id']) - tenant_id = network['tenant_id'] - else: - tenant_id = neutron_port['tenant_id'] - - self.nvsdlib.update_port(tenant_id, neutron_port, port['port']) - - self._process_portbindings_create_and_update(context, - port['port'], - neutron_port) - need_port_update_notify = self.update_security_group_on_port( - context, port_id, port, old_port, neutron_port) - - if need_port_update_notify: - self.notifier.port_update(context, neutron_port) - - return neutron_port - - def delete_port(self, context, port_id, l3_port_check=True): - - if l3_port_check: - 
self.prevent_l3_port_deletion(context, port_id) - - with context.session.begin(subtransactions=True): - neutron_port = super(OneConvergencePluginV2, - self).get_port(context, port_id) - - self._delete_port_security_group_bindings(context, port_id) - - self.disassociate_floatingips(context, port_id) - - super(OneConvergencePluginV2, self).delete_port(context, port_id) - - network = self._get_network(context, neutron_port['network_id']) - neutron_port['tenant_id'] = network['tenant_id'] - - self.nvsdlib.delete_port(port_id, neutron_port) - - self.notify_security_groups_member_updated(context, neutron_port) - - def create_floatingip(self, context, floatingip): - - neutron_floatingip = super(OneConvergencePluginV2, - self).create_floatingip(context, - floatingip) - try: - self.nvsdlib.create_floatingip(neutron_floatingip) - except nvsdexception.NVSDAPIException: - with excutils.save_and_reraise_exception(): - LOG.error(_("Failed to create floatingip")) - super(OneConvergencePluginV2, - self).delete_floatingip(context, - neutron_floatingip['id']) - - return neutron_floatingip - - def update_floatingip(self, context, fip_id, floatingip): - - with context.session.begin(subtransactions=True): - - neutron_floatingip = super(OneConvergencePluginV2, - self).update_floatingip(context, - fip_id, - floatingip) - - self.nvsdlib.update_floatingip(neutron_floatingip, floatingip) - - return neutron_floatingip - - def delete_floatingip(self, context, floating_ip_id): - - with context.session.begin(subtransactions=True): - - floating_ip = self._get_floatingip(context, floating_ip_id) - - super(OneConvergencePluginV2, - self).delete_floatingip(context, floating_ip_id) - - self.nvsdlib.delete_floatingip(floating_ip) - - def create_router(self, context, router): - - neutron_router = super(OneConvergencePluginV2, - self).create_router(context, router) - try: - self.nvsdlib.create_router(neutron_router) - except nvsdexception.NVSDAPIException: - with excutils.save_and_reraise_exception(): 
- LOG.error(_("Failed to create router")) - super(OneConvergencePluginV2, - self).delete_router(context, neutron_router['id']) - - return neutron_router - - def update_router(self, context, router_id, router): - - with context.session.begin(subtransactions=True): - - neutron_router = super(OneConvergencePluginV2, - self).update_router(context, router_id, - router) - - self.nvsdlib.update_router(neutron_router) - - return neutron_router - - def delete_router(self, context, router_id): - - tenant_id = self._get_router(context, router_id)['tenant_id'] - - with context.session.begin(subtransactions=True): - - super(OneConvergencePluginV2, self).delete_router(context, - router_id) - - self.nvsdlib.delete_router(tenant_id, router_id) diff --git a/neutron/plugins/openvswitch/README b/neutron/plugins/openvswitch/README deleted file mode 100644 index b8991ad0a..000000000 --- a/neutron/plugins/openvswitch/README +++ /dev/null @@ -1,6 +0,0 @@ -The Open vSwitch (OVS) Neutron plugin is a simple plugin to manage OVS -features using a local agent running on each hypervisor. - -For details on how to configure and use the plugin, see: - -http://openvswitch.org/openstack/documentation/ diff --git a/neutron/plugins/openvswitch/__init__.py b/neutron/plugins/openvswitch/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/neutron/plugins/openvswitch/agent/__init__.py b/neutron/plugins/openvswitch/agent/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/neutron/plugins/openvswitch/agent/ovs_neutron_agent.py b/neutron/plugins/openvswitch/agent/ovs_neutron_agent.py deleted file mode 100644 index c5b136b06..000000000 --- a/neutron/plugins/openvswitch/agent/ovs_neutron_agent.py +++ /dev/null @@ -1,1517 +0,0 @@ -#!/usr/bin/env python -# Copyright 2011 VMware, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import hashlib -import signal -import sys -import time - -import eventlet -eventlet.monkey_patch() - -import netaddr -from oslo.config import cfg -from six import moves - -from neutron.agent import l2population_rpc -from neutron.agent.linux import ip_lib -from neutron.agent.linux import ovs_lib -from neutron.agent.linux import polling -from neutron.agent.linux import utils -from neutron.agent import rpc as agent_rpc -from neutron.agent import securitygroups_rpc as sg_rpc -from neutron.common import config as common_config -from neutron.common import constants as q_const -from neutron.common import rpc_compat -from neutron.common import topics -from neutron.common import utils as q_utils -from neutron import context -from neutron.openstack.common import log as logging -from neutron.openstack.common import loopingcall -from neutron.plugins.common import constants as p_const -from neutron.plugins.openvswitch.common import config # noqa -from neutron.plugins.openvswitch.common import constants - - -LOG = logging.getLogger(__name__) - -# A placeholder for dead vlans. -DEAD_VLAN_TAG = str(q_const.MAX_VLAN_TAG + 1) - - -# A class to represent a VIF (i.e., a port that has 'iface-id' and 'vif-mac' -# attributes set). 
-class LocalVLANMapping: - def __init__(self, vlan, network_type, physical_network, segmentation_id, - vif_ports=None): - if vif_ports is None: - vif_ports = {} - self.vlan = vlan - self.network_type = network_type - self.physical_network = physical_network - self.segmentation_id = segmentation_id - self.vif_ports = vif_ports - # set of tunnel ports on which packets should be flooded - self.tun_ofports = set() - - def __str__(self): - return ("lv-id = %s type = %s phys-net = %s phys-id = %s" % - (self.vlan, self.network_type, self.physical_network, - self.segmentation_id)) - - -class OVSPluginApi(agent_rpc.PluginApi, - sg_rpc.SecurityGroupServerRpcApiMixin): - pass - - -class OVSSecurityGroupAgent(sg_rpc.SecurityGroupAgentRpcMixin): - def __init__(self, context, plugin_rpc, root_helper): - self.context = context - self.plugin_rpc = plugin_rpc - self.root_helper = root_helper - self.init_firewall(defer_refresh_firewall=True) - - -class OVSNeutronAgent(rpc_compat.RpcCallback, - sg_rpc.SecurityGroupAgentRpcCallbackMixin, - l2population_rpc.L2populationRpcCallBackMixin): - '''Implements OVS-based tunneling, VLANs and flat networks. - - Two local bridges are created: an integration bridge (defaults to - 'br-int') and a tunneling bridge (defaults to 'br-tun'). An - additional bridge is created for each physical network interface - used for VLANs and/or flat networks. - - All VM VIFs are plugged into the integration bridge. VM VIFs on a - given virtual network share a common "local" VLAN (i.e. not - propagated externally). The VLAN id of this local VLAN is mapped - to the physical networking details realizing that virtual network. - - For virtual networks realized as GRE tunnels, a Logical Switch - (LS) identifier is used to differentiate tenant traffic on - inter-HV tunnels. A mesh of tunnels is created to other - Hypervisors in the cloud. These tunnels originate and terminate on - the tunneling bridge of each hypervisor. 
Port patching is done to - connect local VLANs on the integration bridge to inter-hypervisor - tunnels on the tunnel bridge. - - For each virtual network realized as a VLAN or flat network, a - veth is used to connect the local VLAN on the integration bridge - with the physical network bridge, with flow rules adding, - modifying, or stripping VLAN tags as necessary. - ''' - - # history - # 1.0 Initial version - # 1.1 Support Security Group RPC - RPC_API_VERSION = '1.1' - - def __init__(self, integ_br, tun_br, local_ip, - bridge_mappings, root_helper, - polling_interval, tunnel_types=None, - veth_mtu=None, l2_population=False, - minimize_polling=False, - ovsdb_monitor_respawn_interval=( - constants.DEFAULT_OVSDBMON_RESPAWN), - arp_responder=False): - '''Constructor. - - :param integ_br: name of the integration bridge. - :param tun_br: name of the tunnel bridge. - :param local_ip: local IP address of this hypervisor. - :param bridge_mappings: mappings from physical network name to bridge. - :param root_helper: utility to use when running shell cmds. - :param polling_interval: interval (secs) to poll DB. - :param tunnel_types: A list of tunnel types to enable support for in - the agent. If set, will automatically set enable_tunneling to - True. - :param veth_mtu: MTU size for veth interfaces. - :param l2_population: Optional, whether L2 population is turned on - :param minimize_polling: Optional, whether to minimize polling by - monitoring ovsdb for interface changes. - :param ovsdb_monitor_respawn_interval: Optional, when using polling - minimization, the number of seconds to wait before respawning - the ovsdb monitor. - :param arp_responder: Optional, enable local ARP responder if it is - supported. 
- ''' - super(OVSNeutronAgent, self).__init__() - self.veth_mtu = veth_mtu - self.root_helper = root_helper - self.available_local_vlans = set(moves.xrange(q_const.MIN_VLAN_TAG, - q_const.MAX_VLAN_TAG)) - self.tunnel_types = tunnel_types or [] - self.l2_pop = l2_population - # TODO(ethuleau): Initially, local ARP responder is be dependent to the - # ML2 l2 population mechanism driver. - self.arp_responder_enabled = (arp_responder and - self._check_arp_responder_support() and - self.l2_pop) - self.agent_state = { - 'binary': 'neutron-openvswitch-agent', - 'host': cfg.CONF.host, - 'topic': q_const.L2_AGENT_TOPIC, - 'configurations': {'bridge_mappings': bridge_mappings, - 'tunnel_types': self.tunnel_types, - 'tunneling_ip': local_ip, - 'l2_population': self.l2_pop, - 'arp_responder_enabled': - self.arp_responder_enabled}, - 'agent_type': q_const.AGENT_TYPE_OVS, - 'start_flag': True} - - # Keep track of int_br's device count for use by _report_state() - self.int_br_device_count = 0 - - self.int_br = ovs_lib.OVSBridge(integ_br, self.root_helper) - self.int_br.set_secure_mode() - # Stores port update notifications for processing in main rpc loop - self.updated_ports = set() - self.setup_rpc() - self.setup_integration_br() - self.bridge_mappings = bridge_mappings - self.setup_physical_bridges(self.bridge_mappings) - self.local_vlan_map = {} - self.tun_br_ofports = {p_const.TYPE_GRE: {}, - p_const.TYPE_VXLAN: {}} - - self.polling_interval = polling_interval - self.minimize_polling = minimize_polling - self.ovsdb_monitor_respawn_interval = ovsdb_monitor_respawn_interval - - if tunnel_types: - self.enable_tunneling = True - else: - self.enable_tunneling = False - self.local_ip = local_ip - self.tunnel_count = 0 - self.vxlan_udp_port = cfg.CONF.AGENT.vxlan_udp_port - self.dont_fragment = cfg.CONF.AGENT.dont_fragment - self.tun_br = None - if self.enable_tunneling: - self.setup_tunnel_br(tun_br) - # Collect additional bridges to monitor - self.ancillary_brs = 
self.setup_ancillary_bridges(integ_br, tun_br) - - # Security group agent support - self.sg_agent = OVSSecurityGroupAgent(self.context, - self.plugin_rpc, - root_helper) - # Initialize iteration counter - self.iter_num = 0 - - def _check_arp_responder_support(self): - '''Check if OVS supports to modify ARP headers. - - This functionality is only available since the development branch 2.1. - ''' - args = ['arp,action=load:0x2->NXM_OF_ARP_OP[],' - 'move:NXM_NX_ARP_SHA[]->NXM_NX_ARP_THA[],' - 'move:NXM_OF_ARP_SPA[]->NXM_OF_ARP_TPA[]'] - supported = ovs_lib.ofctl_arg_supported(self.root_helper, 'add-flow', - args) - if not supported: - LOG.warning(_('OVS version can not support ARP responder.')) - return supported - - def _report_state(self): - # How many devices are likely used by a VM - self.agent_state.get('configurations')['devices'] = ( - self.int_br_device_count) - try: - self.state_rpc.report_state(self.context, - self.agent_state) - self.agent_state.pop('start_flag', None) - except Exception: - LOG.exception(_("Failed reporting state!")) - - def setup_rpc(self): - mac = self.int_br.get_local_port_mac() - self.agent_id = '%s%s' % ('ovs', (mac.replace(":", ""))) - self.topic = topics.AGENT - self.plugin_rpc = OVSPluginApi(topics.PLUGIN) - self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN) - - # RPC network init - self.context = context.get_admin_context_without_session() - # Handle updates from service - self.endpoints = [self] - # Define the listening consumers for the agent - consumers = [[topics.PORT, topics.UPDATE], - [topics.NETWORK, topics.DELETE], - [constants.TUNNEL, topics.UPDATE], - [topics.SECURITY_GROUP, topics.UPDATE]] - if self.l2_pop: - consumers.append([topics.L2POPULATION, - topics.UPDATE, cfg.CONF.host]) - self.connection = agent_rpc.create_consumers(self.endpoints, - self.topic, - consumers) - report_interval = cfg.CONF.AGENT.report_interval - if report_interval: - heartbeat = loopingcall.FixedIntervalLoopingCall( - 
self._report_state) - heartbeat.start(interval=report_interval) - - def get_net_uuid(self, vif_id): - for network_id, vlan_mapping in self.local_vlan_map.iteritems(): - if vif_id in vlan_mapping.vif_ports: - return network_id - - def network_delete(self, context, **kwargs): - LOG.debug(_("network_delete received")) - network_id = kwargs.get('network_id') - LOG.debug(_("Delete %s"), network_id) - # The network may not be defined on this agent - lvm = self.local_vlan_map.get(network_id) - if lvm: - self.reclaim_local_vlan(network_id) - else: - LOG.debug(_("Network %s not used on agent."), network_id) - - def port_update(self, context, **kwargs): - port = kwargs.get('port') - # Put the port identifier in the updated_ports set. - # Even if full port details might be provided to this call, - # they are not used since there is no guarantee the notifications - # are processed in the same order as the relevant API requests - self.updated_ports.add(port['id']) - LOG.debug(_("port_update message processed for port %s"), port['id']) - - def tunnel_update(self, context, **kwargs): - LOG.debug(_("tunnel_update received")) - if not self.enable_tunneling: - return - tunnel_ip = kwargs.get('tunnel_ip') - tunnel_id = kwargs.get('tunnel_id', self.get_ip_in_hex(tunnel_ip)) - if not tunnel_id: - return - tunnel_type = kwargs.get('tunnel_type') - if not tunnel_type: - LOG.error(_("No tunnel_type specified, cannot create tunnels")) - return - if tunnel_type not in self.tunnel_types: - LOG.error(_("tunnel_type %s not supported by agent"), tunnel_type) - return - if tunnel_ip == self.local_ip: - return - tun_name = '%s-%s' % (tunnel_type, tunnel_id) - if not self.l2_pop: - self.setup_tunnel_port(tun_name, tunnel_ip, tunnel_type) - - def fdb_add(self, context, fdb_entries): - LOG.debug(_("fdb_add received")) - for network_id, values in fdb_entries.items(): - lvm = self.local_vlan_map.get(network_id) - if not lvm: - # Agent doesn't manage any port in this network - continue - agent_ports = 
values.get('ports') - agent_ports.pop(self.local_ip, None) - if len(agent_ports): - self.tun_br.defer_apply_on() - for agent_ip, ports in agent_ports.items(): - # Ensure we have a tunnel port with this remote agent - ofport = self.tun_br_ofports[ - lvm.network_type].get(agent_ip) - if not ofport: - remote_ip_hex = self.get_ip_in_hex(agent_ip) - if not remote_ip_hex: - continue - port_name = '%s-%s' % (lvm.network_type, remote_ip_hex) - ofport = self.setup_tunnel_port(port_name, agent_ip, - lvm.network_type) - if ofport == 0: - continue - for port in ports: - self._add_fdb_flow(port, lvm, ofport) - self.tun_br.defer_apply_off() - - def fdb_remove(self, context, fdb_entries): - LOG.debug(_("fdb_remove received")) - for network_id, values in fdb_entries.items(): - lvm = self.local_vlan_map.get(network_id) - if not lvm: - # Agent doesn't manage any more ports in this network - continue - agent_ports = values.get('ports') - agent_ports.pop(self.local_ip, None) - if len(agent_ports): - self.tun_br.defer_apply_on() - for agent_ip, ports in agent_ports.items(): - ofport = self.tun_br_ofports[ - lvm.network_type].get(agent_ip) - if not ofport: - continue - for port in ports: - self._del_fdb_flow(port, lvm, ofport) - self.tun_br.defer_apply_off() - - def _add_fdb_flow(self, port_info, lvm, ofport): - if port_info == q_const.FLOODING_ENTRY: - lvm.tun_ofports.add(ofport) - ofports = ','.join(lvm.tun_ofports) - self.tun_br.mod_flow(table=constants.FLOOD_TO_TUN, - dl_vlan=lvm.vlan, - actions="strip_vlan,set_tunnel:%s," - "output:%s" % (lvm.segmentation_id, ofports)) - else: - self._set_arp_responder('add', lvm.vlan, port_info[0], - port_info[1]) - self.tun_br.add_flow(table=constants.UCAST_TO_TUN, - priority=2, - dl_vlan=lvm.vlan, - dl_dst=port_info[0], - actions="strip_vlan,set_tunnel:%s,output:%s" % - (lvm.segmentation_id, ofport)) - - def _del_fdb_flow(self, port_info, lvm, ofport): - if port_info == q_const.FLOODING_ENTRY: - lvm.tun_ofports.remove(ofport) - if 
len(lvm.tun_ofports) > 0: - ofports = ','.join(lvm.tun_ofports) - self.tun_br.mod_flow(table=constants.FLOOD_TO_TUN, - dl_vlan=lvm.vlan, - actions="strip_vlan," - "set_tunnel:%s,output:%s" % - (lvm.segmentation_id, ofports)) - else: - # This local vlan doesn't require any more tunnelling - self.tun_br.delete_flows(table=constants.FLOOD_TO_TUN, - dl_vlan=lvm.vlan) - # Check if this tunnel port is still used - self.cleanup_tunnel_port(ofport, lvm.network_type) - else: - self._set_arp_responder('remove', lvm.vlan, port_info[0], - port_info[1]) - self.tun_br.delete_flows(table=constants.UCAST_TO_TUN, - dl_vlan=lvm.vlan, - dl_dst=port_info[0]) - - def _fdb_chg_ip(self, context, fdb_entries): - '''fdb update when an IP of a port is updated. - - The ML2 l2-pop mechanism driver send an fdb update rpc message when an - IP of a port is updated. - - :param context: RPC context. - :param fdb_entries: fdb dicts that contain all mac/IP informations per - agent and network. - {'net1': - {'agent_ip': - {'before': [[mac, ip]], - 'after': [[mac, ip]] - } - } - 'net2': - ... - } - ''' - LOG.debug(_("update chg_ip received")) - - # TODO(ethuleau): Use OVS defer apply flows for all rules will be an - # interesting improvement here. But actually, OVS lib defer apply flows - # methods doesn't ensure the add flows will be applied before delete. 
- for network_id, agent_ports in fdb_entries.items(): - lvm = self.local_vlan_map.get(network_id) - if not lvm: - continue - - for agent_ip, state in agent_ports.items(): - if agent_ip == self.local_ip: - continue - - after = state.get('after') - for mac, ip in after: - self._set_arp_responder('add', lvm.vlan, mac, ip) - - before = state.get('before') - for mac, ip in before: - self._set_arp_responder('remove', lvm.vlan, mac, ip) - - def fdb_update(self, context, fdb_entries): - LOG.debug(_("fdb_update received")) - for action, values in fdb_entries.items(): - method = '_fdb_' + action - if not hasattr(self, method): - raise NotImplementedError() - - getattr(self, method)(context, values) - - def _set_arp_responder(self, action, lvid, mac_str, ip_str): - '''Set the ARP respond entry. - - When the l2 population mechanism driver and OVS supports to edit ARP - fields, a table (ARP_RESPONDER) to resolve ARP locally is added to the - tunnel bridge. - - :param action: add or remove ARP entry. - :param lvid: local VLAN map of network's ARP entry. - :param mac_str: MAC string value. - :param ip_str: IP string value. 
- ''' - if not self.arp_responder_enabled: - return - - mac = netaddr.EUI(mac_str, dialect=netaddr.mac_unix) - ip = netaddr.IPAddress(ip_str) - - if action == 'add': - actions = ('move:NXM_OF_ETH_SRC[]->NXM_OF_ETH_DST[],' - 'mod_dl_src:%(mac)s,' - 'load:0x2->NXM_OF_ARP_OP[],' - 'move:NXM_NX_ARP_SHA[]->NXM_NX_ARP_THA[],' - 'move:NXM_OF_ARP_SPA[]->NXM_OF_ARP_TPA[],' - 'load:%(mac)#x->NXM_NX_ARP_SHA[],' - 'load:%(ip)#x->NXM_OF_ARP_SPA[],' - 'in_port' % {'mac': mac, 'ip': ip}) - self.tun_br.add_flow(table=constants.ARP_RESPONDER, - priority=1, - proto='arp', - dl_vlan=lvid, - nw_dst='%s' % ip, - actions=actions) - elif action == 'remove': - self.tun_br.delete_flows(table=constants.ARP_RESPONDER, - proto='arp', - dl_vlan=lvid, - nw_dst='%s' % ip) - else: - LOG.warning(_('Action %s not supported'), action) - - def provision_local_vlan(self, net_uuid, network_type, physical_network, - segmentation_id): - '''Provisions a local VLAN. - - :param net_uuid: the uuid of the network associated with this vlan. - :param network_type: the network type ('gre', 'vxlan', 'vlan', 'flat', - 'local') - :param physical_network: the physical network for 'vlan' or 'flat' - :param segmentation_id: the VID for 'vlan' or tunnel ID for 'tunnel' - ''' - - # On a restart or crash of OVS, the network associated with this VLAN - # will already be assigned, so check for that here before assigning a - # new one. 
- lvm = self.local_vlan_map.get(net_uuid) - if lvm: - lvid = lvm.vlan - else: - if not self.available_local_vlans: - LOG.error(_("No local VLAN available for net-id=%s"), net_uuid) - return - lvid = self.available_local_vlans.pop() - self.local_vlan_map[net_uuid] = LocalVLANMapping(lvid, - network_type, - physical_network, - segmentation_id) - - LOG.info(_("Assigning %(vlan_id)s as local vlan for " - "net-id=%(net_uuid)s"), - {'vlan_id': lvid, 'net_uuid': net_uuid}) - - if network_type in constants.TUNNEL_NETWORK_TYPES: - if self.enable_tunneling: - # outbound broadcast/multicast - ofports = ','.join(self.tun_br_ofports[network_type].values()) - if ofports: - self.tun_br.mod_flow(table=constants.FLOOD_TO_TUN, - dl_vlan=lvid, - actions="strip_vlan," - "set_tunnel:%s,output:%s" % - (segmentation_id, ofports)) - # inbound from tunnels: set lvid in the right table - # and resubmit to Table LEARN_FROM_TUN for mac learning - self.tun_br.add_flow(table=constants.TUN_TABLE[network_type], - priority=1, - tun_id=segmentation_id, - actions="mod_vlan_vid:%s,resubmit(,%s)" % - (lvid, constants.LEARN_FROM_TUN)) - else: - LOG.error(_("Cannot provision %(network_type)s network for " - "net-id=%(net_uuid)s - tunneling disabled"), - {'network_type': network_type, - 'net_uuid': net_uuid}) - elif network_type == p_const.TYPE_FLAT: - if physical_network in self.phys_brs: - # outbound - br = self.phys_brs[physical_network] - br.add_flow(priority=4, - in_port=self.phys_ofports[physical_network], - dl_vlan=lvid, - actions="strip_vlan,normal") - # inbound - self.int_br.add_flow( - priority=3, - in_port=self.int_ofports[physical_network], - dl_vlan=0xffff, - actions="mod_vlan_vid:%s,normal" % lvid) - else: - LOG.error(_("Cannot provision flat network for " - "net-id=%(net_uuid)s - no bridge for " - "physical_network %(physical_network)s"), - {'net_uuid': net_uuid, - 'physical_network': physical_network}) - elif network_type == p_const.TYPE_VLAN: - if physical_network in self.phys_brs: - # 
outbound - br = self.phys_brs[physical_network] - br.add_flow(priority=4, - in_port=self.phys_ofports[physical_network], - dl_vlan=lvid, - actions="mod_vlan_vid:%s,normal" % segmentation_id) - # inbound - self.int_br.add_flow(priority=3, - in_port=self. - int_ofports[physical_network], - dl_vlan=segmentation_id, - actions="mod_vlan_vid:%s,normal" % lvid) - else: - LOG.error(_("Cannot provision VLAN network for " - "net-id=%(net_uuid)s - no bridge for " - "physical_network %(physical_network)s"), - {'net_uuid': net_uuid, - 'physical_network': physical_network}) - elif network_type == p_const.TYPE_LOCAL: - # no flows needed for local networks - pass - else: - LOG.error(_("Cannot provision unknown network type " - "%(network_type)s for net-id=%(net_uuid)s"), - {'network_type': network_type, - 'net_uuid': net_uuid}) - - def reclaim_local_vlan(self, net_uuid): - '''Reclaim a local VLAN. - - :param net_uuid: the network uuid associated with this vlan. - :param lvm: a LocalVLANMapping object that tracks (vlan, lsw_id, - vif_ids) mapping. - ''' - lvm = self.local_vlan_map.pop(net_uuid, None) - if lvm is None: - LOG.debug(_("Network %s not used on agent."), net_uuid) - return - - LOG.info(_("Reclaiming vlan = %(vlan_id)s from net-id = %(net_uuid)s"), - {'vlan_id': lvm.vlan, - 'net_uuid': net_uuid}) - - if lvm.network_type in constants.TUNNEL_NETWORK_TYPES: - if self.enable_tunneling: - self.tun_br.delete_flows( - table=constants.TUN_TABLE[lvm.network_type], - tun_id=lvm.segmentation_id) - self.tun_br.delete_flows(dl_vlan=lvm.vlan) - if self.l2_pop: - # Try to remove tunnel ports if not used by other networks - for ofport in lvm.tun_ofports: - self.cleanup_tunnel_port(ofport, lvm.network_type) - elif lvm.network_type == p_const.TYPE_FLAT: - if lvm.physical_network in self.phys_brs: - # outbound - br = self.phys_brs[lvm.physical_network] - br.delete_flows(in_port=self.phys_ofports[lvm. 
- physical_network], - dl_vlan=lvm.vlan) - # inbound - br = self.int_br - br.delete_flows(in_port=self.int_ofports[lvm.physical_network], - dl_vlan=0xffff) - elif lvm.network_type == p_const.TYPE_VLAN: - if lvm.physical_network in self.phys_brs: - # outbound - br = self.phys_brs[lvm.physical_network] - br.delete_flows(in_port=self.phys_ofports[lvm. - physical_network], - dl_vlan=lvm.vlan) - # inbound - br = self.int_br - br.delete_flows(in_port=self.int_ofports[lvm.physical_network], - dl_vlan=lvm.segmentation_id) - elif lvm.network_type == p_const.TYPE_LOCAL: - # no flows needed for local networks - pass - else: - LOG.error(_("Cannot reclaim unknown network type " - "%(network_type)s for net-id=%(net_uuid)s"), - {'network_type': lvm.network_type, - 'net_uuid': net_uuid}) - - self.available_local_vlans.add(lvm.vlan) - - def port_bound(self, port, net_uuid, - network_type, physical_network, segmentation_id, - ovs_restarted): - '''Bind port to net_uuid/lsw_id and install flow for inbound traffic - to vm. - - :param port: a ovslib.VifPort object. - :param net_uuid: the net_uuid this port is to be associated with. - :param network_type: the network type ('gre', 'vlan', 'flat', 'local') - :param physical_network: the physical network for 'vlan' or 'flat' - :param segmentation_id: the VID for 'vlan' or tunnel ID for 'tunnel' - :param ovs_restarted: indicates if this is called for an OVS restart. 
- ''' - if net_uuid not in self.local_vlan_map or ovs_restarted: - self.provision_local_vlan(net_uuid, network_type, - physical_network, segmentation_id) - lvm = self.local_vlan_map[net_uuid] - lvm.vif_ports[port.vif_id] = port - # Do not bind a port if it's already bound - cur_tag = self.int_br.db_get_val("Port", port.port_name, "tag") - if cur_tag != str(lvm.vlan): - self.int_br.set_db_attribute("Port", port.port_name, "tag", - str(lvm.vlan)) - if port.ofport != -1: - self.int_br.delete_flows(in_port=port.ofport) - - def port_unbound(self, vif_id, net_uuid=None): - '''Unbind port. - - Removes corresponding local vlan mapping object if this is its last - VIF. - - :param vif_id: the id of the vif - :param net_uuid: the net_uuid this port is associated with. - ''' - if net_uuid is None: - net_uuid = self.get_net_uuid(vif_id) - - if not self.local_vlan_map.get(net_uuid): - LOG.info(_('port_unbound() net_uuid %s not in local_vlan_map'), - net_uuid) - return - - lvm = self.local_vlan_map[net_uuid] - lvm.vif_ports.pop(vif_id, None) - - if not lvm.vif_ports: - self.reclaim_local_vlan(net_uuid) - - def port_dead(self, port): - '''Once a port has no binding, put it on the "dead vlan". - - :param port: a ovs_lib.VifPort object. - ''' - # Don't kill a port if it's already dead - cur_tag = self.int_br.db_get_val("Port", port.port_name, "tag") - if cur_tag != DEAD_VLAN_TAG: - self.int_br.set_db_attribute("Port", port.port_name, "tag", - DEAD_VLAN_TAG) - self.int_br.add_flow(priority=2, in_port=port.ofport, - actions="drop") - - def setup_integration_br(self): - '''Setup the integration bridge. - - Create patch ports and remove all existing flows. - - :param bridge_name: the name of the integration bridge. 
- :returns: the integration bridge - ''' - self.int_br.delete_port(cfg.CONF.OVS.int_peer_patch_port) - self.int_br.remove_all_flows() - # switch all traffic using L2 learning - self.int_br.add_flow(priority=1, actions="normal") - # Add a canary flow to int_br to track OVS restarts - self.int_br.add_flow(table=constants.CANARY_TABLE, priority=0, - actions="drop") - - def setup_ancillary_bridges(self, integ_br, tun_br): - '''Setup ancillary bridges - for example br-ex.''' - ovs_bridges = set(ovs_lib.get_bridges(self.root_helper)) - # Remove all known bridges - ovs_bridges.remove(integ_br) - if self.enable_tunneling: - ovs_bridges.remove(tun_br) - br_names = [self.phys_brs[physical_network].br_name for - physical_network in self.phys_brs] - ovs_bridges.difference_update(br_names) - # Filter list of bridges to those that have external - # bridge-id's configured - br_names = [] - for bridge in ovs_bridges: - id = ovs_lib.get_bridge_external_bridge_id(self.root_helper, - bridge) - if id != bridge: - br_names.append(bridge) - ovs_bridges.difference_update(br_names) - ancillary_bridges = [] - for bridge in ovs_bridges: - br = ovs_lib.OVSBridge(bridge, self.root_helper) - LOG.info(_('Adding %s to list of bridges.'), bridge) - ancillary_bridges.append(br) - return ancillary_bridges - - def setup_tunnel_br(self, tun_br=None): - '''Setup the tunnel bridge. - - Creates tunnel bridge, and links it to the integration bridge - using a patch port. - - :param tun_br: the name of the tunnel bridge. - ''' - if not self.tun_br: - self.tun_br = ovs_lib.OVSBridge(tun_br, self.root_helper) - - self.tun_br.reset_bridge() - self.patch_tun_ofport = self.int_br.add_patch_port( - cfg.CONF.OVS.int_peer_patch_port, cfg.CONF.OVS.tun_peer_patch_port) - self.patch_int_ofport = self.tun_br.add_patch_port( - cfg.CONF.OVS.tun_peer_patch_port, cfg.CONF.OVS.int_peer_patch_port) - if int(self.patch_tun_ofport) < 0 or int(self.patch_int_ofport) < 0: - LOG.error(_("Failed to create OVS patch port. 
Cannot have " - "tunneling enabled on this agent, since this version " - "of OVS does not support tunnels or patch ports. " - "Agent terminated!")) - exit(1) - self.tun_br.remove_all_flows() - - # Table 0 (default) will sort incoming traffic depending on in_port - self.tun_br.add_flow(priority=1, - in_port=self.patch_int_ofport, - actions="resubmit(,%s)" % - constants.PATCH_LV_TO_TUN) - self.tun_br.add_flow(priority=0, actions="drop") - if self.arp_responder_enabled: - # ARP broadcast-ed request go to the local ARP_RESPONDER table to - # be locally resolved - self.tun_br.add_flow(table=constants.PATCH_LV_TO_TUN, - priority=1, - proto='arp', - dl_dst="ff:ff:ff:ff:ff:ff", - actions=("resubmit(,%s)" % - constants.ARP_RESPONDER)) - # PATCH_LV_TO_TUN table will handle packets coming from patch_int - # unicasts go to table UCAST_TO_TUN where remote addresses are learnt - self.tun_br.add_flow(table=constants.PATCH_LV_TO_TUN, - priority=0, - dl_dst="00:00:00:00:00:00/01:00:00:00:00:00", - actions="resubmit(,%s)" % constants.UCAST_TO_TUN) - # Broadcasts/multicasts go to table FLOOD_TO_TUN that handles flooding - self.tun_br.add_flow(table=constants.PATCH_LV_TO_TUN, - priority=0, - dl_dst="01:00:00:00:00:00/01:00:00:00:00:00", - actions="resubmit(,%s)" % constants.FLOOD_TO_TUN) - # Tables [tunnel_type]_TUN_TO_LV will set lvid depending on tun_id - # for each tunnel type, and resubmit to table LEARN_FROM_TUN where - # remote mac addresses will be learnt - for tunnel_type in constants.TUNNEL_NETWORK_TYPES: - self.tun_br.add_flow(table=constants.TUN_TABLE[tunnel_type], - priority=0, - actions="drop") - # LEARN_FROM_TUN table will have a single flow using a learn action to - # dynamically set-up flows in UCAST_TO_TUN corresponding to remote mac - # addresses (assumes that lvid has already been set by a previous flow) - learned_flow = ("table=%s," - "priority=1," - "hard_timeout=300," - "NXM_OF_VLAN_TCI[0..11]," - "NXM_OF_ETH_DST[]=NXM_OF_ETH_SRC[]," - 
"load:0->NXM_OF_VLAN_TCI[]," - "load:NXM_NX_TUN_ID[]->NXM_NX_TUN_ID[]," - "output:NXM_OF_IN_PORT[]" % - constants.UCAST_TO_TUN) - # Once remote mac addresses are learnt, output packet to patch_int - self.tun_br.add_flow(table=constants.LEARN_FROM_TUN, - priority=1, - actions="learn(%s),output:%s" % - (learned_flow, self.patch_int_ofport)) - # Egress unicast will be handled in table UCAST_TO_TUN, where remote - # mac addresses will be learned. For now, just add a default flow that - # will resubmit unknown unicasts to table FLOOD_TO_TUN to treat them - # as broadcasts/multicasts - self.tun_br.add_flow(table=constants.UCAST_TO_TUN, - priority=0, - actions="resubmit(,%s)" % - constants.FLOOD_TO_TUN) - if self.arp_responder_enabled: - # If none of the ARP entries correspond to the requested IP, the - # broadcast-ed packet is resubmitted to the flooding table - self.tun_br.add_flow(table=constants.ARP_RESPONDER, - priority=0, - actions="resubmit(,%s)" % - constants.FLOOD_TO_TUN) - # FLOOD_TO_TUN will handle flooding in tunnels based on lvid, - # for now, add a default drop action - self.tun_br.add_flow(table=constants.FLOOD_TO_TUN, - priority=0, - actions="drop") - - def get_veth_name(self, prefix, name): - """Construct a veth name based on the prefix and name that does not - exceed the maximum length allowed for a linux device. Longer names - are hashed to help ensure uniqueness. - """ - if len(prefix + name) <= ip_lib.VETH_MAX_NAME_LENGTH: - return prefix + name - # We can't just truncate because bridges may be distinguished - # by an ident at the end. A hash over the name should be unique. 
- # Leave part of the bridge name on for easier identification - hashlen = 6 - namelen = ip_lib.VETH_MAX_NAME_LENGTH - len(prefix) - hashlen - new_name = ('%(prefix)s%(truncated)s%(hash)s' % - {'prefix': prefix, 'truncated': name[0:namelen], - 'hash': hashlib.sha1(name).hexdigest()[0:hashlen]}) - LOG.warning(_("Creating an interface named %(name)s exceeds the " - "%(limit)d character limitation. It was shortened to " - "%(new_name)s to fit."), - {'name': name, 'limit': ip_lib.VETH_MAX_NAME_LENGTH, - 'new_name': new_name}) - return new_name - - def setup_physical_bridges(self, bridge_mappings): - '''Setup the physical network bridges. - - Creates physical network bridges and links them to the - integration bridge using veths. - - :param bridge_mappings: map physical network names to bridge names. - ''' - self.phys_brs = {} - self.int_ofports = {} - self.phys_ofports = {} - ip_wrapper = ip_lib.IPWrapper(self.root_helper) - for physical_network, bridge in bridge_mappings.iteritems(): - LOG.info(_("Mapping physical network %(physical_network)s to " - "bridge %(bridge)s"), - {'physical_network': physical_network, - 'bridge': bridge}) - # setup physical bridge - if not ip_lib.device_exists(bridge, self.root_helper): - LOG.error(_("Bridge %(bridge)s for physical network " - "%(physical_network)s does not exist. 
Agent " - "terminated!"), - {'physical_network': physical_network, - 'bridge': bridge}) - sys.exit(1) - br = ovs_lib.OVSBridge(bridge, self.root_helper) - br.remove_all_flows() - br.add_flow(priority=1, actions="normal") - self.phys_brs[physical_network] = br - - # create veth to patch physical bridge with integration bridge - int_veth_name = self.get_veth_name( - constants.VETH_INTEGRATION_PREFIX, bridge) - self.int_br.delete_port(int_veth_name) - phys_veth_name = self.get_veth_name( - constants.VETH_PHYSICAL_PREFIX, bridge) - br.delete_port(phys_veth_name) - if ip_lib.device_exists(int_veth_name, self.root_helper): - ip_lib.IPDevice(int_veth_name, self.root_helper).link.delete() - # Give udev a chance to process its rules here, to avoid - # race conditions between commands launched by udev rules - # and the subsequent call to ip_wrapper.add_veth - utils.execute(['/sbin/udevadm', 'settle', '--timeout=10']) - int_veth, phys_veth = ip_wrapper.add_veth(int_veth_name, - phys_veth_name) - self.int_ofports[physical_network] = self.int_br.add_port(int_veth) - self.phys_ofports[physical_network] = br.add_port(phys_veth) - - # block all untranslated traffic over veth between bridges - self.int_br.add_flow(priority=2, - in_port=self.int_ofports[physical_network], - actions="drop") - br.add_flow(priority=2, - in_port=self.phys_ofports[physical_network], - actions="drop") - - # enable veth to pass traffic - int_veth.link.set_up() - phys_veth.link.set_up() - - if self.veth_mtu: - # set up mtu size for veth interfaces - int_veth.link.set_mtu(self.veth_mtu) - phys_veth.link.set_mtu(self.veth_mtu) - - def scan_ports(self, registered_ports, updated_ports=None): - cur_ports = self.int_br.get_vif_port_set() - self.int_br_device_count = len(cur_ports) - port_info = {'current': cur_ports} - if updated_ports is None: - updated_ports = set() - updated_ports.update(self.check_changed_vlans(registered_ports)) - if updated_ports: - # Some updated ports might have been removed in the - # 
meanwhile, and therefore should not be processed. - # In this case the updated port won't be found among - # current ports. - updated_ports &= cur_ports - if updated_ports: - port_info['updated'] = updated_ports - - # FIXME(salv-orlando): It's not really necessary to return early - # if nothing has changed. - if cur_ports == registered_ports: - # No added or removed ports to set, just return here - return port_info - - port_info['added'] = cur_ports - registered_ports - # Remove all the known ports not found on the integration bridge - port_info['removed'] = registered_ports - cur_ports - return port_info - - def check_changed_vlans(self, registered_ports): - """Return ports which have lost their vlan tag. - - The returned value is a set of port ids of the ports concerned by a - vlan tag loss. - """ - port_tags = self.int_br.get_port_tag_dict() - changed_ports = set() - for lvm in self.local_vlan_map.values(): - for port in registered_ports: - if ( - port in lvm.vif_ports - and lvm.vif_ports[port].port_name in port_tags - and port_tags[lvm.vif_ports[port].port_name] != lvm.vlan - ): - LOG.info( - _("Port '%(port_name)s' has lost " - "its vlan tag '%(vlan_tag)d'!"), - {'port_name': lvm.vif_ports[port].port_name, - 'vlan_tag': lvm.vlan} - ) - changed_ports.add(port) - return changed_ports - - def update_ancillary_ports(self, registered_ports): - ports = set() - for bridge in self.ancillary_brs: - ports |= bridge.get_vif_port_set() - - if ports == registered_ports: - return - added = ports - registered_ports - removed = registered_ports - ports - return {'current': ports, - 'added': added, - 'removed': removed} - - def treat_vif_port(self, vif_port, port_id, network_id, network_type, - physical_network, segmentation_id, admin_state_up, - ovs_restarted): - # When this function is called for a port, the port should have - # an OVS ofport configured, as only these ports were considered - # for being treated. 
If that does not happen, it is a potential - # error condition of which operators should be aware - if not vif_port.ofport: - LOG.warn(_("VIF port: %s has no ofport configured, and might not " - "be able to transmit"), vif_port.vif_id) - if vif_port: - if admin_state_up: - self.port_bound(vif_port, network_id, network_type, - physical_network, segmentation_id, - ovs_restarted) - else: - self.port_dead(vif_port) - else: - LOG.debug(_("No VIF port for port %s defined on agent."), port_id) - - def setup_tunnel_port(self, port_name, remote_ip, tunnel_type): - ofport = self.tun_br.add_tunnel_port(port_name, - remote_ip, - self.local_ip, - tunnel_type, - self.vxlan_udp_port, - self.dont_fragment) - ofport_int = -1 - try: - ofport_int = int(ofport) - except (TypeError, ValueError): - LOG.exception(_("ofport should have a value that can be " - "interpreted as an integer")) - if ofport_int < 0: - LOG.error(_("Failed to set-up %(type)s tunnel port to %(ip)s"), - {'type': tunnel_type, 'ip': remote_ip}) - return 0 - - self.tun_br_ofports[tunnel_type][remote_ip] = ofport - # Add flow in default table to resubmit to the right - # tunnelling table (lvid will be set in the latter) - self.tun_br.add_flow(priority=1, - in_port=ofport, - actions="resubmit(,%s)" % - constants.TUN_TABLE[tunnel_type]) - - ofports = ','.join(self.tun_br_ofports[tunnel_type].values()) - if ofports and not self.l2_pop: - # Update flooding flows to include the new tunnel - for network_id, vlan_mapping in self.local_vlan_map.iteritems(): - if vlan_mapping.network_type == tunnel_type: - self.tun_br.mod_flow(table=constants.FLOOD_TO_TUN, - dl_vlan=vlan_mapping.vlan, - actions="strip_vlan," - "set_tunnel:%s,output:%s" % - (vlan_mapping.segmentation_id, - ofports)) - return ofport - - def cleanup_tunnel_port(self, tun_ofport, tunnel_type): - # Check if this tunnel port is still used - for lvm in self.local_vlan_map.values(): - if tun_ofport in lvm.tun_ofports: - break - # If not, remove it - else: - for 
remote_ip, ofport in self.tun_br_ofports[tunnel_type].items(): - if ofport == tun_ofport: - port_name = '%s-%s' % (tunnel_type, - self.get_ip_in_hex(remote_ip)) - self.tun_br.delete_port(port_name) - self.tun_br.delete_flows(in_port=ofport) - self.tun_br_ofports[tunnel_type].pop(remote_ip, None) - - def treat_devices_added_or_updated(self, devices, ovs_restarted): - resync = False - for device in devices: - LOG.debug(_("Processing port %s"), device) - port = self.int_br.get_vif_port_by_id(device) - if not port: - # The port has disappeared and should not be processed - # There is no need to put the port DOWN in the plugin as - # it never went up in the first place - LOG.info(_("Port %s was not found on the integration bridge " - "and will therefore not be processed"), device) - continue - try: - # TODO(salv-orlando): Provide bulk API for retrieving - # details for all devices in one call - details = self.plugin_rpc.get_device_details(self.context, - device, - self.agent_id) - except Exception as e: - LOG.debug(_("Unable to get port details for " - "%(device)s: %(e)s"), - {'device': device, 'e': e}) - resync = True - continue - if 'port_id' in details: - LOG.info(_("Port %(device)s updated. 
Details: %(details)s"), - {'device': device, 'details': details}) - self.treat_vif_port(port, details['port_id'], - details['network_id'], - details['network_type'], - details['physical_network'], - details['segmentation_id'], - details['admin_state_up'], - ovs_restarted) - # update plugin about port status - if details.get('admin_state_up'): - LOG.debug(_("Setting status for %s to UP"), device) - self.plugin_rpc.update_device_up( - self.context, device, self.agent_id, cfg.CONF.host) - else: - LOG.debug(_("Setting status for %s to DOWN"), device) - self.plugin_rpc.update_device_down( - self.context, device, self.agent_id, cfg.CONF.host) - LOG.info(_("Configuration for device %s completed."), device) - else: - LOG.warn(_("Device %s not defined on plugin"), device) - if (port and port.ofport != -1): - self.port_dead(port) - return resync - - def treat_ancillary_devices_added(self, devices): - resync = False - for device in devices: - LOG.info(_("Ancillary Port %s added"), device) - try: - self.plugin_rpc.get_device_details(self.context, device, - self.agent_id) - except Exception as e: - LOG.debug(_("Unable to get port details for " - "%(device)s: %(e)s"), - {'device': device, 'e': e}) - resync = True - continue - - # update plugin about port status - self.plugin_rpc.update_device_up(self.context, - device, - self.agent_id, - cfg.CONF.host) - return resync - - def treat_devices_removed(self, devices): - resync = False - self.sg_agent.remove_devices_filter(devices) - for device in devices: - LOG.info(_("Attachment %s removed"), device) - try: - self.plugin_rpc.update_device_down(self.context, - device, - self.agent_id, - cfg.CONF.host) - except Exception as e: - LOG.debug(_("port_removed failed for %(device)s: %(e)s"), - {'device': device, 'e': e}) - resync = True - continue - self.port_unbound(device) - return resync - - def treat_ancillary_devices_removed(self, devices): - resync = False - for device in devices: - LOG.info(_("Attachment %s removed"), device) - try: 
- details = self.plugin_rpc.update_device_down(self.context, - device, - self.agent_id, - cfg.CONF.host) - except Exception as e: - LOG.debug(_("port_removed failed for %(device)s: %(e)s"), - {'device': device, 'e': e}) - resync = True - continue - if details['exists']: - LOG.info(_("Port %s updated."), device) - # Nothing to do regarding local networking - else: - LOG.debug(_("Device %s not defined on plugin"), device) - return resync - - def process_network_ports(self, port_info, ovs_restarted): - resync_a = False - resync_b = False - # TODO(salv-orlando): consider a solution for ensuring notifications - # are processed exactly in the same order in which they were - # received. This is tricky because there are two notification - # sources: the neutron server, and the ovs db monitor process - # If there is an exception while processing security groups ports - # will not be wired anyway, and a resync will be triggered - # TODO(salv-orlando): Optimize avoiding applying filters unnecessarily - # (eg: when there are no IP address changes) - self.sg_agent.setup_port_filters(port_info.get('added', set()), - port_info.get('updated', set())) - # VIF wiring needs to be performed always for 'new' devices. - # For updated ports, re-wiring is not needed in most cases, but needs - # to be performed anyway when the admin state of a device is changed. - # A device might be both in the 'added' and 'updated' - # list at the same time; avoid processing it twice. 
- devices_added_updated = (port_info.get('added', set()) | - port_info.get('updated', set())) - if devices_added_updated: - start = time.time() - resync_a = self.treat_devices_added_or_updated( - devices_added_updated, ovs_restarted) - LOG.debug(_("process_network_ports - iteration:%(iter_num)d -" - "treat_devices_added_or_updated completed " - "in %(elapsed).3f"), - {'iter_num': self.iter_num, - 'elapsed': time.time() - start}) - if 'removed' in port_info: - start = time.time() - resync_b = self.treat_devices_removed(port_info['removed']) - LOG.debug(_("process_network_ports - iteration:%(iter_num)d -" - "treat_devices_removed completed in %(elapsed).3f"), - {'iter_num': self.iter_num, - 'elapsed': time.time() - start}) - # If one of the above operations fails => resync with plugin - return (resync_a | resync_b) - - def process_ancillary_network_ports(self, port_info): - resync_a = False - resync_b = False - if 'added' in port_info: - start = time.time() - resync_a = self.treat_ancillary_devices_added(port_info['added']) - LOG.debug(_("process_ancillary_network_ports - iteration: " - "%(iter_num)d - treat_ancillary_devices_added " - "completed in %(elapsed).3f"), - {'iter_num': self.iter_num, - 'elapsed': time.time() - start}) - if 'removed' in port_info: - start = time.time() - resync_b = self.treat_ancillary_devices_removed( - port_info['removed']) - LOG.debug(_("process_ancillary_network_ports - iteration: " - "%(iter_num)d - treat_ancillary_devices_removed " - "completed in %(elapsed).3f"), - {'iter_num': self.iter_num, - 'elapsed': time.time() - start}) - - # If one of the above operations fails => resync with plugin - return (resync_a | resync_b) - - def get_ip_in_hex(self, ip_address): - try: - return '%08x' % netaddr.IPAddress(ip_address, version=4) - except Exception: - LOG.warn(_("Unable to create tunnel port. 
Invalid remote IP: %s"), - ip_address) - return - - def tunnel_sync(self): - resync = False - try: - for tunnel_type in self.tunnel_types: - details = self.plugin_rpc.tunnel_sync(self.context, - self.local_ip, - tunnel_type) - if not self.l2_pop: - tunnels = details['tunnels'] - for tunnel in tunnels: - if self.local_ip != tunnel['ip_address']: - tunnel_id = tunnel.get('id') - # Unlike the OVS plugin, ML2 doesn't return an id - # key. So use ip_address to form port name instead. - # Port name must be <=15 chars, so use shorter hex. - remote_ip = tunnel['ip_address'] - remote_ip_hex = self.get_ip_in_hex(remote_ip) - if not tunnel_id and not remote_ip_hex: - continue - tun_name = '%s-%s' % (tunnel_type, - tunnel_id or remote_ip_hex) - self.setup_tunnel_port(tun_name, - tunnel['ip_address'], - tunnel_type) - except Exception as e: - LOG.debug(_("Unable to sync tunnel IP %(local_ip)s: %(e)s"), - {'local_ip': self.local_ip, 'e': e}) - resync = True - return resync - - def _agent_has_updates(self, polling_manager): - return (polling_manager.is_polling_required or - self.updated_ports or - self.sg_agent.firewall_refresh_needed()) - - def _port_info_has_changes(self, port_info): - return (port_info.get('added') or - port_info.get('removed') or - port_info.get('updated')) - - def check_ovs_restart(self): - # Check for the canary flow - canary_flow = self.int_br.dump_flows_for_table(constants.CANARY_TABLE) - return not canary_flow - - def rpc_loop(self, polling_manager=None): - if not polling_manager: - polling_manager = polling.AlwaysPoll() - - sync = True - ports = set() - updated_ports_copy = set() - ancillary_ports = set() - tunnel_sync = True - ovs_restarted = False - while True: - start = time.time() - port_stats = {'regular': {'added': 0, - 'updated': 0, - 'removed': 0}, - 'ancillary': {'added': 0, - 'removed': 0}} - LOG.debug(_("Agent rpc_loop - iteration:%d started"), - self.iter_num) - if sync: - LOG.info(_("Agent out of sync with plugin!")) - ports.clear() - 
ancillary_ports.clear() - sync = False - polling_manager.force_polling() - # Notify the plugin of tunnel IP - if self.enable_tunneling and tunnel_sync: - LOG.info(_("Agent tunnel out of sync with plugin!")) - try: - tunnel_sync = self.tunnel_sync() - except Exception: - LOG.exception(_("Error while synchronizing tunnels")) - tunnel_sync = True - ovs_restarted = self.check_ovs_restart() - if ovs_restarted: - self.setup_integration_br() - self.setup_physical_bridges(self.bridge_mappings) - if self.enable_tunneling: - self.setup_tunnel_br() - if self._agent_has_updates(polling_manager) or ovs_restarted: - try: - LOG.debug(_("Agent rpc_loop - iteration:%(iter_num)d - " - "starting polling. Elapsed:%(elapsed).3f"), - {'iter_num': self.iter_num, - 'elapsed': time.time() - start}) - # Save updated ports dict to perform rollback in - # case resync would be needed, and then clear - # self.updated_ports. As the greenthread should not yield - # between these two statements, this will be thread-safe - updated_ports_copy = self.updated_ports - self.updated_ports = set() - reg_ports = (set() if ovs_restarted else ports) - port_info = self.scan_ports(reg_ports, updated_ports_copy) - ports = port_info['current'] - LOG.debug(_("Agent rpc_loop - iteration:%(iter_num)d - " - "port information retrieved. " - "Elapsed:%(elapsed).3f"), - {'iter_num': self.iter_num, - 'elapsed': time.time() - start}) - # Secure and wire/unwire VIFs and update their status - # on Neutron server - if (self._port_info_has_changes(port_info) or - self.sg_agent.firewall_refresh_needed() or - ovs_restarted): - LOG.debug(_("Starting to process devices in:%s"), - port_info) - # If treat devices fails - must resync with plugin - sync = self.process_network_ports(port_info, - ovs_restarted) - LOG.debug(_("Agent rpc_loop - iteration:%(iter_num)d -" - "ports processed. 
Elapsed:%(elapsed).3f"), - {'iter_num': self.iter_num, - 'elapsed': time.time() - start}) - port_stats['regular']['added'] = ( - len(port_info.get('added', []))) - port_stats['regular']['updated'] = ( - len(port_info.get('updated', []))) - port_stats['regular']['removed'] = ( - len(port_info.get('removed', []))) - # Treat ancillary devices if they exist - if self.ancillary_brs: - port_info = self.update_ancillary_ports( - ancillary_ports) - LOG.debug(_("Agent rpc_loop - iteration:%(iter_num)d -" - "ancillary port info retrieved. " - "Elapsed:%(elapsed).3f"), - {'iter_num': self.iter_num, - 'elapsed': time.time() - start}) - - if port_info: - rc = self.process_ancillary_network_ports( - port_info) - LOG.debug(_("Agent rpc_loop - iteration:" - "%(iter_num)d - ancillary ports " - "processed. Elapsed:%(elapsed).3f"), - {'iter_num': self.iter_num, - 'elapsed': time.time() - start}) - ancillary_ports = port_info['current'] - port_stats['ancillary']['added'] = ( - len(port_info.get('added', []))) - port_stats['ancillary']['removed'] = ( - len(port_info.get('removed', []))) - sync = sync | rc - - polling_manager.polling_completed() - except Exception: - LOG.exception(_("Error while processing VIF ports")) - # Put the ports back in self.updated_port - self.updated_ports |= updated_ports_copy - sync = True - - # sleep till end of polling interval - elapsed = (time.time() - start) - LOG.debug(_("Agent rpc_loop - iteration:%(iter_num)d " - "completed. Processed ports statistics: " - "%(port_stats)s. Elapsed:%(elapsed).3f"), - {'iter_num': self.iter_num, - 'port_stats': port_stats, - 'elapsed': elapsed}) - if (elapsed < self.polling_interval): - time.sleep(self.polling_interval - elapsed) - else: - LOG.debug(_("Loop iteration exceeded interval " - "(%(polling_interval)s vs. 
%(elapsed)s)!"), - {'polling_interval': self.polling_interval, - 'elapsed': elapsed}) - self.iter_num = self.iter_num + 1 - - def daemon_loop(self): - with polling.get_polling_manager( - self.minimize_polling, - self.root_helper, - self.ovsdb_monitor_respawn_interval) as pm: - - self.rpc_loop(polling_manager=pm) - - -def handle_sigterm(signum, frame): - sys.exit(1) - - -def create_agent_config_map(config): - """Create a map of agent config parameters. - - :param config: an instance of cfg.CONF - :returns: a map of agent configuration parameters - """ - try: - bridge_mappings = q_utils.parse_mappings(config.OVS.bridge_mappings) - except ValueError as e: - raise ValueError(_("Parsing bridge_mappings failed: %s.") % e) - - kwargs = dict( - integ_br=config.OVS.integration_bridge, - tun_br=config.OVS.tunnel_bridge, - local_ip=config.OVS.local_ip, - bridge_mappings=bridge_mappings, - root_helper=config.AGENT.root_helper, - polling_interval=config.AGENT.polling_interval, - minimize_polling=config.AGENT.minimize_polling, - tunnel_types=config.AGENT.tunnel_types, - veth_mtu=config.AGENT.veth_mtu, - l2_population=config.AGENT.l2_population, - arp_responder=config.AGENT.arp_responder, - ) - - # If enable_tunneling is TRUE, set tunnel_type to default to GRE - if config.OVS.enable_tunneling and not kwargs['tunnel_types']: - kwargs['tunnel_types'] = [p_const.TYPE_GRE] - - # Verify the tunnel_types specified are valid - for tun in kwargs['tunnel_types']: - if tun not in constants.TUNNEL_NETWORK_TYPES: - msg = _('Invalid tunnel type specified: %s'), tun - raise ValueError(msg) - if not kwargs['local_ip']: - msg = _('Tunneling cannot be enabled without a valid local_ip.') - raise ValueError(msg) - - return kwargs - - -def main(): - cfg.CONF.register_opts(ip_lib.OPTS) - common_config.init(sys.argv[1:]) - common_config.setup_logging(cfg.CONF) - q_utils.log_opt_values(LOG) - - try: - agent_config = create_agent_config_map(cfg.CONF) - except ValueError as e: - LOG.error(_('%s Agent 
terminated!'), e) - sys.exit(1) - - is_xen_compute_host = 'rootwrap-xen-dom0' in agent_config['root_helper'] - if is_xen_compute_host: - # Force ip_lib to always use the root helper to ensure that ip - # commands target xen dom0 rather than domU. - cfg.CONF.set_default('ip_lib_force_root', True) - - agent = OVSNeutronAgent(**agent_config) - signal.signal(signal.SIGTERM, handle_sigterm) - - # Start everything. - LOG.info(_("Agent initialized successfully, now running... ")) - agent.daemon_loop() - sys.exit(0) - - -if __name__ == "__main__": - main() diff --git a/neutron/plugins/openvswitch/agent/xenapi/README b/neutron/plugins/openvswitch/agent/xenapi/README deleted file mode 100644 index 0a02c99e1..000000000 --- a/neutron/plugins/openvswitch/agent/xenapi/README +++ /dev/null @@ -1,16 +0,0 @@ -This directory contains files that are required for the XenAPI support. -They should be installed in the XenServer / Xen Cloud Platform dom0. - -If you install them manually, you will need to ensure that the newly -added files are executable. 
You can do this by running the following -command (from dom0): - - chmod a+x /etc/xapi.d/plugins/* - -Otherwise, you can build an rpm by running the following command: - - ./contrib/build-rpm.sh - -and install the rpm by running the following command (from dom0): - - rpm -i openstack-neutron-xen-plugins.rpm diff --git a/neutron/plugins/openvswitch/agent/xenapi/contrib/build-rpm.sh b/neutron/plugins/openvswitch/agent/xenapi/contrib/build-rpm.sh deleted file mode 100755 index 81b5f3b31..000000000 --- a/neutron/plugins/openvswitch/agent/xenapi/contrib/build-rpm.sh +++ /dev/null @@ -1,34 +0,0 @@ -#!/bin/bash - -set -eux - -thisdir=$(dirname $(readlink -f "$0")) -export NEUTRON_ROOT="$thisdir/../../../../../../" -export PYTHONPATH=$NEUTRON_ROOT - -cd $NEUTRON_ROOT -VERSION=$(sh -c "(cat $NEUTRON_ROOT/neutron/version.py; \ - echo 'print version_info.release_string()') | \ - python") -cd - - -PACKAGE=openstack-neutron-xen-plugins -RPMBUILD_DIR=$PWD/rpmbuild -if [ ! -d $RPMBUILD_DIR ]; then - echo $RPMBUILD_DIR is missing - exit 1 -fi - -for dir in BUILD BUILDROOT SRPMS RPMS SOURCES; do - rm -rf $RPMBUILD_DIR/$dir - mkdir -p $RPMBUILD_DIR/$dir -done - -rm -rf /tmp/$PACKAGE -mkdir /tmp/$PACKAGE -cp -r ../etc/xapi.d /tmp/$PACKAGE -tar czf $RPMBUILD_DIR/SOURCES/$PACKAGE.tar.gz -C /tmp $PACKAGE - -rpmbuild -ba --nodeps --define "_topdir $RPMBUILD_DIR" \ - --define "version $VERSION" \ - $RPMBUILD_DIR/SPECS/$PACKAGE.spec diff --git a/neutron/plugins/openvswitch/agent/xenapi/contrib/rpmbuild/SPECS/openstack-quantum-xen-plugins.spec b/neutron/plugins/openvswitch/agent/xenapi/contrib/rpmbuild/SPECS/openstack-quantum-xen-plugins.spec deleted file mode 100644 index 8ba03eaf1..000000000 --- a/neutron/plugins/openvswitch/agent/xenapi/contrib/rpmbuild/SPECS/openstack-quantum-xen-plugins.spec +++ /dev/null @@ -1,30 +0,0 @@ -Name: openstack-neutron-xen-plugins -Version: %{version} -Release: 1 -Summary: Files for XenAPI support. 
-License: ASL 2.0 -Group: Applications/Utilities -Source0: openstack-neutron-xen-plugins.tar.gz -BuildArch: noarch -BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) - -%define debug_package %{nil} - -%description -This package contains files that are required for XenAPI support for Neutron. - -%prep -%setup -q -n openstack-neutron-xen-plugins - -%install -rm -rf $RPM_BUILD_ROOT -mkdir -p $RPM_BUILD_ROOT/etc -cp -r xapi.d $RPM_BUILD_ROOT/etc -chmod a+x $RPM_BUILD_ROOT/etc/xapi.d/plugins/* - -%clean -rm -rf $RPM_BUILD_ROOT - -%files -%defattr(-,root,root,-) -/etc/xapi.d/plugins/* diff --git a/neutron/plugins/openvswitch/agent/xenapi/etc/xapi.d/plugins/netwrap b/neutron/plugins/openvswitch/agent/xenapi/etc/xapi.d/plugins/netwrap deleted file mode 100644 index 21909e846..000000000 --- a/neutron/plugins/openvswitch/agent/xenapi/etc/xapi.d/plugins/netwrap +++ /dev/null @@ -1,72 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2012 OpenStack Foundation -# Copyright 2012 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -# -# XenAPI plugin for executing network commands (ovs, iptables, etc) on dom0 -# - -import gettext -gettext.install('neutron', unicode=1) -try: - import json -except ImportError: - import simplejson as json -import subprocess - -import XenAPIPlugin - - -ALLOWED_CMDS = [ - 'ip', - 'ovs-ofctl', - 'ovs-vsctl', - ] - - -class PluginError(Exception): - """Base Exception class for all plugin errors.""" - def __init__(self, *args): - Exception.__init__(self, *args) - -def _run_command(cmd, cmd_input): - """Abstracts out the basics of issuing system commands. If the command - returns anything in stderr, a PluginError is raised with that information. - Otherwise, the output from stdout is returned. - """ - pipe = subprocess.PIPE - proc = subprocess.Popen(cmd, shell=False, stdin=pipe, stdout=pipe, - stderr=pipe, close_fds=True) - (out, err) = proc.communicate(cmd_input) - - if err: - raise PluginError(err) - return out - - -def run_command(session, args): - cmd = json.loads(args.get('cmd')) - if cmd and cmd[0] not in ALLOWED_CMDS: - msg = _("Dom0 execution of '%s' is not permitted") % cmd[0] - raise PluginError(msg) - result = _run_command(cmd, json.loads(args.get('cmd_input', 'null'))) - return json.dumps(result) - - -if __name__ == "__main__": - XenAPIPlugin.dispatch({"run_command": run_command}) diff --git a/neutron/plugins/openvswitch/common/__init__.py b/neutron/plugins/openvswitch/common/__init__.py deleted file mode 100644 index e5f41adfe..000000000 --- a/neutron/plugins/openvswitch/common/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2012 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. diff --git a/neutron/plugins/openvswitch/common/config.py b/neutron/plugins/openvswitch/common/config.py deleted file mode 100644 index 07ba94168..000000000 --- a/neutron/plugins/openvswitch/common/config.py +++ /dev/null @@ -1,94 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2012 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo.config import cfg - -from neutron.agent.common import config -from neutron.plugins.openvswitch.common import constants - - -DEFAULT_BRIDGE_MAPPINGS = [] -DEFAULT_VLAN_RANGES = [] -DEFAULT_TUNNEL_RANGES = [] -DEFAULT_TUNNEL_TYPES = [] - -ovs_opts = [ - cfg.StrOpt('integration_bridge', default='br-int', - help=_("Integration bridge to use")), - cfg.BoolOpt('enable_tunneling', default=False, - help=_("Enable tunneling support")), - cfg.StrOpt('tunnel_bridge', default='br-tun', - help=_("Tunnel bridge to use")), - cfg.StrOpt('int_peer_patch_port', default='patch-tun', - help=_("Peer patch port in integration bridge for tunnel " - "bridge")), - cfg.StrOpt('tun_peer_patch_port', default='patch-int', - help=_("Peer patch port in tunnel bridge for integration " - "bridge")), - cfg.StrOpt('local_ip', default='', - help=_("Local IP address of GRE tunnel endpoints.")), - cfg.ListOpt('bridge_mappings', - default=DEFAULT_BRIDGE_MAPPINGS, - help=_("List of :")), - cfg.StrOpt('tenant_network_type', default='local', - help=_("Network type for tenant networks " - "(local, vlan, gre, vxlan, or none)")), - cfg.ListOpt('network_vlan_ranges', - default=DEFAULT_VLAN_RANGES, - help=_("List of :: " - "or ")), - cfg.ListOpt('tunnel_id_ranges', - default=DEFAULT_TUNNEL_RANGES, - help=_("List of :")), - cfg.StrOpt('tunnel_type', default='', - help=_("The type of tunnels to use when utilizing tunnels, " - "either 'gre' or 'vxlan'")), -] - -agent_opts = [ - cfg.IntOpt('polling_interval', default=2, - help=_("The number of seconds the agent will wait between " - "polling for local device changes.")), - cfg.BoolOpt('minimize_polling', - default=True, - help=_("Minimize polling by monitoring ovsdb for interface " - "changes.")), - cfg.IntOpt('ovsdb_monitor_respawn_interval', - default=constants.DEFAULT_OVSDBMON_RESPAWN, - help=_("The number of seconds to wait before respawning the " - "ovsdb monitor after losing communication with it")), - cfg.ListOpt('tunnel_types', 
default=DEFAULT_TUNNEL_TYPES, - help=_("Network types supported by the agent " - "(gre and/or vxlan)")), - cfg.IntOpt('vxlan_udp_port', default=constants.VXLAN_UDP_PORT, - help=_("The UDP port to use for VXLAN tunnels.")), - cfg.IntOpt('veth_mtu', - help=_("MTU size of veth interfaces")), - cfg.BoolOpt('l2_population', default=False, - help=_("Use ml2 l2population mechanism driver to learn " - "remote mac and IPs and improve tunnel scalability")), - cfg.BoolOpt('arp_responder', default=False, - help=_("Enable local ARP responder if it is supported")), - cfg.BoolOpt('dont_fragment', default=True, - help=_("Set or un-set the don't fragment (DF) bit on " - "outgoing IP packet carrying GRE/VXLAN tunnel")), -] - - -cfg.CONF.register_opts(ovs_opts, "OVS") -cfg.CONF.register_opts(agent_opts, "AGENT") -config.register_agent_state_opts_helper(cfg.CONF) -config.register_root_helper(cfg.CONF) diff --git a/neutron/plugins/openvswitch/common/constants.py b/neutron/plugins/openvswitch/common/constants.py deleted file mode 100644 index 57f086325..000000000 --- a/neutron/plugins/openvswitch/common/constants.py +++ /dev/null @@ -1,54 +0,0 @@ -# Copyright (c) 2012 OpenStack Foundation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from neutron.plugins.common import constants as p_const - - -# Special vlan_id value in ovs_vlan_allocations table indicating flat network -FLAT_VLAN_ID = -1 - -# Topic for tunnel notifications between the plugin and agent -TUNNEL = 'tunnel' - -# Values for network_type -VXLAN_UDP_PORT = 4789 - -# Name prefixes for veth device pair linking the integration bridge -# with the physical bridge for a physical network -VETH_INTEGRATION_PREFIX = 'int-' -VETH_PHYSICAL_PREFIX = 'phy-' - -# The different types of tunnels -TUNNEL_NETWORK_TYPES = [p_const.TYPE_GRE, p_const.TYPE_VXLAN] - -# Various tables for tunneling flows -PATCH_LV_TO_TUN = 1 -GRE_TUN_TO_LV = 2 -VXLAN_TUN_TO_LV = 3 -LEARN_FROM_TUN = 10 -UCAST_TO_TUN = 20 -ARP_RESPONDER = 21 -FLOOD_TO_TUN = 22 -CANARY_TABLE = 23 - -# Map tunnel types to tables number -TUN_TABLE = {p_const.TYPE_GRE: GRE_TUN_TO_LV, - p_const.TYPE_VXLAN: VXLAN_TUN_TO_LV} - -# The default respawn interval for the ovsdb monitor -DEFAULT_OVSDBMON_RESPAWN = 30 - -# Special return value for an invalid OVS ofport -INVALID_OFPORT = '-1' diff --git a/neutron/plugins/openvswitch/ovs_db_v2.py b/neutron/plugins/openvswitch/ovs_db_v2.py deleted file mode 100644 index 75d0ec70d..000000000 --- a/neutron/plugins/openvswitch/ovs_db_v2.py +++ /dev/null @@ -1,396 +0,0 @@ -# Copyright 2011 VMware, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from six import moves -from sqlalchemy import func -from sqlalchemy.orm import exc - -from neutron.common import exceptions as n_exc -import neutron.db.api as db -from neutron.db import models_v2 -from neutron.db import securitygroups_db as sg_db -from neutron.extensions import securitygroup as ext_sg -from neutron import manager -from neutron.openstack.common.db import exception as db_exc -from neutron.openstack.common import log as logging -from neutron.plugins.openvswitch.common import constants -from neutron.plugins.openvswitch import ovs_models_v2 - -LOG = logging.getLogger(__name__) - - -def get_network_binding(session, network_id): - session = session or db.get_session() - try: - binding = (session.query(ovs_models_v2.NetworkBinding). - filter_by(network_id=network_id). - one()) - return binding - except exc.NoResultFound: - return - - -def add_network_binding(session, network_id, network_type, - physical_network, segmentation_id): - with session.begin(subtransactions=True): - binding = ovs_models_v2.NetworkBinding(network_id, network_type, - physical_network, - segmentation_id) - session.add(binding) - return binding - - -def sync_vlan_allocations(network_vlan_ranges): - """Synchronize vlan_allocations table with configured VLAN ranges.""" - - session = db.get_session() - with session.begin(): - # get existing allocations for all physical networks - allocations = dict() - allocs = (session.query(ovs_models_v2.VlanAllocation). 
- all()) - for alloc in allocs: - if alloc.physical_network not in allocations: - allocations[alloc.physical_network] = set() - allocations[alloc.physical_network].add(alloc) - - # process vlan ranges for each configured physical network - for physical_network, vlan_ranges in network_vlan_ranges.iteritems(): - # determine current configured allocatable vlans for this - # physical network - vlan_ids = set() - for vlan_range in vlan_ranges: - vlan_ids |= set(moves.xrange(vlan_range[0], vlan_range[1] + 1)) - - # remove from table unallocated vlans not currently allocatable - if physical_network in allocations: - for alloc in allocations[physical_network]: - try: - # see if vlan is allocatable - vlan_ids.remove(alloc.vlan_id) - except KeyError: - # it's not allocatable, so check if its allocated - if not alloc.allocated: - # it's not, so remove it from table - LOG.debug(_("Removing vlan %(vlan_id)s on " - "physical network " - "%(physical_network)s from pool"), - {'vlan_id': alloc.vlan_id, - 'physical_network': physical_network}) - session.delete(alloc) - del allocations[physical_network] - - # add missing allocatable vlans to table - for vlan_id in sorted(vlan_ids): - alloc = ovs_models_v2.VlanAllocation(physical_network, vlan_id) - session.add(alloc) - - # remove from table unallocated vlans for any unconfigured physical - # networks - for allocs in allocations.itervalues(): - for alloc in allocs: - if not alloc.allocated: - LOG.debug(_("Removing vlan %(vlan_id)s on physical " - "network %(physical_network)s from pool"), - {'vlan_id': alloc.vlan_id, - 'physical_network': alloc.physical_network}) - session.delete(alloc) - - -def get_vlan_allocation(physical_network, vlan_id): - session = db.get_session() - try: - alloc = (session.query(ovs_models_v2.VlanAllocation). - filter_by(physical_network=physical_network, - vlan_id=vlan_id). 
- one()) - return alloc - except exc.NoResultFound: - return - - -def reserve_vlan(session): - with session.begin(subtransactions=True): - alloc = (session.query(ovs_models_v2.VlanAllocation). - filter_by(allocated=False). - with_lockmode('update'). - first()) - if alloc: - LOG.debug(_("Reserving vlan %(vlan_id)s on physical network " - "%(physical_network)s from pool"), - {'vlan_id': alloc.vlan_id, - 'physical_network': alloc.physical_network}) - alloc.allocated = True - return (alloc.physical_network, alloc.vlan_id) - raise n_exc.NoNetworkAvailable() - - -def reserve_specific_vlan(session, physical_network, vlan_id): - with session.begin(subtransactions=True): - try: - alloc = (session.query(ovs_models_v2.VlanAllocation). - filter_by(physical_network=physical_network, - vlan_id=vlan_id). - with_lockmode('update'). - one()) - if alloc.allocated: - if vlan_id == constants.FLAT_VLAN_ID: - raise n_exc.FlatNetworkInUse( - physical_network=physical_network) - else: - raise n_exc.VlanIdInUse(vlan_id=vlan_id, - physical_network=physical_network) - LOG.debug(_("Reserving specific vlan %(vlan_id)s on physical " - "network %(physical_network)s from pool"), - {'vlan_id': vlan_id, - 'physical_network': physical_network}) - alloc.allocated = True - except exc.NoResultFound: - LOG.debug(_("Reserving specific vlan %(vlan_id)s on physical " - "network %(physical_network)s outside pool"), - {'vlan_id': vlan_id, - 'physical_network': physical_network}) - alloc = ovs_models_v2.VlanAllocation(physical_network, vlan_id) - alloc.allocated = True - session.add(alloc) - - -def release_vlan(session, physical_network, vlan_id, network_vlan_ranges): - with session.begin(subtransactions=True): - try: - alloc = (session.query(ovs_models_v2.VlanAllocation). - filter_by(physical_network=physical_network, - vlan_id=vlan_id). - with_lockmode('update'). 
- one()) - alloc.allocated = False - inside = False - for vlan_range in network_vlan_ranges.get(physical_network, []): - if vlan_id >= vlan_range[0] and vlan_id <= vlan_range[1]: - inside = True - break - if not inside: - session.delete(alloc) - LOG.debug(_("Releasing vlan %(vlan_id)s on physical network " - "%(physical_network)s outside pool"), - {'vlan_id': vlan_id, - 'physical_network': physical_network}) - else: - LOG.debug(_("Releasing vlan %(vlan_id)s on physical network " - "%(physical_network)s to pool"), - {'vlan_id': vlan_id, - 'physical_network': physical_network}) - except exc.NoResultFound: - LOG.warning(_("vlan_id %(vlan_id)s on physical network " - "%(physical_network)s not found"), - {'vlan_id': vlan_id, - 'physical_network': physical_network}) - - -def sync_tunnel_allocations(tunnel_id_ranges): - """Synchronize tunnel_allocations table with configured tunnel ranges.""" - - # determine current configured allocatable tunnels - tunnel_ids = set() - for tunnel_id_range in tunnel_id_ranges: - tun_min, tun_max = tunnel_id_range - if tun_max + 1 - tun_min > 1000000: - LOG.error(_("Skipping unreasonable tunnel ID range " - "%(tun_min)s:%(tun_max)s"), - {'tun_min': tun_min, 'tun_max': tun_max}) - else: - tunnel_ids |= set(moves.xrange(tun_min, tun_max + 1)) - - session = db.get_session() - with session.begin(): - # remove from table unallocated tunnels not currently allocatable - allocs = (session.query(ovs_models_v2.TunnelAllocation). 
- all()) - for alloc in allocs: - try: - # see if tunnel is allocatable - tunnel_ids.remove(alloc.tunnel_id) - except KeyError: - # it's not allocatable, so check if its allocated - if not alloc.allocated: - # it's not, so remove it from table - LOG.debug(_("Removing tunnel %s from pool"), - alloc.tunnel_id) - session.delete(alloc) - - # add missing allocatable tunnels to table - for tunnel_id in sorted(tunnel_ids): - alloc = ovs_models_v2.TunnelAllocation(tunnel_id) - session.add(alloc) - - -def get_tunnel_allocation(tunnel_id): - session = db.get_session() - try: - alloc = (session.query(ovs_models_v2.TunnelAllocation). - filter_by(tunnel_id=tunnel_id). - with_lockmode('update'). - one()) - return alloc - except exc.NoResultFound: - return - - -def reserve_tunnel(session): - with session.begin(subtransactions=True): - alloc = (session.query(ovs_models_v2.TunnelAllocation). - filter_by(allocated=False). - with_lockmode('update'). - first()) - if alloc: - LOG.debug(_("Reserving tunnel %s from pool"), alloc.tunnel_id) - alloc.allocated = True - return alloc.tunnel_id - raise n_exc.NoNetworkAvailable() - - -def reserve_specific_tunnel(session, tunnel_id): - with session.begin(subtransactions=True): - try: - alloc = (session.query(ovs_models_v2.TunnelAllocation). - filter_by(tunnel_id=tunnel_id). - with_lockmode('update'). - one()) - if alloc.allocated: - raise n_exc.TunnelIdInUse(tunnel_id=tunnel_id) - LOG.debug(_("Reserving specific tunnel %s from pool"), tunnel_id) - alloc.allocated = True - except exc.NoResultFound: - LOG.debug(_("Reserving specific tunnel %s outside pool"), - tunnel_id) - alloc = ovs_models_v2.TunnelAllocation(tunnel_id) - alloc.allocated = True - session.add(alloc) - - -def release_tunnel(session, tunnel_id, tunnel_id_ranges): - with session.begin(subtransactions=True): - try: - alloc = (session.query(ovs_models_v2.TunnelAllocation). - filter_by(tunnel_id=tunnel_id). - with_lockmode('update'). 
- one()) - alloc.allocated = False - inside = False - for tunnel_id_range in tunnel_id_ranges: - if (tunnel_id >= tunnel_id_range[0] - and tunnel_id <= tunnel_id_range[1]): - inside = True - break - if not inside: - session.delete(alloc) - LOG.debug(_("Releasing tunnel %s outside pool"), tunnel_id) - else: - LOG.debug(_("Releasing tunnel %s to pool"), tunnel_id) - except exc.NoResultFound: - LOG.warning(_("tunnel_id %s not found"), tunnel_id) - - -def get_port(port_id): - session = db.get_session() - try: - port = session.query(models_v2.Port).filter_by(id=port_id).one() - except exc.NoResultFound: - port = None - return port - - -def get_port_from_device(port_id): - """Get port from database.""" - LOG.debug(_("get_port_with_securitygroups() called:port_id=%s"), port_id) - session = db.get_session() - sg_binding_port = sg_db.SecurityGroupPortBinding.port_id - - query = session.query(models_v2.Port, - sg_db.SecurityGroupPortBinding.security_group_id) - query = query.outerjoin(sg_db.SecurityGroupPortBinding, - models_v2.Port.id == sg_binding_port) - query = query.filter(models_v2.Port.id == port_id) - port_and_sgs = query.all() - if not port_and_sgs: - return None - port = port_and_sgs[0][0] - plugin = manager.NeutronManager.get_plugin() - port_dict = plugin._make_port_dict(port) - port_dict[ext_sg.SECURITYGROUPS] = [ - sg_id for port_, sg_id in port_and_sgs if sg_id] - port_dict['security_group_rules'] = [] - port_dict['security_group_source_groups'] = [] - port_dict['fixed_ips'] = [ip['ip_address'] - for ip in port['fixed_ips']] - return port_dict - - -def set_port_status(port_id, status): - session = db.get_session() - try: - port = session.query(models_v2.Port).filter_by(id=port_id).one() - port['status'] = status - session.merge(port) - session.flush() - except exc.NoResultFound: - raise n_exc.PortNotFound(port_id=port_id) - - -def get_tunnel_endpoints(): - session = db.get_session() - - tunnels = session.query(ovs_models_v2.TunnelEndpoint) - return [{'id': 
tunnel.id, - 'ip_address': tunnel.ip_address} for tunnel in tunnels] - - -def _generate_tunnel_id(session): - max_tunnel_id = session.query( - func.max(ovs_models_v2.TunnelEndpoint.id)).scalar() or 0 - return max_tunnel_id + 1 - - -def add_tunnel_endpoint(ip, max_retries=10): - """Return the endpoint of the given IP address or generate a new one.""" - - # NOTE(rpodolyaka): generation of a new tunnel endpoint must be put into a - # repeatedly executed transactional block to ensure it - # doesn't conflict with any other concurrently executed - # DB transactions in spite of the specified transactions - # isolation level value - for i in moves.xrange(max_retries): - LOG.debug(_('Adding a tunnel endpoint for %s'), ip) - try: - session = db.get_session() - with session.begin(subtransactions=True): - tunnel = (session.query(ovs_models_v2.TunnelEndpoint). - filter_by(ip_address=ip).with_lockmode('update'). - first()) - - if tunnel is None: - tunnel_id = _generate_tunnel_id(session) - tunnel = ovs_models_v2.TunnelEndpoint(ip, tunnel_id) - session.add(tunnel) - - return tunnel - except db_exc.DBDuplicateEntry: - # a concurrent transaction has been committed, try again - LOG.debug(_('Adding a tunnel endpoint failed due to a concurrent' - 'transaction had been committed (%s attempts left)'), - max_retries - (i + 1)) - - raise n_exc.NeutronException( - message=_('Unable to generate a new tunnel id')) diff --git a/neutron/plugins/openvswitch/ovs_models_v2.py b/neutron/plugins/openvswitch/ovs_models_v2.py deleted file mode 100644 index 59b2c14a9..000000000 --- a/neutron/plugins/openvswitch/ovs_models_v2.py +++ /dev/null @@ -1,107 +0,0 @@ -# Copyright 2011 VMware, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -from sqlalchemy import Boolean, Column, ForeignKey, Integer, String -from sqlalchemy.schema import UniqueConstraint - -from neutron.db import model_base -from neutron.db import models_v2 -from sqlalchemy import orm - - -class VlanAllocation(model_base.BASEV2): - """Represents allocation state of vlan_id on physical network.""" - __tablename__ = 'ovs_vlan_allocations' - - physical_network = Column(String(64), nullable=False, primary_key=True) - vlan_id = Column(Integer, nullable=False, primary_key=True, - autoincrement=False) - allocated = Column(Boolean, nullable=False) - - def __init__(self, physical_network, vlan_id): - self.physical_network = physical_network - self.vlan_id = vlan_id - self.allocated = False - - def __repr__(self): - return "" % (self.physical_network, - self.vlan_id, self.allocated) - - -class TunnelAllocation(model_base.BASEV2): - """Represents allocation state of tunnel_id.""" - __tablename__ = 'ovs_tunnel_allocations' - - tunnel_id = Column(Integer, nullable=False, primary_key=True, - autoincrement=False) - allocated = Column(Boolean, nullable=False) - - def __init__(self, tunnel_id): - self.tunnel_id = tunnel_id - self.allocated = False - - def __repr__(self): - return "" % (self.tunnel_id, self.allocated) - - -class NetworkBinding(model_base.BASEV2): - """Represents binding of virtual network to physical realization.""" - __tablename__ = 'ovs_network_bindings' - - network_id = Column(String(36), - ForeignKey('networks.id', ondelete="CASCADE"), - primary_key=True) - # 'gre', 'vlan', 'flat', 'local' - network_type = 
Column(String(32), nullable=False) - physical_network = Column(String(64)) - segmentation_id = Column(Integer) # tunnel_id or vlan_id - - network = orm.relationship( - models_v2.Network, - backref=orm.backref("binding", lazy='joined', - uselist=False, cascade='delete')) - - def __init__(self, network_id, network_type, physical_network, - segmentation_id): - self.network_id = network_id - self.network_type = network_type - self.physical_network = physical_network - self.segmentation_id = segmentation_id - - def __repr__(self): - return "" % (self.network_id, - self.network_type, - self.physical_network, - self.segmentation_id) - - -class TunnelEndpoint(model_base.BASEV2): - """Represents tunnel endpoint in RPC mode.""" - __tablename__ = 'ovs_tunnel_endpoints' - __table_args__ = ( - UniqueConstraint('id', name='uniq_ovs_tunnel_endpoints0id'), - model_base.BASEV2.__table_args__, - ) - - ip_address = Column(String(64), primary_key=True) - id = Column(Integer, nullable=False) - - def __init__(self, ip_address, id): - self.ip_address = ip_address - self.id = id - - def __repr__(self): - return "" % (self.ip_address, self.id) diff --git a/neutron/plugins/openvswitch/ovs_neutron_plugin.py b/neutron/plugins/openvswitch/ovs_neutron_plugin.py deleted file mode 100644 index 31698a3df..000000000 --- a/neutron/plugins/openvswitch/ovs_neutron_plugin.py +++ /dev/null @@ -1,623 +0,0 @@ -# Copyright 2011 VMware, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -import sys - -from oslo.config import cfg - -from neutron.agent import securitygroups_rpc as sg_rpc -from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api -from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api -from neutron.api.v2 import attributes -from neutron.common import constants as q_const -from neutron.common import exceptions as n_exc -from neutron.common import rpc_compat -from neutron.common import topics -from neutron.common import utils -from neutron.db import agents_db -from neutron.db import agentschedulers_db -from neutron.db import allowedaddresspairs_db as addr_pair_db -from neutron.db import db_base_plugin_v2 -from neutron.db import dhcp_rpc_base -from neutron.db import external_net_db -from neutron.db import extradhcpopt_db -from neutron.db import extraroute_db -from neutron.db import l3_agentschedulers_db -from neutron.db import l3_gwmode_db -from neutron.db import l3_rpc_base -from neutron.db import portbindings_db -from neutron.db import quota_db # noqa -from neutron.db import securitygroups_rpc_base as sg_db_rpc -from neutron.extensions import allowedaddresspairs as addr_pair -from neutron.extensions import extra_dhcp_opt as edo_ext -from neutron.extensions import portbindings -from neutron.extensions import providernet as provider -from neutron import manager -from neutron.openstack.common import importutils -from neutron.openstack.common import log as logging -from neutron.plugins.common import constants as svc_constants -from neutron.plugins.common import utils as plugin_utils -from neutron.plugins.openvswitch.common import config # noqa -from neutron.plugins.openvswitch.common import constants -from neutron.plugins.openvswitch import ovs_db_v2 - - -LOG = logging.getLogger(__name__) - - -class OVSRpcCallbacks(rpc_compat.RpcCallback, - dhcp_rpc_base.DhcpRpcCallbackMixin, - l3_rpc_base.L3RpcCallbackMixin, - 
sg_db_rpc.SecurityGroupServerRpcCallbackMixin): - - # history - # 1.0 Initial version - # 1.1 Support Security Group RPC - - RPC_API_VERSION = '1.1' - - def __init__(self, notifier, tunnel_type): - super(OVSRpcCallbacks, self).__init__() - self.notifier = notifier - self.tunnel_type = tunnel_type - - @classmethod - def get_port_from_device(cls, device): - port = ovs_db_v2.get_port_from_device(device) - if port: - port['device'] = device - return port - - def get_device_details(self, rpc_context, **kwargs): - """Agent requests device details.""" - agent_id = kwargs.get('agent_id') - device = kwargs.get('device') - LOG.debug(_("Device %(device)s details requested from %(agent_id)s"), - {'device': device, 'agent_id': agent_id}) - port = ovs_db_v2.get_port(device) - if port: - binding = ovs_db_v2.get_network_binding(None, port['network_id']) - entry = {'device': device, - 'network_id': port['network_id'], - 'port_id': port['id'], - 'admin_state_up': port['admin_state_up'], - 'network_type': binding.network_type, - 'segmentation_id': binding.segmentation_id, - 'physical_network': binding.physical_network} - new_status = (q_const.PORT_STATUS_ACTIVE if port['admin_state_up'] - else q_const.PORT_STATUS_DOWN) - if port['status'] != new_status: - ovs_db_v2.set_port_status(port['id'], new_status) - else: - entry = {'device': device} - LOG.debug(_("%s can not be found in database"), device) - return entry - - def update_device_down(self, rpc_context, **kwargs): - """Device no longer exists on agent.""" - agent_id = kwargs.get('agent_id') - device = kwargs.get('device') - host = kwargs.get('host') - port = ovs_db_v2.get_port(device) - LOG.debug(_("Device %(device)s no longer exists on %(agent_id)s"), - {'device': device, 'agent_id': agent_id}) - if port: - entry = {'device': device, - 'exists': True} - plugin = manager.NeutronManager.get_plugin() - if (host and - not plugin.get_port_host(rpc_context, port['id']) == host): - LOG.debug(_("Device %(device)s not bound to the" - " 
agent host %(host)s"), - {'device': device, 'host': host}) - elif port['status'] != q_const.PORT_STATUS_DOWN: - # Set port status to DOWN - ovs_db_v2.set_port_status(port['id'], - q_const.PORT_STATUS_DOWN) - else: - entry = {'device': device, - 'exists': False} - LOG.debug(_("%s can not be found in database"), device) - return entry - - def update_device_up(self, rpc_context, **kwargs): - """Device is up on agent.""" - agent_id = kwargs.get('agent_id') - device = kwargs.get('device') - host = kwargs.get('host') - port = ovs_db_v2.get_port(device) - LOG.debug(_("Device %(device)s up on %(agent_id)s"), - {'device': device, 'agent_id': agent_id}) - plugin = manager.NeutronManager.get_plugin() - if port: - if (host and - not plugin.get_port_host(rpc_context, port['id']) == host): - LOG.debug(_("Device %(device)s not bound to the" - " agent host %(host)s"), - {'device': device, 'host': host}) - return - elif port['status'] != q_const.PORT_STATUS_ACTIVE: - ovs_db_v2.set_port_status(port['id'], - q_const.PORT_STATUS_ACTIVE) - else: - LOG.debug(_("%s can not be found in database"), device) - - def tunnel_sync(self, rpc_context, **kwargs): - """Update new tunnel. - - Updates the datbase with the tunnel IP. All listening agents will also - be notified about the new tunnel IP. - """ - tunnel_ip = kwargs.get('tunnel_ip') - # Update the database with the IP - tunnel = ovs_db_v2.add_tunnel_endpoint(tunnel_ip) - tunnels = ovs_db_v2.get_tunnel_endpoints() - entry = dict() - entry['tunnels'] = tunnels - # Notify all other listening agents - self.notifier.tunnel_update(rpc_context, tunnel.ip_address, - tunnel.id, self.tunnel_type) - # Return the list of tunnels IP's to the agent - return entry - - -class AgentNotifierApi(rpc_compat.RpcProxy, - sg_rpc.SecurityGroupAgentRpcApiMixin): - '''Agent side of the openvswitch rpc API. - - API version history: - 1.0 - Initial version. 
- - ''' - - BASE_RPC_API_VERSION = '1.0' - - def __init__(self, topic): - super(AgentNotifierApi, self).__init__( - topic=topic, default_version=self.BASE_RPC_API_VERSION) - self.topic_network_delete = topics.get_topic_name(topic, - topics.NETWORK, - topics.DELETE) - self.topic_port_update = topics.get_topic_name(topic, - topics.PORT, - topics.UPDATE) - self.topic_tunnel_update = topics.get_topic_name(topic, - constants.TUNNEL, - topics.UPDATE) - - def network_delete(self, context, network_id): - self.fanout_cast(context, - self.make_msg('network_delete', - network_id=network_id), - topic=self.topic_network_delete) - - def port_update(self, context, port, network_type, segmentation_id, - physical_network): - self.fanout_cast(context, - self.make_msg('port_update', - port=port, - network_type=network_type, - segmentation_id=segmentation_id, - physical_network=physical_network), - topic=self.topic_port_update) - - def tunnel_update(self, context, tunnel_ip, tunnel_id, tunnel_type): - self.fanout_cast(context, - self.make_msg('tunnel_update', - tunnel_ip=tunnel_ip, - tunnel_id=tunnel_id, - tunnel_type=tunnel_type), - topic=self.topic_tunnel_update) - - -class OVSNeutronPluginV2(db_base_plugin_v2.NeutronDbPluginV2, - external_net_db.External_net_db_mixin, - extraroute_db.ExtraRoute_db_mixin, - l3_gwmode_db.L3_NAT_db_mixin, - sg_db_rpc.SecurityGroupServerRpcMixin, - l3_agentschedulers_db.L3AgentSchedulerDbMixin, - agentschedulers_db.DhcpAgentSchedulerDbMixin, - portbindings_db.PortBindingMixin, - extradhcpopt_db.ExtraDhcpOptMixin, - addr_pair_db.AllowedAddressPairsMixin): - - """Implement the Neutron abstractions using Open vSwitch. - - Depending on whether tunneling is enabled, either a GRE, VXLAN tunnel or - a new VLAN is created for each network. An agent is relied upon to - perform the actual OVS configuration on each host. - - The provider extension is also supported. 
As discussed in - https://bugs.launchpad.net/neutron/+bug/1023156, this class could - be simplified, and filtering on extended attributes could be - handled, by adding support for extended attributes to the - NeutronDbPluginV2 base class. When that occurs, this class should - be updated to take advantage of it. - - The port binding extension enables an external application relay - information to and from the plugin. - """ - - # This attribute specifies whether the plugin supports or not - # bulk/pagination/sorting operations. Name mangling is used in - # order to ensure it is qualified by class - __native_bulk_support = True - __native_pagination_support = True - __native_sorting_support = True - - _supported_extension_aliases = ["provider", "external-net", "router", - "ext-gw-mode", "binding", "quotas", - "security-group", "agent", "extraroute", - "l3_agent_scheduler", - "dhcp_agent_scheduler", - "extra_dhcp_opt", - "allowed-address-pairs"] - - @property - def supported_extension_aliases(self): - if not hasattr(self, '_aliases'): - aliases = self._supported_extension_aliases[:] - sg_rpc.disable_security_group_extension_by_config(aliases) - self._aliases = aliases - return self._aliases - - db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( - attributes.NETWORKS, ['_extend_network_dict_provider_ovs']) - - def __init__(self, configfile=None): - super(OVSNeutronPluginV2, self).__init__() - self.base_binding_dict = { - portbindings.VIF_TYPE: portbindings.VIF_TYPE_OVS, - portbindings.VIF_DETAILS: { - # TODO(rkukura): Replace with new VIF security details - portbindings.CAP_PORT_FILTER: - 'security-group' in self.supported_extension_aliases, - portbindings.OVS_HYBRID_PLUG: True}} - self._parse_network_vlan_ranges() - ovs_db_v2.sync_vlan_allocations(self.network_vlan_ranges) - self.tenant_network_type = cfg.CONF.OVS.tenant_network_type - if self.tenant_network_type not in [svc_constants.TYPE_LOCAL, - svc_constants.TYPE_VLAN, - svc_constants.TYPE_GRE, - 
svc_constants.TYPE_VXLAN, - svc_constants.TYPE_NONE]: - LOG.error(_("Invalid tenant_network_type: %s. " - "Server terminated!"), - self.tenant_network_type) - sys.exit(1) - self.enable_tunneling = cfg.CONF.OVS.enable_tunneling - self.tunnel_type = None - if self.enable_tunneling: - self.tunnel_type = (cfg.CONF.OVS.tunnel_type or - svc_constants.TYPE_GRE) - elif cfg.CONF.OVS.tunnel_type: - self.tunnel_type = cfg.CONF.OVS.tunnel_type - self.enable_tunneling = True - self.tunnel_id_ranges = [] - if self.enable_tunneling: - self._parse_tunnel_id_ranges() - ovs_db_v2.sync_tunnel_allocations(self.tunnel_id_ranges) - elif self.tenant_network_type in constants.TUNNEL_NETWORK_TYPES: - LOG.error(_("Tunneling disabled but tenant_network_type is '%s'. " - "Server terminated!"), self.tenant_network_type) - sys.exit(1) - self.setup_rpc() - self.network_scheduler = importutils.import_object( - cfg.CONF.network_scheduler_driver - ) - self.router_scheduler = importutils.import_object( - cfg.CONF.router_scheduler_driver - ) - - def setup_rpc(self): - # RPC support - self.service_topics = {svc_constants.CORE: topics.PLUGIN, - svc_constants.L3_ROUTER_NAT: topics.L3PLUGIN} - self.conn = rpc_compat.create_connection(new=True) - self.notifier = AgentNotifierApi(topics.AGENT) - self.agent_notifiers[q_const.AGENT_TYPE_DHCP] = ( - dhcp_rpc_agent_api.DhcpAgentNotifyAPI() - ) - self.agent_notifiers[q_const.AGENT_TYPE_L3] = ( - l3_rpc_agent_api.L3AgentNotifyAPI() - ) - self.endpoints = [OVSRpcCallbacks(self.notifier, self.tunnel_type), - agents_db.AgentExtRpcCallback()] - for svc_topic in self.service_topics.values(): - self.conn.create_consumer(svc_topic, self.endpoints, fanout=False) - # Consume from all consumers in threads - self.conn.consume_in_threads() - - def _parse_network_vlan_ranges(self): - try: - self.network_vlan_ranges = plugin_utils.parse_network_vlan_ranges( - cfg.CONF.OVS.network_vlan_ranges) - except Exception as ex: - LOG.error(_("%s. 
Server terminated!"), ex) - sys.exit(1) - LOG.info(_("Network VLAN ranges: %s"), self.network_vlan_ranges) - - def _parse_tunnel_id_ranges(self): - for entry in cfg.CONF.OVS.tunnel_id_ranges: - entry = entry.strip() - try: - tun_min, tun_max = entry.split(':') - self.tunnel_id_ranges.append((int(tun_min), int(tun_max))) - except ValueError as ex: - LOG.error(_("Invalid tunnel ID range: " - "'%(range)s' - %(e)s. Server terminated!"), - {'range': entry, 'e': ex}) - sys.exit(1) - LOG.info(_("Tunnel ID ranges: %s"), self.tunnel_id_ranges) - - def _extend_network_dict_provider_ovs(self, network, net_db, - net_binding=None): - # this method used in two cases: when binding is provided explicitly - # and when it is a part of db model object - binding = net_db.binding if net_db else net_binding - network[provider.NETWORK_TYPE] = binding.network_type - if binding.network_type in constants.TUNNEL_NETWORK_TYPES: - network[provider.PHYSICAL_NETWORK] = None - network[provider.SEGMENTATION_ID] = binding.segmentation_id - elif binding.network_type == svc_constants.TYPE_FLAT: - network[provider.PHYSICAL_NETWORK] = binding.physical_network - network[provider.SEGMENTATION_ID] = None - elif binding.network_type == svc_constants.TYPE_VLAN: - network[provider.PHYSICAL_NETWORK] = binding.physical_network - network[provider.SEGMENTATION_ID] = binding.segmentation_id - elif binding.network_type == svc_constants.TYPE_LOCAL: - network[provider.PHYSICAL_NETWORK] = None - network[provider.SEGMENTATION_ID] = None - - def _process_provider_create(self, context, attrs): - network_type = attrs.get(provider.NETWORK_TYPE) - physical_network = attrs.get(provider.PHYSICAL_NETWORK) - segmentation_id = attrs.get(provider.SEGMENTATION_ID) - - network_type_set = attributes.is_attr_set(network_type) - physical_network_set = attributes.is_attr_set(physical_network) - segmentation_id_set = attributes.is_attr_set(segmentation_id) - - if not (network_type_set or physical_network_set or - segmentation_id_set): 
- return (None, None, None) - - if not network_type_set: - msg = _("provider:network_type required") - raise n_exc.InvalidInput(error_message=msg) - elif network_type == svc_constants.TYPE_FLAT: - if segmentation_id_set: - msg = _("provider:segmentation_id specified for flat network") - raise n_exc.InvalidInput(error_message=msg) - else: - segmentation_id = constants.FLAT_VLAN_ID - elif network_type == svc_constants.TYPE_VLAN: - if not segmentation_id_set: - msg = _("provider:segmentation_id required") - raise n_exc.InvalidInput(error_message=msg) - if not utils.is_valid_vlan_tag(segmentation_id): - msg = (_("provider:segmentation_id out of range " - "(%(min_id)s through %(max_id)s)") % - {'min_id': q_const.MIN_VLAN_TAG, - 'max_id': q_const.MAX_VLAN_TAG}) - raise n_exc.InvalidInput(error_message=msg) - elif network_type in constants.TUNNEL_NETWORK_TYPES: - if not self.enable_tunneling: - msg = _("%s networks are not enabled") % network_type - raise n_exc.InvalidInput(error_message=msg) - if physical_network_set: - msg = _("provider:physical_network specified for %s " - "network") % network_type - raise n_exc.InvalidInput(error_message=msg) - else: - physical_network = None - if not segmentation_id_set: - msg = _("provider:segmentation_id required") - raise n_exc.InvalidInput(error_message=msg) - elif network_type == svc_constants.TYPE_LOCAL: - if physical_network_set: - msg = _("provider:physical_network specified for local " - "network") - raise n_exc.InvalidInput(error_message=msg) - else: - physical_network = None - if segmentation_id_set: - msg = _("provider:segmentation_id specified for local " - "network") - raise n_exc.InvalidInput(error_message=msg) - else: - segmentation_id = None - else: - msg = _("provider:network_type %s not supported") % network_type - raise n_exc.InvalidInput(error_message=msg) - - if network_type in [svc_constants.TYPE_VLAN, svc_constants.TYPE_FLAT]: - if physical_network_set: - if physical_network not in self.network_vlan_ranges: - 
msg = _("Unknown provider:physical_network " - "%s") % physical_network - raise n_exc.InvalidInput(error_message=msg) - elif 'default' in self.network_vlan_ranges: - physical_network = 'default' - else: - msg = _("provider:physical_network required") - raise n_exc.InvalidInput(error_message=msg) - - return (network_type, physical_network, segmentation_id) - - def create_network(self, context, network): - (network_type, physical_network, - segmentation_id) = self._process_provider_create(context, - network['network']) - - session = context.session - #set up default security groups - tenant_id = self._get_tenant_id_for_create( - context, network['network']) - self._ensure_default_security_group(context, tenant_id) - - with session.begin(subtransactions=True): - if not network_type: - # tenant network - network_type = self.tenant_network_type - if network_type == svc_constants.TYPE_NONE: - raise n_exc.TenantNetworksDisabled() - elif network_type == svc_constants.TYPE_VLAN: - (physical_network, - segmentation_id) = ovs_db_v2.reserve_vlan(session) - elif network_type in constants.TUNNEL_NETWORK_TYPES: - segmentation_id = ovs_db_v2.reserve_tunnel(session) - # no reservation needed for TYPE_LOCAL - else: - # provider network - if network_type in [svc_constants.TYPE_VLAN, - svc_constants.TYPE_FLAT]: - ovs_db_v2.reserve_specific_vlan(session, physical_network, - segmentation_id) - elif network_type in constants.TUNNEL_NETWORK_TYPES: - ovs_db_v2.reserve_specific_tunnel(session, segmentation_id) - # no reservation needed for TYPE_LOCAL - net = super(OVSNeutronPluginV2, self).create_network(context, - network) - binding = ovs_db_v2.add_network_binding(session, net['id'], - network_type, - physical_network, - segmentation_id) - - self._process_l3_create(context, net, network['network']) - # passing None as db model to use binding object - self._extend_network_dict_provider_ovs(net, None, binding) - # note - exception will rollback entire transaction - LOG.debug(_("Created 
network: %s"), net['id']) - return net - - def update_network(self, context, id, network): - provider._raise_if_updates_provider_attributes(network['network']) - - session = context.session - with session.begin(subtransactions=True): - net = super(OVSNeutronPluginV2, self).update_network(context, id, - network) - self._process_l3_update(context, net, network['network']) - return net - - def delete_network(self, context, id): - session = context.session - with session.begin(subtransactions=True): - binding = ovs_db_v2.get_network_binding(session, id) - self._process_l3_delete(context, id) - super(OVSNeutronPluginV2, self).delete_network(context, id) - if binding.network_type in constants.TUNNEL_NETWORK_TYPES: - ovs_db_v2.release_tunnel(session, binding.segmentation_id, - self.tunnel_id_ranges) - elif binding.network_type in [svc_constants.TYPE_VLAN, - svc_constants.TYPE_FLAT]: - ovs_db_v2.release_vlan(session, binding.physical_network, - binding.segmentation_id, - self.network_vlan_ranges) - # the network_binding record is deleted via cascade from - # the network record, so explicit removal is not necessary - self.notifier.network_delete(context, id) - - def get_network(self, context, id, fields=None): - session = context.session - with session.begin(subtransactions=True): - net = super(OVSNeutronPluginV2, self).get_network(context, - id, None) - return self._fields(net, fields) - - def get_networks(self, context, filters=None, fields=None, - sorts=None, - limit=None, marker=None, page_reverse=False): - session = context.session - with session.begin(subtransactions=True): - nets = super(OVSNeutronPluginV2, - self).get_networks(context, filters, None, sorts, - limit, marker, page_reverse) - - return [self._fields(net, fields) for net in nets] - - def create_port(self, context, port): - # Set port status as 'DOWN'. 
This will be updated by agent - port['port']['status'] = q_const.PORT_STATUS_DOWN - port_data = port['port'] - session = context.session - with session.begin(subtransactions=True): - self._ensure_default_security_group_on_port(context, port) - sgids = self._get_security_groups_on_port(context, port) - dhcp_opts = port['port'].get(edo_ext.EXTRADHCPOPTS, []) - port = super(OVSNeutronPluginV2, self).create_port(context, port) - self._process_portbindings_create_and_update(context, - port_data, port) - self._process_port_create_security_group(context, port, sgids) - self._process_port_create_extra_dhcp_opts(context, port, - dhcp_opts) - port[addr_pair.ADDRESS_PAIRS] = ( - self._process_create_allowed_address_pairs( - context, port, - port_data.get(addr_pair.ADDRESS_PAIRS))) - self.notify_security_groups_member_updated(context, port) - return port - - def update_port(self, context, id, port): - session = context.session - need_port_update_notify = False - with session.begin(subtransactions=True): - original_port = super(OVSNeutronPluginV2, self).get_port( - context, id) - updated_port = super(OVSNeutronPluginV2, self).update_port( - context, id, port) - if addr_pair.ADDRESS_PAIRS in port['port']: - need_port_update_notify |= ( - self.update_address_pairs_on_port(context, id, port, - original_port, - updated_port)) - need_port_update_notify |= self.update_security_group_on_port( - context, id, port, original_port, updated_port) - self._process_portbindings_create_and_update(context, - port['port'], - updated_port) - need_port_update_notify |= self._update_extra_dhcp_opts_on_port( - context, id, port, updated_port) - - need_port_update_notify |= self.is_security_group_member_updated( - context, original_port, updated_port) - if original_port['admin_state_up'] != updated_port['admin_state_up']: - need_port_update_notify = True - - if need_port_update_notify: - binding = ovs_db_v2.get_network_binding(None, - updated_port['network_id']) - self.notifier.port_update(context, 
updated_port, - binding.network_type, - binding.segmentation_id, - binding.physical_network) - return updated_port - - def delete_port(self, context, id, l3_port_check=True): - - # if needed, check to see if this is a port owned by - # and l3-router. If so, we should prevent deletion. - if l3_port_check: - self.prevent_l3_port_deletion(context, id) - - session = context.session - with session.begin(subtransactions=True): - self.disassociate_floatingips(context, id) - port = self.get_port(context, id) - self._delete_port_security_group_bindings(context, id) - super(OVSNeutronPluginV2, self).delete_port(context, id) - - self.notify_security_groups_member_updated(context, port) diff --git a/neutron/plugins/plumgrid/README b/neutron/plugins/plumgrid/README deleted file mode 100644 index e7118307d..000000000 --- a/neutron/plugins/plumgrid/README +++ /dev/null @@ -1,8 +0,0 @@ -PLUMgrid Neutron Plugin for Virtual Network Infrastructure (VNI) - -This plugin implements Neutron v2 APIs and helps configure -L2/L3 virtual networks consisting of PLUMgrid Platform. -Implements External Networks and Port Binding Extension - -For more details on use please refer to: -http://wiki.openstack.org/PLUMgrid-Neutron diff --git a/neutron/plugins/plumgrid/__init__.py b/neutron/plugins/plumgrid/__init__.py deleted file mode 100644 index 39e9b8d13..000000000 --- a/neutron/plugins/plumgrid/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2013 PLUMgrid, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Edgar Magana, emagana@plumgrid.com, PLUMgrid, Inc. diff --git a/neutron/plugins/plumgrid/common/__init__.py b/neutron/plugins/plumgrid/common/__init__.py deleted file mode 100644 index 39e9b8d13..000000000 --- a/neutron/plugins/plumgrid/common/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2013 PLUMgrid, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Edgar Magana, emagana@plumgrid.com, PLUMgrid, Inc. diff --git a/neutron/plugins/plumgrid/common/exceptions.py b/neutron/plugins/plumgrid/common/exceptions.py deleted file mode 100644 index b2862f9cc..000000000 --- a/neutron/plugins/plumgrid/common/exceptions.py +++ /dev/null @@ -1,30 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2013 PLUMgrid, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Edgar Magana, emagana@plumgrid.com, PLUMgrid, Inc. - - -"""Neutron PLUMgrid Plugin exceptions""" - -from neutron.common import exceptions as base_exec - - -class PLUMgridException(base_exec.NeutronException): - message = _("PLUMgrid Plugin Error: %(err_msg)s") - - -class PLUMgridConnectionFailed(PLUMgridException): - message = _("Connection failed with PLUMgrid Director: %(err_msg)s") diff --git a/neutron/plugins/plumgrid/drivers/__init__.py b/neutron/plugins/plumgrid/drivers/__init__.py deleted file mode 100644 index 09cf65a01..000000000 --- a/neutron/plugins/plumgrid/drivers/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# Copyright 2013 PLUMgrid, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Edgar Magana, emagana@plumgrid.com, PLUMgrid, Inc. diff --git a/neutron/plugins/plumgrid/drivers/fake_plumlib.py b/neutron/plugins/plumgrid/drivers/fake_plumlib.py deleted file mode 100644 index bf0cb9f9a..000000000 --- a/neutron/plugins/plumgrid/drivers/fake_plumlib.py +++ /dev/null @@ -1,99 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# Copyright 2013 PLUMgrid, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Edgar Magana, emagana@plumgrid.com, PLUMgrid, Inc. - -from neutron.extensions import providernet as provider -from neutron.openstack.common import log as logging - -LOG = logging.getLogger(__name__) - - -class Plumlib(): - """ - Class PLUMgrid Fake Library. This library is a by-pass implementation - for the PLUMgrid Library. This class is being used by the unit test - integration in Neutron. - """ - - def __init__(self): - LOG.info(_('Python PLUMgrid Fake Library Started ')) - pass - - def director_conn(self, director_plumgrid, director_port, timeout, - director_admin, director_password): - LOG.info(_('Fake Director: %s'), - director_plumgrid + ':' + director_port) - pass - - def create_network(self, tenant_id, net_db, network): - net_db["network"] = {} - for key in (provider.NETWORK_TYPE, - provider.PHYSICAL_NETWORK, - provider.SEGMENTATION_ID): - net_db["network"][key] = network["network"][key] - return net_db - - def update_network(self, tenant_id, net_id): - pass - - def delete_network(self, net_db, net_id): - pass - - def create_subnet(self, sub_db, net_db, ipnet): - pass - - def update_subnet(self, orig_sub_db, new_sub_db, ipnet): - pass - - def delete_subnet(self, tenant_id, net_db, net_id): - pass - - def create_port(self, port_db, router_db): - pass - - def update_port(self, port_db, router_db): - pass - - def delete_port(self, port_db, router_db): - pass - - def create_router(self, tenant_id, router_db): - pass - - def update_router(self, router_db, router_id): - pass - - def delete_router(self, tenant_id, router_id): - pass - - def 
add_router_interface(self, tenant_id, router_id, port_db, ipnet): - pass - - def remove_router_interface(self, tenant_id, net_id, router_id): - pass - - def create_floatingip(self, floating_ip): - pass - - def update_floatingip(self, floating_ip_orig, floating_ip, id): - pass - - def delete_floatingip(self, floating_ip_orig, id): - pass - - def disassociate_floatingips(self, fip, port_id): - return dict((key, fip[key]) for key in ("id", "floating_network_id", - "floating_ip_address")) diff --git a/neutron/plugins/plumgrid/drivers/plumlib.py b/neutron/plugins/plumgrid/drivers/plumlib.py deleted file mode 100644 index 8e2607e85..000000000 --- a/neutron/plugins/plumgrid/drivers/plumlib.py +++ /dev/null @@ -1,100 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# Copyright 2013 PLUMgrid, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Edgar Magana, emagana@plumgrid.com, PLUMgrid, Inc. - -""" -Neutron Plug-in for PLUMgrid Virtual Networking Infrastructure (VNI) -This plugin will forward authenticated REST API calls -to the PLUMgrid Network Management System called Director -""" - -from plumgridlib import plumlib - -from neutron.openstack.common import log as logging - -LOG = logging.getLogger(__name__) - - -class Plumlib(object): - """ - Class PLUMgrid Python Library. This library is a third-party tool - needed by PLUMgrid plugin to implement all core API in Neutron. 
- """ - - def __init__(self): - LOG.info(_('Python PLUMgrid Library Started ')) - - def director_conn(self, director_plumgrid, director_port, timeout, - director_admin, director_password): - self.plumlib = plumlib.Plumlib(director_plumgrid, - director_port, - timeout, - director_admin, - director_password) - - def create_network(self, tenant_id, net_db, network): - self.plumlib.create_network(tenant_id, net_db, network) - - def update_network(self, tenant_id, net_id): - self.plumlib.update_network(tenant_id, net_id) - - def delete_network(self, net_db, net_id): - self.plumlib.delete_network(net_db, net_id) - - def create_subnet(self, sub_db, net_db, ipnet): - self.plumlib.create_subnet(sub_db, net_db, ipnet) - - def update_subnet(self, orig_sub_db, new_sub_db, ipnet): - self.plumlib.update_subnet(orig_sub_db, new_sub_db, ipnet) - - def delete_subnet(self, tenant_id, net_db, net_id): - self.plumlib.delete_subnet(tenant_id, net_db, net_id) - - def create_port(self, port_db, router_db): - self.plumlib.create_port(port_db, router_db) - - def update_port(self, port_db, router_db): - self.plumlib.update_port(port_db, router_db) - - def delete_port(self, port_db, router_db): - self.plumlib.delete_port(port_db, router_db) - - def create_router(self, tenant_id, router_db): - self.plumlib.create_router(tenant_id, router_db) - - def update_router(self, router_db, router_id): - self.plumlib.update_router(router_db, router_id) - - def delete_router(self, tenant_id, router_id): - self.plumlib.delete_router(tenant_id, router_id) - - def add_router_interface(self, tenant_id, router_id, port_db, ipnet): - self.plumlib.add_router_interface(tenant_id, router_id, port_db, ipnet) - - def remove_router_interface(self, tenant_id, net_id, router_id): - self.plumlib.remove_router_interface(tenant_id, net_id, router_id) - - def create_floatingip(self, floating_ip): - self.plumlib.create_floatingip(floating_ip) - - def update_floatingip(self, floating_ip_orig, floating_ip, id): - 
self.plumlib.update_floatingip(floating_ip_orig, floating_ip, id) - - def delete_floatingip(self, floating_ip_orig, id): - self.plumlib.delete_floatingip(floating_ip_orig, id) - - def disassociate_floatingips(self, floating_ip, port_id): - self.plumlib.disassociate_floatingips(floating_ip, port_id) diff --git a/neutron/plugins/plumgrid/plumgrid_plugin/__init__.py b/neutron/plugins/plumgrid/plumgrid_plugin/__init__.py deleted file mode 100644 index 39e9b8d13..000000000 --- a/neutron/plugins/plumgrid/plumgrid_plugin/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2013 PLUMgrid, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Edgar Magana, emagana@plumgrid.com, PLUMgrid, Inc. diff --git a/neutron/plugins/plumgrid/plumgrid_plugin/plugin_ver.py b/neutron/plugins/plumgrid/plumgrid_plugin/plugin_ver.py deleted file mode 100644 index 5a47438c1..000000000 --- a/neutron/plugins/plumgrid/plumgrid_plugin/plugin_ver.py +++ /dev/null @@ -1,19 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2013 PLUMgrid, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Edgar Magana, emagana@plumgrid.com, PLUMgrid, Inc. - -VERSION = "0.2" diff --git a/neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py b/neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py deleted file mode 100644 index d28b90ae0..000000000 --- a/neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py +++ /dev/null @@ -1,604 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# Copyright 2013 PLUMgrid, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Edgar Magana, emagana@plumgrid.com, PLUMgrid, Inc. 
- -""" -Neutron Plug-in for PLUMgrid Virtual Networking Infrastructure (VNI) -This plugin will forward authenticated REST API calls -to the PLUMgrid Network Management System called Director -""" - -import netaddr -from oslo.config import cfg -from sqlalchemy.orm import exc as sa_exc - -from neutron.api.v2 import attributes -from neutron.common import constants -from neutron.db import db_base_plugin_v2 -from neutron.db import external_net_db -from neutron.db import l3_db -from neutron.db import portbindings_db -from neutron.db import quota_db # noqa -from neutron.extensions import portbindings -from neutron.openstack.common import importutils -from neutron.openstack.common import log as logging -from neutron.plugins.plumgrid.common import exceptions as plum_excep -from neutron.plugins.plumgrid.plumgrid_plugin import plugin_ver - -LOG = logging.getLogger(__name__) - -director_server_opts = [ - cfg.StrOpt('director_server', default='localhost', - help=_("PLUMgrid Director server to connect to")), - cfg.StrOpt('director_server_port', default='8080', - help=_("PLUMgrid Director server port to connect to")), - cfg.StrOpt('username', default='username', - help=_("PLUMgrid Director admin username")), - cfg.StrOpt('password', default='password', secret=True, - help=_("PLUMgrid Director admin password")), - cfg.IntOpt('servertimeout', default=5, - help=_("PLUMgrid Director server timeout")), - cfg.StrOpt('driver', - default="neutron.plugins.plumgrid.drivers.plumlib.Plumlib", - help=_("PLUMgrid Driver")), ] - -cfg.CONF.register_opts(director_server_opts, "plumgriddirector") - - -class NeutronPluginPLUMgridV2(db_base_plugin_v2.NeutronDbPluginV2, - portbindings_db.PortBindingMixin, - external_net_db.External_net_db_mixin, - l3_db.L3_NAT_db_mixin): - - supported_extension_aliases = ["external-net", "router", "binding", - "quotas", "provider"] - - binding_view = "extension:port_binding:view" - binding_set = "extension:port_binding:set" - - def __init__(self): - 
LOG.info(_('Neutron PLUMgrid Director: Starting Plugin')) - - super(NeutronPluginPLUMgridV2, self).__init__() - self.plumgrid_init() - - LOG.debug(_('Neutron PLUMgrid Director: Neutron server with ' - 'PLUMgrid Plugin has started')) - - def plumgrid_init(self): - """PLUMgrid initialization.""" - director_plumgrid = cfg.CONF.plumgriddirector.director_server - director_port = cfg.CONF.plumgriddirector.director_server_port - director_admin = cfg.CONF.plumgriddirector.username - director_password = cfg.CONF.plumgriddirector.password - timeout = cfg.CONF.plumgriddirector.servertimeout - plum_driver = cfg.CONF.plumgriddirector.driver - - # PLUMgrid Director info validation - LOG.info(_('Neutron PLUMgrid Director: %s'), director_plumgrid) - self._plumlib = importutils.import_object(plum_driver) - self._plumlib.director_conn(director_plumgrid, director_port, timeout, - director_admin, director_password) - - def create_network(self, context, network): - """Create Neutron network. - - Creates a PLUMgrid-based bridge. - """ - - LOG.debug(_('Neutron PLUMgrid Director: create_network() called')) - - # Plugin DB - Network Create and validation - tenant_id = self._get_tenant_id_for_create(context, - network["network"]) - self._network_admin_state(network) - - with context.session.begin(subtransactions=True): - net_db = super(NeutronPluginPLUMgridV2, - self).create_network(context, network) - # Propagate all L3 data into DB - self._process_l3_create(context, net_db, network['network']) - - try: - LOG.debug(_('PLUMgrid Library: create_network() called')) - self._plumlib.create_network(tenant_id, net_db, network) - - except Exception as err_message: - raise plum_excep.PLUMgridException(err_msg=err_message) - - # Return created network - return net_db - - def update_network(self, context, net_id, network): - """Update Neutron network. - - Updates a PLUMgrid-based bridge. 
- """ - - LOG.debug(_("Neutron PLUMgrid Director: update_network() called")) - self._network_admin_state(network) - tenant_id = self._get_tenant_id_for_create(context, network["network"]) - - with context.session.begin(subtransactions=True): - # Plugin DB - Network Update - net_db = super( - NeutronPluginPLUMgridV2, self).update_network(context, - net_id, network) - self._process_l3_update(context, net_db, network['network']) - - try: - LOG.debug(_("PLUMgrid Library: update_network() called")) - self._plumlib.update_network(tenant_id, net_id) - - except Exception as err_message: - raise plum_excep.PLUMgridException(err_msg=err_message) - - # Return updated network - return net_db - - def delete_network(self, context, net_id): - """Delete Neutron network. - - Deletes a PLUMgrid-based bridge. - """ - - LOG.debug(_("Neutron PLUMgrid Director: delete_network() called")) - net_db = super(NeutronPluginPLUMgridV2, - self).get_network(context, net_id) - - with context.session.begin(subtransactions=True): - self._process_l3_delete(context, net_id) - # Plugin DB - Network Delete - super(NeutronPluginPLUMgridV2, self).delete_network(context, - net_id) - - try: - LOG.debug(_("PLUMgrid Library: update_network() called")) - self._plumlib.delete_network(net_db, net_id) - - except Exception as err_message: - raise plum_excep.PLUMgridException(err_msg=err_message) - - def create_port(self, context, port): - """Create Neutron port. - - Creates a PLUMgrid-based port on the specific Virtual Network - Function (VNF). - """ - LOG.debug(_("Neutron PLUMgrid Director: create_port() called")) - - # Port operations on PLUMgrid Director is an automatic operation - # from the VIF driver operations in Nova. 
- # It requires admin_state_up to be True - - port["port"]["admin_state_up"] = True - - with context.session.begin(subtransactions=True): - # Plugin DB - Port Create and Return port - port_db = super(NeutronPluginPLUMgridV2, self).create_port(context, - port) - device_id = port_db["device_id"] - if port_db["device_owner"] == constants.DEVICE_OWNER_ROUTER_GW: - router_db = self._get_router(context, device_id) - else: - router_db = None - - try: - LOG.debug(_("PLUMgrid Library: create_port() called")) - self._plumlib.create_port(port_db, router_db) - - except Exception as err_message: - raise plum_excep.PLUMgridException(err_msg=err_message) - - # Plugin DB - Port Create and Return port - return self._port_viftype_binding(context, port_db) - - def update_port(self, context, port_id, port): - """Update Neutron port. - - Updates a PLUMgrid-based port on the specific Virtual Network - Function (VNF). - """ - LOG.debug(_("Neutron PLUMgrid Director: update_port() called")) - - with context.session.begin(subtransactions=True): - # Plugin DB - Port Create and Return port - port_db = super(NeutronPluginPLUMgridV2, self).update_port( - context, port_id, port) - device_id = port_db["device_id"] - if port_db["device_owner"] == constants.DEVICE_OWNER_ROUTER_GW: - router_db = self._get_router(context, device_id) - else: - router_db = None - try: - LOG.debug(_("PLUMgrid Library: create_port() called")) - self._plumlib.update_port(port_db, router_db) - - except Exception as err_message: - raise plum_excep.PLUMgridException(err_msg=err_message) - - # Plugin DB - Port Update - return self._port_viftype_binding(context, port_db) - - def delete_port(self, context, port_id, l3_port_check=True): - """Delete Neutron port. - - Deletes a PLUMgrid-based port on the specific Virtual Network - Function (VNF). 
- """ - - LOG.debug(_("Neutron PLUMgrid Director: delete_port() called")) - - with context.session.begin(subtransactions=True): - # Plugin DB - Port Create and Return port - port_db = super(NeutronPluginPLUMgridV2, - self).get_port(context, port_id) - self.disassociate_floatingips(context, port_id) - super(NeutronPluginPLUMgridV2, self).delete_port(context, port_id) - - if port_db["device_owner"] == constants.DEVICE_OWNER_ROUTER_GW: - device_id = port_db["device_id"] - router_db = self._get_router(context, device_id) - else: - router_db = None - try: - LOG.debug(_("PLUMgrid Library: delete_port() called")) - self._plumlib.delete_port(port_db, router_db) - - except Exception as err_message: - raise plum_excep.PLUMgridException(err_msg=err_message) - - def get_port(self, context, id, fields=None): - with context.session.begin(subtransactions=True): - port_db = super(NeutronPluginPLUMgridV2, - self).get_port(context, id, fields) - - self._port_viftype_binding(context, port_db) - return self._fields(port_db, fields) - - def get_ports(self, context, filters=None, fields=None): - with context.session.begin(subtransactions=True): - ports_db = super(NeutronPluginPLUMgridV2, - self).get_ports(context, filters, fields) - for port_db in ports_db: - self._port_viftype_binding(context, port_db) - return [self._fields(port, fields) for port in ports_db] - - def create_subnet(self, context, subnet): - """Create Neutron subnet. - - Creates a PLUMgrid-based DHCP and NAT Virtual Network - Functions (VNFs). 
- """ - - LOG.debug(_("Neutron PLUMgrid Director: create_subnet() called")) - - with context.session.begin(subtransactions=True): - # Plugin DB - Subnet Create - net_db = super(NeutronPluginPLUMgridV2, self).get_network( - context, subnet['subnet']['network_id'], fields=None) - s = subnet['subnet'] - ipnet = netaddr.IPNetwork(s['cidr']) - - # PLUMgrid Director reserves the last IP address for GW - # when is not defined - if s['gateway_ip'] is attributes.ATTR_NOT_SPECIFIED: - gw_ip = str(netaddr.IPAddress(ipnet.last - 1)) - subnet['subnet']['gateway_ip'] = gw_ip - - # PLUMgrid reserves the first IP - if s['allocation_pools'] == attributes.ATTR_NOT_SPECIFIED: - allocation_pool = self._allocate_pools_for_subnet(context, s) - subnet['subnet']['allocation_pools'] = allocation_pool - - sub_db = super(NeutronPluginPLUMgridV2, self).create_subnet( - context, subnet) - - try: - LOG.debug(_("PLUMgrid Library: create_subnet() called")) - self._plumlib.create_subnet(sub_db, net_db, ipnet) - except Exception as err_message: - raise plum_excep.PLUMgridException(err_msg=err_message) - - return sub_db - - def delete_subnet(self, context, subnet_id): - """Delete subnet core Neutron API.""" - - LOG.debug(_("Neutron PLUMgrid Director: delete_subnet() called")) - # Collecting subnet info - sub_db = self._get_subnet(context, subnet_id) - tenant_id = self._get_tenant_id_for_create(context, subnet_id) - net_id = sub_db["network_id"] - net_db = self.get_network(context, net_id) - - with context.session.begin(subtransactions=True): - # Plugin DB - Subnet Delete - super(NeutronPluginPLUMgridV2, self).delete_subnet( - context, subnet_id) - try: - LOG.debug(_("PLUMgrid Library: delete_subnet() called")) - self._plumlib.delete_subnet(tenant_id, net_db, net_id) - except Exception as err_message: - raise plum_excep.PLUMgridException(err_msg=err_message) - - def update_subnet(self, context, subnet_id, subnet): - """Update subnet core Neutron API.""" - - LOG.debug(_("update_subnet() called")) - # 
Collecting subnet info - orig_sub_db = self._get_subnet(context, subnet_id) - - with context.session.begin(subtransactions=True): - # Plugin DB - Subnet Update - new_sub_db = super(NeutronPluginPLUMgridV2, - self).update_subnet(context, subnet_id, subnet) - ipnet = netaddr.IPNetwork(new_sub_db['cidr']) - - try: - # PLUMgrid Server does not support updating resources yet - LOG.debug(_("PLUMgrid Library: update_network() called")) - self._plumlib.update_subnet(orig_sub_db, new_sub_db, ipnet) - - except Exception as err_message: - raise plum_excep.PLUMgridException(err_msg=err_message) - - return new_sub_db - - def create_router(self, context, router): - """ - Create router extension Neutron API - """ - LOG.debug(_("Neutron PLUMgrid Director: create_router() called")) - - tenant_id = self._get_tenant_id_for_create(context, router["router"]) - - with context.session.begin(subtransactions=True): - - # Create router in DB - router_db = super(NeutronPluginPLUMgridV2, - self).create_router(context, router) - # Create router on the network controller - try: - # Add Router to VND - LOG.debug(_("PLUMgrid Library: create_router() called")) - self._plumlib.create_router(tenant_id, router_db) - except Exception as err_message: - raise plum_excep.PLUMgridException(err_msg=err_message) - - # Return created router - return router_db - - def update_router(self, context, router_id, router): - - LOG.debug(_("Neutron PLUMgrid Director: update_router() called")) - - with context.session.begin(subtransactions=True): - router_db = super(NeutronPluginPLUMgridV2, - self).update_router(context, router_id, router) - try: - LOG.debug(_("PLUMgrid Library: update_router() called")) - self._plumlib.update_router(router_db, router_id) - except Exception as err_message: - raise plum_excep.PLUMgridException(err_msg=err_message) - - # Return updated router - return router_db - - def delete_router(self, context, router_id): - LOG.debug(_("Neutron PLUMgrid Director: delete_router() called")) - - with 
context.session.begin(subtransactions=True): - orig_router = self._get_router(context, router_id) - tenant_id = orig_router["tenant_id"] - - super(NeutronPluginPLUMgridV2, self).delete_router(context, - router_id) - - try: - LOG.debug(_("PLUMgrid Library: delete_router() called")) - self._plumlib.delete_router(tenant_id, router_id) - - except Exception as err_message: - raise plum_excep.PLUMgridException(err_msg=err_message) - - def add_router_interface(self, context, router_id, interface_info): - - LOG.debug(_("Neutron PLUMgrid Director: " - "add_router_interface() called")) - with context.session.begin(subtransactions=True): - # Validate args - router_db = self._get_router(context, router_id) - tenant_id = router_db['tenant_id'] - - # Create interface in DB - int_router = super(NeutronPluginPLUMgridV2, - self).add_router_interface(context, - router_id, - interface_info) - port_db = self._get_port(context, int_router['port_id']) - subnet_id = port_db["fixed_ips"][0]["subnet_id"] - subnet_db = super(NeutronPluginPLUMgridV2, - self)._get_subnet(context, subnet_id) - ipnet = netaddr.IPNetwork(subnet_db['cidr']) - - # Create interface on the network controller - try: - LOG.debug(_("PLUMgrid Library: add_router_interface() called")) - self._plumlib.add_router_interface(tenant_id, router_id, - port_db, ipnet) - - except Exception as err_message: - raise plum_excep.PLUMgridException(err_msg=err_message) - - return int_router - - def remove_router_interface(self, context, router_id, int_info): - - LOG.debug(_("Neutron PLUMgrid Director: " - "remove_router_interface() called")) - with context.session.begin(subtransactions=True): - # Validate args - router_db = self._get_router(context, router_id) - tenant_id = router_db['tenant_id'] - if 'port_id' in int_info: - port = self._get_port(context, int_info['port_id']) - net_id = port['network_id'] - - elif 'subnet_id' in int_info: - subnet_id = int_info['subnet_id'] - subnet = self._get_subnet(context, subnet_id) - net_id = 
subnet['network_id'] - - # Remove router in DB - del_int_router = super(NeutronPluginPLUMgridV2, - self).remove_router_interface(context, - router_id, - int_info) - - try: - LOG.debug(_("PLUMgrid Library: " - "remove_router_interface() called")) - self._plumlib.remove_router_interface(tenant_id, - net_id, router_id) - - except Exception as err_message: - raise plum_excep.PLUMgridException(err_msg=err_message) - - return del_int_router - - def create_floatingip(self, context, floatingip): - LOG.debug(_("Neutron PLUMgrid Director: create_floatingip() called")) - - with context.session.begin(subtransactions=True): - - floating_ip = super(NeutronPluginPLUMgridV2, - self).create_floatingip(context, floatingip) - try: - LOG.debug(_("PLUMgrid Library: create_floatingip() called")) - self._plumlib.create_floatingip(floating_ip) - - except Exception as err_message: - raise plum_excep.PLUMgridException(err_msg=err_message) - - return floating_ip - - def update_floatingip(self, context, id, floatingip): - LOG.debug(_("Neutron PLUMgrid Director: update_floatingip() called")) - - with context.session.begin(subtransactions=True): - floating_ip_orig = super(NeutronPluginPLUMgridV2, - self).get_floatingip(context, id) - floating_ip = super(NeutronPluginPLUMgridV2, - self).update_floatingip(context, id, - floatingip) - try: - LOG.debug(_("PLUMgrid Library: update_floatingip() called")) - self._plumlib.update_floatingip(floating_ip_orig, floating_ip, - id) - - except Exception as err_message: - raise plum_excep.PLUMgridException(err_msg=err_message) - - return floating_ip - - def delete_floatingip(self, context, id): - LOG.debug(_("Neutron PLUMgrid Director: delete_floatingip() called")) - - with context.session.begin(subtransactions=True): - - floating_ip_orig = super(NeutronPluginPLUMgridV2, - self).get_floatingip(context, id) - - super(NeutronPluginPLUMgridV2, self).delete_floatingip(context, id) - - try: - LOG.debug(_("PLUMgrid Library: delete_floatingip() called")) - 
self._plumlib.delete_floatingip(floating_ip_orig, id) - - except Exception as err_message: - raise plum_excep.PLUMgridException(err_msg=err_message) - - def disassociate_floatingips(self, context, port_id): - LOG.debug(_("Neutron PLUMgrid Director: disassociate_floatingips() " - "called")) - - try: - fip_qry = context.session.query(l3_db.FloatingIP) - floating_ip = fip_qry.filter_by(fixed_port_id=port_id).one() - - LOG.debug(_("PLUMgrid Library: disassociate_floatingips()" - " called")) - self._plumlib.disassociate_floatingips(floating_ip, port_id) - - except sa_exc.NoResultFound: - pass - - except Exception as err_message: - raise plum_excep.PLUMgridException(err_msg=err_message) - - super(NeutronPluginPLUMgridV2, - self).disassociate_floatingips(context, port_id) - - """ - Internal PLUMgrid Fuctions - """ - - def _get_plugin_version(self): - return plugin_ver.VERSION - - def _port_viftype_binding(self, context, port): - port[portbindings.VIF_TYPE] = portbindings.VIF_TYPE_IOVISOR - port[portbindings.VIF_DETAILS] = { - # TODO(rkukura): Replace with new VIF security details - portbindings.CAP_PORT_FILTER: - 'security-group' in self.supported_extension_aliases} - return port - - def _network_admin_state(self, network): - if network["network"].get("admin_state_up") is False: - LOG.warning(_("Networks with admin_state_up=False are not " - "supported by PLUMgrid plugin yet.")) - return network - - def _allocate_pools_for_subnet(self, context, subnet): - """Create IP allocation pools for a given subnet - - Pools are defined by the 'allocation_pools' attribute, - a list of dict objects with 'start' and 'end' keys for - defining the pool range. 
- Modified from Neutron DB based class - - """ - - pools = [] - # Auto allocate the pool around gateway_ip - net = netaddr.IPNetwork(subnet['cidr']) - first_ip = net.first + 2 - last_ip = net.last - 1 - gw_ip = int(netaddr.IPAddress(subnet['gateway_ip'] or net.last)) - # Use the gw_ip to find a point for splitting allocation pools - # for this subnet - split_ip = min(max(gw_ip, net.first), net.last) - if split_ip > first_ip: - pools.append({'start': str(netaddr.IPAddress(first_ip)), - 'end': str(netaddr.IPAddress(split_ip - 1))}) - if split_ip < last_ip: - pools.append({'start': str(netaddr.IPAddress(split_ip + 1)), - 'end': str(netaddr.IPAddress(last_ip))}) - # return auto-generated pools - # no need to check for their validity - return pools diff --git a/neutron/plugins/ryu/README b/neutron/plugins/ryu/README deleted file mode 100644 index 054c69a86..000000000 --- a/neutron/plugins/ryu/README +++ /dev/null @@ -1,22 +0,0 @@ -Neutron plugin for Ryu Network Operating System -This directory includes neutron plugin for Ryu Network Operating System. - -# -- Installation - -For how to install/set up this plugin with Ryu and OpenStack, please refer to -https://github.com/osrg/ryu/wiki/OpenStack - -# -- Ryu General - -For general Ryu stuff, please refer to -http://www.osrg.net/ryu/ - -Ryu is available at github -git://github.com/osrg/ryu.git -https://github.com/osrg/ryu - -The mailing is at -ryu-devel@lists.sourceforge.net -https://lists.sourceforge.net/lists/listinfo/ryu-devel - -Enjoy! 
diff --git a/neutron/plugins/ryu/__init__.py b/neutron/plugins/ryu/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/neutron/plugins/ryu/agent/__init__.py b/neutron/plugins/ryu/agent/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/neutron/plugins/ryu/agent/ryu_neutron_agent.py b/neutron/plugins/ryu/agent/ryu_neutron_agent.py deleted file mode 100755 index d1fac3185..000000000 --- a/neutron/plugins/ryu/agent/ryu_neutron_agent.py +++ /dev/null @@ -1,314 +0,0 @@ -#!/usr/bin/env python -# Copyright 2012 Isaku Yamahata -# Based on openvswitch agent. -# -# Copyright 2011 VMware, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# @author: Isaku Yamahata - -import httplib -import socket -import sys -import time - -import eventlet -eventlet.monkey_patch() - -from oslo.config import cfg -from ryu.app import client -from ryu.app import conf_switch_key -from ryu.app import rest_nw_id - -from neutron.agent.linux import ip_lib -from neutron.agent.linux import ovs_lib -from neutron.agent import rpc as agent_rpc -from neutron.agent import securitygroups_rpc as sg_rpc -from neutron.common import config as common_config -from neutron.common import exceptions as n_exc -from neutron.common import rpc_compat -from neutron.common import topics -from neutron import context as q_context -from neutron.extensions import securitygroup as ext_sg -from neutron.openstack.common import log -from neutron.plugins.ryu.common import config # noqa - - -LOG = log.getLogger(__name__) - - -# This is copied of nova.flags._get_my_ip() -# Agent shouldn't depend on nova module -def _get_my_ip(): - """Return the actual ip of the local machine. - - This code figures out what source address would be used if some traffic - were to be sent out to some well known address on the Internet. In this - case, a Google DNS server is used, but the specific address does not - matter much. No traffic is actually sent. 
- """ - csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) - csock.connect(('8.8.8.8', 80)) - (addr, _port) = csock.getsockname() - csock.close() - return addr - - -def _get_ip_from_nic(nic): - ip_wrapper = ip_lib.IPWrapper() - dev = ip_wrapper.device(nic) - addrs = dev.addr.list(scope='global') - for addr in addrs: - if addr['ip_version'] == 4: - return addr['cidr'].split('/')[0] - - -def _get_ip(cfg_ip_str, cfg_interface_str): - ip = None - try: - ip = getattr(cfg.CONF.OVS, cfg_ip_str) - except (cfg.NoSuchOptError, cfg.NoSuchGroupError): - pass - if ip: - return ip - - iface = None - try: - iface = getattr(cfg.CONF.OVS, cfg_interface_str) - except (cfg.NoSuchOptError, cfg.NoSuchGroupError): - pass - if iface: - ip = _get_ip_from_nic(iface) - if ip: - return ip - LOG.warning(_('Could not get IPv4 address from %(nic)s: %(cfg)s'), - {'nic': iface, 'cfg': cfg_interface_str}) - - return _get_my_ip() - - -def _get_tunnel_ip(): - return _get_ip('tunnel_ip', 'tunnel_interface') - - -def _get_ovsdb_ip(): - return _get_ip('ovsdb_ip', 'ovsdb_interface') - - -class OVSBridge(ovs_lib.OVSBridge): - def __init__(self, br_name, root_helper): - ovs_lib.OVSBridge.__init__(self, br_name, root_helper) - self.datapath_id = None - - def find_datapath_id(self): - self.datapath_id = self.get_datapath_id() - - def set_manager(self, target): - self.run_vsctl(["set-manager", target]) - - def get_ofport(self, name): - return self.db_get_val("Interface", name, "ofport") - - def _get_ports(self, get_port): - ports = [] - port_names = self.get_port_name_list() - for name in port_names: - if self.get_ofport(name) < 0: - continue - port = get_port(name) - if port: - ports.append(port) - - return ports - - def _get_external_port(self, name): - # exclude vif ports - external_ids = self.db_get_map("Interface", name, "external_ids") - if external_ids: - return - - # exclude tunnel ports - options = self.db_get_map("Interface", name, "options") - if "remote_ip" in options: - return - - ofport = 
self.get_ofport(name) - return ovs_lib.VifPort(name, ofport, None, None, self) - - def get_external_ports(self): - return self._get_ports(self._get_external_port) - - -class VifPortSet(object): - def __init__(self, int_br, ryu_rest_client): - super(VifPortSet, self).__init__() - self.int_br = int_br - self.api = ryu_rest_client - - def setup(self): - for port in self.int_br.get_external_ports(): - LOG.debug(_('External port %s'), port) - self.api.update_port(rest_nw_id.NW_ID_EXTERNAL, - port.switch.datapath_id, port.ofport) - - -class RyuPluginApi(agent_rpc.PluginApi, - sg_rpc.SecurityGroupServerRpcApiMixin): - def get_ofp_rest_api_addr(self, context): - LOG.debug(_("Get Ryu rest API address")) - return self.call(context, - self.make_msg('get_ofp_rest_api'), - topic=self.topic) - - -class RyuSecurityGroupAgent(sg_rpc.SecurityGroupAgentRpcMixin): - def __init__(self, context, plugin_rpc, root_helper): - self.context = context - self.plugin_rpc = plugin_rpc - self.root_helper = root_helper - self.init_firewall() - - -class OVSNeutronOFPRyuAgent(rpc_compat.RpcCallback, - sg_rpc.SecurityGroupAgentRpcCallbackMixin): - - RPC_API_VERSION = '1.1' - - def __init__(self, integ_br, tunnel_ip, ovsdb_ip, ovsdb_port, - polling_interval, root_helper): - super(OVSNeutronOFPRyuAgent, self).__init__() - self.polling_interval = polling_interval - self._setup_rpc() - self.sg_agent = RyuSecurityGroupAgent(self.context, - self.plugin_rpc, - root_helper) - self._setup_integration_br(root_helper, integ_br, tunnel_ip, - ovsdb_port, ovsdb_ip) - - def _setup_rpc(self): - self.topic = topics.AGENT - self.plugin_rpc = RyuPluginApi(topics.PLUGIN) - self.context = q_context.get_admin_context_without_session() - self.endpoints = [self] - consumers = [[topics.PORT, topics.UPDATE], - [topics.SECURITY_GROUP, topics.UPDATE]] - self.connection = agent_rpc.create_consumers(self.endpoints, - self.topic, - consumers) - - def _setup_integration_br(self, root_helper, integ_br, - tunnel_ip, ovsdb_port, 
ovsdb_ip): - self.int_br = OVSBridge(integ_br, root_helper) - self.int_br.find_datapath_id() - - rest_api_addr = self.plugin_rpc.get_ofp_rest_api_addr(self.context) - if not rest_api_addr: - raise n_exc.Invalid(_("Ryu rest API port isn't specified")) - LOG.debug(_("Going to ofp controller mode %s"), rest_api_addr) - - ryu_rest_client = client.OFPClient(rest_api_addr) - - self.vif_ports = VifPortSet(self.int_br, ryu_rest_client) - self.vif_ports.setup() - - sc_client = client.SwitchConfClient(rest_api_addr) - sc_client.set_key(self.int_br.datapath_id, - conf_switch_key.OVS_TUNNEL_ADDR, tunnel_ip) - - # Currently Ryu supports only tcp methods. (ssl isn't supported yet) - self.int_br.set_manager('ptcp:%d' % ovsdb_port) - sc_client.set_key(self.int_br.datapath_id, conf_switch_key.OVSDB_ADDR, - 'tcp:%s:%d' % (ovsdb_ip, ovsdb_port)) - - def port_update(self, context, **kwargs): - LOG.debug(_("Port update received")) - port = kwargs.get('port') - vif_port = self.int_br.get_vif_port_by_id(port['id']) - if not vif_port: - return - - if ext_sg.SECURITYGROUPS in port: - self.sg_agent.refresh_firewall() - - def _update_ports(self, registered_ports): - ports = self.int_br.get_vif_port_set() - if ports == registered_ports: - return - added = ports - registered_ports - removed = registered_ports - ports - return {'current': ports, - 'added': added, - 'removed': removed} - - def _process_devices_filter(self, port_info): - if 'added' in port_info: - self.sg_agent.prepare_devices_filter(port_info['added']) - if 'removed' in port_info: - self.sg_agent.remove_devices_filter(port_info['removed']) - - def daemon_loop(self): - ports = set() - - while True: - start = time.time() - try: - port_info = self._update_ports(ports) - if port_info: - LOG.debug(_("Agent loop has new device")) - self._process_devices_filter(port_info) - ports = port_info['current'] - except Exception: - LOG.exception(_("Error in agent event loop")) - - elapsed = max(time.time() - start, 0) - if (elapsed < 
self.polling_interval): - time.sleep(self.polling_interval - elapsed) - else: - LOG.debug(_("Loop iteration exceeded interval " - "(%(polling_interval)s vs. %(elapsed)s)!"), - {'polling_interval': self.polling_interval, - 'elapsed': elapsed}) - - -def main(): - common_config.init(sys.argv[1:]) - - common_config.setup_logging(cfg.CONF) - - integ_br = cfg.CONF.OVS.integration_bridge - polling_interval = cfg.CONF.AGENT.polling_interval - root_helper = cfg.CONF.AGENT.root_helper - - tunnel_ip = _get_tunnel_ip() - LOG.debug(_('tunnel_ip %s'), tunnel_ip) - ovsdb_port = cfg.CONF.OVS.ovsdb_port - LOG.debug(_('ovsdb_port %s'), ovsdb_port) - ovsdb_ip = _get_ovsdb_ip() - LOG.debug(_('ovsdb_ip %s'), ovsdb_ip) - try: - agent = OVSNeutronOFPRyuAgent(integ_br, tunnel_ip, ovsdb_ip, - ovsdb_port, polling_interval, - root_helper) - except httplib.HTTPException as e: - LOG.error(_("Initialization failed: %s"), e) - sys.exit(1) - - LOG.info(_("Ryu initialization on the node is done. " - "Agent initialized successfully, now running...")) - agent.daemon_loop() - sys.exit(0) - - -if __name__ == "__main__": - main() diff --git a/neutron/plugins/ryu/common/__init__.py b/neutron/plugins/ryu/common/__init__.py deleted file mode 100644 index e5f41adfe..000000000 --- a/neutron/plugins/ryu/common/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2012 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
diff --git a/neutron/plugins/ryu/common/config.py b/neutron/plugins/ryu/common/config.py deleted file mode 100644 index 504166d58..000000000 --- a/neutron/plugins/ryu/common/config.py +++ /dev/null @@ -1,52 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2012 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo.config import cfg - -from neutron.agent.common import config -from neutron.agent.linux import ovs_lib # noqa - -ovs_opts = [ - cfg.StrOpt('integration_bridge', default='br-int', - help=_("Integration bridge to use")), - cfg.StrOpt('openflow_rest_api', default='127.0.0.1:8080', - help=_("OpenFlow REST API location")), - cfg.IntOpt('tunnel_key_min', default=1, - help=_("Minimum tunnel ID to use")), - cfg.IntOpt('tunnel_key_max', default=0xffffff, - help=_("Maximum tunnel ID to use")), - cfg.StrOpt('tunnel_ip', - help=_("Tunnel IP to use")), - cfg.StrOpt('tunnel_interface', - help=_("Tunnel interface to use")), - cfg.IntOpt('ovsdb_port', default=6634, - help=_("OVSDB port to connect to")), - cfg.StrOpt('ovsdb_ip', - help=_("OVSDB IP to connect to")), - cfg.StrOpt('ovsdb_interface', - help=_("OVSDB interface to connect to")), -] - -agent_opts = [ - cfg.IntOpt('polling_interval', default=2, - help=_("The number of seconds the agent will wait between " - "polling for local device changes.")), -] - - -cfg.CONF.register_opts(ovs_opts, "OVS") -cfg.CONF.register_opts(agent_opts, "AGENT") -config.register_root_helper(cfg.CONF) diff 
--git a/neutron/plugins/ryu/db/__init__.py b/neutron/plugins/ryu/db/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/neutron/plugins/ryu/db/api_v2.py b/neutron/plugins/ryu/db/api_v2.py deleted file mode 100644 index df4c904b5..000000000 --- a/neutron/plugins/ryu/db/api_v2.py +++ /dev/null @@ -1,215 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# Copyright 2012 Isaku Yamahata -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from sqlalchemy import exc as sa_exc -from sqlalchemy import func -from sqlalchemy.orm import exc as orm_exc - -from neutron.common import exceptions as n_exc -import neutron.db.api as db -from neutron.db import models_v2 -from neutron.db import securitygroups_db as sg_db -from neutron.extensions import securitygroup as ext_sg -from neutron import manager -from neutron.openstack.common import log as logging -from neutron.plugins.ryu.db import models_v2 as ryu_models_v2 - - -LOG = logging.getLogger(__name__) - - -def network_all_tenant_list(): - session = db.get_session() - return session.query(models_v2.Network).all() - - -def get_port_from_device(port_id): - LOG.debug(_("get_port_from_device() called:port_id=%s"), port_id) - session = db.get_session() - sg_binding_port = sg_db.SecurityGroupPortBinding.port_id - - query = session.query(models_v2.Port, - sg_db.SecurityGroupPortBinding.security_group_id) - query = query.outerjoin(sg_db.SecurityGroupPortBinding, - models_v2.Port.id == 
sg_binding_port) - query = query.filter(models_v2.Port.id == port_id) - port_and_sgs = query.all() - if not port_and_sgs: - return None - port = port_and_sgs[0][0] - plugin = manager.NeutronManager.get_plugin() - port_dict = plugin._make_port_dict(port) - port_dict[ext_sg.SECURITYGROUPS] = [ - sg_id for port_, sg_id in port_and_sgs if sg_id] - port_dict['security_group_rules'] = [] - port_dict['security_group_source_groups'] = [] - port_dict['fixed_ips'] = [ip['ip_address'] for ip in port['fixed_ips']] - return port_dict - - -class TunnelKey(object): - # VLAN: 12 bits - # GRE, VXLAN: 24bits - # TODO(yamahata): STT: 64bits - _KEY_MIN_HARD = 1 - _KEY_MAX_HARD = 0xffffffff - - def __init__(self, key_min=_KEY_MIN_HARD, key_max=_KEY_MAX_HARD): - self.key_min = key_min - self.key_max = key_max - - if (key_min < self._KEY_MIN_HARD or key_max > self._KEY_MAX_HARD or - key_min > key_max): - raise ValueError(_('Invalid tunnel key options ' - 'tunnel_key_min: %(key_min)d ' - 'tunnel_key_max: %(key_max)d. ' - 'Using default value') % {'key_min': key_min, - 'key_max': key_max}) - - def _last_key(self, session): - try: - return session.query(ryu_models_v2.TunnelKeyLast).one() - except orm_exc.MultipleResultsFound: - max_key = session.query( - func.max(ryu_models_v2.TunnelKeyLast.last_key)) - if max_key > self.key_max: - max_key = self.key_min - - session.query(ryu_models_v2.TunnelKeyLast).delete() - last_key = ryu_models_v2.TunnelKeyLast(last_key=max_key) - except orm_exc.NoResultFound: - last_key = ryu_models_v2.TunnelKeyLast(last_key=self.key_min) - - session.add(last_key) - session.flush() - return session.query(ryu_models_v2.TunnelKeyLast).one() - - def _find_key(self, session, last_key): - """Try to find unused tunnel key. - - Trying to find unused tunnel key in TunnelKey table starting - from last_key + 1. - When all keys are used, raise sqlalchemy.orm.exc.NoResultFound - """ - # key 0 is used for special meanings. So don't allocate 0. 
- - # sqlite doesn't support - # '(select order by limit) union all (select order by limit) ' - # 'order by limit' - # So do it manually - # new_key = session.query("new_key").from_statement( - # # If last_key + 1 isn't used, it's the result - # 'SELECT new_key ' - # 'FROM (SELECT :last_key + 1 AS new_key) q1 ' - # 'WHERE NOT EXISTS ' - # '(SELECT 1 FROM tunnelkeys WHERE tunnel_key = :last_key + 1) ' - # - # 'UNION ALL ' - # - # # if last_key + 1 used, - # # find the least unused key from last_key + 1 - # '(SELECT t.tunnel_key + 1 AS new_key ' - # 'FROM tunnelkeys t ' - # 'WHERE NOT EXISTS ' - # '(SELECT 1 FROM tunnelkeys ti ' - # ' WHERE ti.tunnel_key = t.tunnel_key + 1) ' - # 'AND t.tunnel_key >= :last_key ' - # 'ORDER BY new_key LIMIT 1) ' - # - # 'ORDER BY new_key LIMIT 1' - # ).params(last_key=last_key).one() - try: - new_key = session.query("new_key").from_statement( - # If last_key + 1 isn't used, it's the result - 'SELECT new_key ' - 'FROM (SELECT :last_key + 1 AS new_key) q1 ' - 'WHERE NOT EXISTS ' - '(SELECT 1 FROM tunnelkeys WHERE tunnel_key = :last_key + 1) ' - ).params(last_key=last_key).one() - except orm_exc.NoResultFound: - new_key = session.query("new_key").from_statement( - # if last_key + 1 used, - # find the least unused key from last_key + 1 - '(SELECT t.tunnel_key + 1 AS new_key ' - 'FROM tunnelkeys t ' - 'WHERE NOT EXISTS ' - '(SELECT 1 FROM tunnelkeys ti ' - ' WHERE ti.tunnel_key = t.tunnel_key + 1) ' - 'AND t.tunnel_key >= :last_key ' - 'ORDER BY new_key LIMIT 1) ' - ).params(last_key=last_key).one() - - new_key = new_key[0] # the result is tuple. 
- LOG.debug(_("last_key %(last_key)s new_key %(new_key)s"), - {'last_key': last_key, 'new_key': new_key}) - if new_key > self.key_max: - LOG.debug(_("No key found")) - raise orm_exc.NoResultFound() - return new_key - - def _allocate(self, session, network_id): - last_key = self._last_key(session) - try: - new_key = self._find_key(session, last_key.last_key) - except orm_exc.NoResultFound: - new_key = self._find_key(session, self.key_min) - - tunnel_key = ryu_models_v2.TunnelKey(network_id=network_id, - tunnel_key=new_key) - last_key.last_key = new_key - session.add(tunnel_key) - return new_key - - _TRANSACTION_RETRY_MAX = 16 - - def allocate(self, session, network_id): - count = 0 - while True: - session.begin(subtransactions=True) - try: - new_key = self._allocate(session, network_id) - session.commit() - break - except sa_exc.SQLAlchemyError: - session.rollback() - - count += 1 - if count > self._TRANSACTION_RETRY_MAX: - # if this happens too often, increase _TRANSACTION_RETRY_MAX - LOG.warn(_("Transaction retry exhausted (%d). " - "Abandoned tunnel key allocation."), count) - raise n_exc.ResourceExhausted() - - return new_key - - def delete(self, session, network_id): - session.query(ryu_models_v2.TunnelKey).filter_by( - network_id=network_id).delete() - session.flush() - - def all_list(self): - session = db.get_session() - return session.query(ryu_models_v2.TunnelKey).all() - - -def set_port_status(session, port_id, status): - try: - port = session.query(models_v2.Port).filter_by(id=port_id).one() - port['status'] = status - session.merge(port) - session.flush() - except orm_exc.NoResultFound: - raise n_exc.PortNotFound(port_id=port_id) diff --git a/neutron/plugins/ryu/db/models_v2.py b/neutron/plugins/ryu/db/models_v2.py deleted file mode 100644 index cf10e1732..000000000 --- a/neutron/plugins/ryu/db/models_v2.py +++ /dev/null @@ -1,41 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# Copyright 2012 Isaku Yamahata -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import sqlalchemy as sa - -from neutron.db import model_base - - -class TunnelKeyLast(model_base.BASEV2): - """Last allocated Tunnel key. - - The next key allocation will be started from this value + 1 - """ - last_key = sa.Column(sa.Integer, primary_key=True) - - def __repr__(self): - return "" % self.last_key - - -class TunnelKey(model_base.BASEV2): - """Netowrk ID <-> tunnel key mapping.""" - network_id = sa.Column(sa.String(36), sa.ForeignKey("networks.id"), - nullable=False) - tunnel_key = sa.Column(sa.Integer, primary_key=True, - nullable=False, autoincrement=False) - - def __repr__(self): - return "" % (self.network_id, self.tunnel_key) diff --git a/neutron/plugins/ryu/ryu_neutron_plugin.py b/neutron/plugins/ryu/ryu_neutron_plugin.py deleted file mode 100644 index 9fd6bf989..000000000 --- a/neutron/plugins/ryu/ryu_neutron_plugin.py +++ /dev/null @@ -1,269 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# Copyright 2012 Isaku Yamahata -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -# @author: Isaku Yamahata - -from oslo.config import cfg -from ryu.app import client -from ryu.app import rest_nw_id - -from neutron.agent import securitygroups_rpc as sg_rpc -from neutron.common import constants as q_const -from neutron.common import exceptions as n_exc -from neutron.common import rpc_compat -from neutron.common import topics -from neutron.db import api as db -from neutron.db import db_base_plugin_v2 -from neutron.db import dhcp_rpc_base -from neutron.db import external_net_db -from neutron.db import extraroute_db -from neutron.db import l3_gwmode_db -from neutron.db import l3_rpc_base -from neutron.db import models_v2 -from neutron.db import portbindings_base -from neutron.db import securitygroups_rpc_base as sg_db_rpc -from neutron.extensions import portbindings -from neutron.openstack.common import excutils -from neutron.openstack.common import log as logging -from neutron.plugins.common import constants as svc_constants -from neutron.plugins.ryu.common import config # noqa -from neutron.plugins.ryu.db import api_v2 as db_api_v2 - - -LOG = logging.getLogger(__name__) - - -class RyuRpcCallbacks(rpc_compat.RpcCallback, - dhcp_rpc_base.DhcpRpcCallbackMixin, - l3_rpc_base.L3RpcCallbackMixin, - sg_db_rpc.SecurityGroupServerRpcCallbackMixin): - - RPC_API_VERSION = '1.1' - - def __init__(self, ofp_rest_api_addr): - super(RyuRpcCallbacks, self).__init__() - self.ofp_rest_api_addr = ofp_rest_api_addr - - def get_ofp_rest_api(self, context, **kwargs): - LOG.debug(_("get_ofp_rest_api: %s"), self.ofp_rest_api_addr) - return self.ofp_rest_api_addr - - @classmethod - def get_port_from_device(cls, device): - port = db_api_v2.get_port_from_device(device) - if port: - port['device'] = device - return port - - -class AgentNotifierApi(rpc_compat.RpcProxy, - sg_rpc.SecurityGroupAgentRpcApiMixin): - - BASE_RPC_API_VERSION = '1.0' - - def __init__(self, topic): 
- super(AgentNotifierApi, self).__init__( - topic=topic, default_version=self.BASE_RPC_API_VERSION) - self.topic_port_update = topics.get_topic_name(topic, - topics.PORT, - topics.UPDATE) - - def port_update(self, context, port): - self.fanout_cast(context, - self.make_msg('port_update', port=port), - topic=self.topic_port_update) - - -class RyuNeutronPluginV2(db_base_plugin_v2.NeutronDbPluginV2, - external_net_db.External_net_db_mixin, - extraroute_db.ExtraRoute_db_mixin, - l3_gwmode_db.L3_NAT_db_mixin, - sg_db_rpc.SecurityGroupServerRpcMixin, - portbindings_base.PortBindingBaseMixin): - - _supported_extension_aliases = ["external-net", "router", "ext-gw-mode", - "extraroute", "security-group", - "binding", "quotas"] - - @property - def supported_extension_aliases(self): - if not hasattr(self, '_aliases'): - aliases = self._supported_extension_aliases[:] - sg_rpc.disable_security_group_extension_by_config(aliases) - self._aliases = aliases - return self._aliases - - def __init__(self, configfile=None): - super(RyuNeutronPluginV2, self).__init__() - self.base_binding_dict = { - portbindings.VIF_TYPE: portbindings.VIF_TYPE_OVS, - portbindings.VIF_DETAILS: { - # TODO(rkukura): Replace with new VIF security details - portbindings.CAP_PORT_FILTER: - 'security-group' in self.supported_extension_aliases, - portbindings.OVS_HYBRID_PLUG: True - } - } - portbindings_base.register_port_dict_function() - self.tunnel_key = db_api_v2.TunnelKey( - cfg.CONF.OVS.tunnel_key_min, cfg.CONF.OVS.tunnel_key_max) - self.ofp_api_host = cfg.CONF.OVS.openflow_rest_api - if not self.ofp_api_host: - raise n_exc.Invalid(_('Invalid configuration. 
check ryu.ini')) - - self.client = client.OFPClient(self.ofp_api_host) - self.tun_client = client.TunnelClient(self.ofp_api_host) - self.iface_client = client.NeutronIfaceClient(self.ofp_api_host) - for nw_id in rest_nw_id.RESERVED_NETWORK_IDS: - if nw_id != rest_nw_id.NW_ID_UNKNOWN: - self.client.update_network(nw_id) - self._setup_rpc() - - # register known all network list on startup - self._create_all_tenant_network() - - def _setup_rpc(self): - self.service_topics = {svc_constants.CORE: topics.PLUGIN, - svc_constants.L3_ROUTER_NAT: topics.L3PLUGIN} - self.conn = rpc_compat.create_connection(new=True) - self.notifier = AgentNotifierApi(topics.AGENT) - self.endpoints = [RyuRpcCallbacks(self.ofp_api_host)] - for svc_topic in self.service_topics.values(): - self.conn.create_consumer(svc_topic, self.endpoints, fanout=False) - self.conn.consume_in_threads() - - def _create_all_tenant_network(self): - for net in db_api_v2.network_all_tenant_list(): - self.client.update_network(net.id) - for tun in self.tunnel_key.all_list(): - self.tun_client.update_tunnel_key(tun.network_id, tun.tunnel_key) - session = db.get_session() - for port in session.query(models_v2.Port): - self.iface_client.update_network_id(port.id, port.network_id) - - def _client_create_network(self, net_id, tunnel_key): - self.client.create_network(net_id) - self.tun_client.create_tunnel_key(net_id, tunnel_key) - - def _client_delete_network(self, net_id): - RyuNeutronPluginV2._safe_client_delete_network(self.safe_reference, - net_id) - - @staticmethod - def _safe_client_delete_network(safe_reference, net_id): - # Avoid handing naked plugin references to the client. When - # the client is mocked for testing, such references can - # prevent the plugin from being deallocated. 
- client.ignore_http_not_found( - lambda: safe_reference.client.delete_network(net_id)) - client.ignore_http_not_found( - lambda: safe_reference.tun_client.delete_tunnel_key(net_id)) - - def create_network(self, context, network): - session = context.session - with session.begin(subtransactions=True): - #set up default security groups - tenant_id = self._get_tenant_id_for_create( - context, network['network']) - self._ensure_default_security_group(context, tenant_id) - - net = super(RyuNeutronPluginV2, self).create_network(context, - network) - self._process_l3_create(context, net, network['network']) - - tunnel_key = self.tunnel_key.allocate(session, net['id']) - try: - self._client_create_network(net['id'], tunnel_key) - except Exception: - with excutils.save_and_reraise_exception(): - self._client_delete_network(net['id']) - - return net - - def update_network(self, context, id, network): - session = context.session - with session.begin(subtransactions=True): - net = super(RyuNeutronPluginV2, self).update_network(context, id, - network) - self._process_l3_update(context, net, network['network']) - return net - - def delete_network(self, context, id): - self._client_delete_network(id) - session = context.session - with session.begin(subtransactions=True): - self.tunnel_key.delete(session, id) - self._process_l3_delete(context, id) - super(RyuNeutronPluginV2, self).delete_network(context, id) - - def create_port(self, context, port): - session = context.session - port_data = port['port'] - with session.begin(subtransactions=True): - self._ensure_default_security_group_on_port(context, port) - sgids = self._get_security_groups_on_port(context, port) - port = super(RyuNeutronPluginV2, self).create_port(context, port) - self._process_portbindings_create_and_update(context, - port_data, - port) - self._process_port_create_security_group( - context, port, sgids) - self.notify_security_groups_member_updated(context, port) - 
self.iface_client.create_network_id(port['id'], port['network_id']) - return port - - def delete_port(self, context, id, l3_port_check=True): - # if needed, check to see if this is a port owned by - # and l3-router. If so, we should prevent deletion. - if l3_port_check: - self.prevent_l3_port_deletion(context, id) - - with context.session.begin(subtransactions=True): - self.disassociate_floatingips(context, id) - port = self.get_port(context, id) - self._delete_port_security_group_bindings(context, id) - super(RyuNeutronPluginV2, self).delete_port(context, id) - - self.notify_security_groups_member_updated(context, port) - - def update_port(self, context, id, port): - deleted = port['port'].get('deleted', False) - session = context.session - - need_port_update_notify = False - with session.begin(subtransactions=True): - original_port = super(RyuNeutronPluginV2, self).get_port( - context, id) - updated_port = super(RyuNeutronPluginV2, self).update_port( - context, id, port) - self._process_portbindings_create_and_update(context, - port['port'], - updated_port) - need_port_update_notify = self.update_security_group_on_port( - context, id, port, original_port, updated_port) - - need_port_update_notify |= self.is_security_group_member_updated( - context, original_port, updated_port) - - need_port_update_notify |= (original_port['admin_state_up'] != - updated_port['admin_state_up']) - - if need_port_update_notify: - self.notifier.port_update(context, updated_port) - - if deleted: - db_api_v2.set_port_status(session, id, q_const.PORT_STATUS_DOWN) - return updated_port diff --git a/neutron/plugins/vmware/__init__.py b/neutron/plugins/vmware/__init__.py deleted file mode 100644 index a62818888..000000000 --- a/neutron/plugins/vmware/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -import os - -NSX_EXT_PATH = os.path.join(os.path.dirname(__file__), 'extensions') diff --git a/neutron/plugins/vmware/api_client/__init__.py b/neutron/plugins/vmware/api_client/__init__.py deleted file 
mode 100644 index 6b7126b02..000000000 --- a/neutron/plugins/vmware/api_client/__init__.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright 2012 VMware, Inc. -# -# All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -import httplib - - -def ctrl_conn_to_str(conn): - """Returns a string representing a connection URL to the controller.""" - if isinstance(conn, httplib.HTTPSConnection): - proto = "https://" - elif isinstance(conn, httplib.HTTPConnection): - proto = "http://" - else: - raise TypeError(_('Invalid connection type: %s') % type(conn)) - return "%s%s:%s" % (proto, conn.host, conn.port) diff --git a/neutron/plugins/vmware/api_client/base.py b/neutron/plugins/vmware/api_client/base.py deleted file mode 100644 index e8998b5cd..000000000 --- a/neutron/plugins/vmware/api_client/base.py +++ /dev/null @@ -1,249 +0,0 @@ -# Copyright 2012 VMware, Inc. -# -# All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import abc -import httplib -import six -import time - -from neutron.openstack.common import log as logging -from neutron.plugins.vmware import api_client - -LOG = logging.getLogger(__name__) - -GENERATION_ID_TIMEOUT = -1 -DEFAULT_CONCURRENT_CONNECTIONS = 3 -DEFAULT_CONNECT_TIMEOUT = 5 - - -@six.add_metaclass(abc.ABCMeta) -class ApiClientBase(object): - """An abstract baseclass for all API client implementations.""" - - CONN_IDLE_TIMEOUT = 60 * 15 - - def _create_connection(self, host, port, is_ssl): - if is_ssl: - return httplib.HTTPSConnection(host, port, - timeout=self._connect_timeout) - return httplib.HTTPConnection(host, port, - timeout=self._connect_timeout) - - @staticmethod - def _conn_params(http_conn): - is_ssl = isinstance(http_conn, httplib.HTTPSConnection) - return (http_conn.host, http_conn.port, is_ssl) - - @property - def user(self): - return self._user - - @property - def password(self): - return self._password - - @property - def config_gen(self): - # If NSX_gen_timeout is not -1 then: - # Maintain a timestamp along with the generation ID. Hold onto the - # ID long enough to be useful and block on sequential requests but - # not long enough to persist when Onix db is cleared, which resets - # the generation ID, causing the DAL to block indefinitely with some - # number that's higher than the cluster's value. 
- if self._gen_timeout != -1: - ts = self._config_gen_ts - if ts is not None: - if (time.time() - ts) > self._gen_timeout: - return None - return self._config_gen - - @config_gen.setter - def config_gen(self, value): - if self._config_gen != value: - if self._gen_timeout != -1: - self._config_gen_ts = time.time() - self._config_gen = value - - def auth_cookie(self, conn): - cookie = None - data = self._get_provider_data(conn) - if data: - cookie = data[1] - return cookie - - def set_auth_cookie(self, conn, cookie): - data = self._get_provider_data(conn) - if data: - self._set_provider_data(conn, (data[0], cookie)) - - def acquire_connection(self, auto_login=True, headers=None, rid=-1): - '''Check out an available HTTPConnection instance. - - Blocks until a connection is available. - :auto_login: automatically logins before returning conn - :headers: header to pass on to login attempt - :param rid: request id passed in from request eventlet. - :returns: An available HTTPConnection instance or None if no - api_providers are configured. - ''' - if not self._api_providers: - LOG.warn(_("[%d] no API providers currently available."), rid) - return None - if self._conn_pool.empty(): - LOG.debug(_("[%d] Waiting to acquire API client connection."), rid) - priority, conn = self._conn_pool.get() - now = time.time() - if getattr(conn, 'last_used', now) < now - self.CONN_IDLE_TIMEOUT: - LOG.info(_("[%(rid)d] Connection %(conn)s idle for %(sec)0.2f " - "seconds; reconnecting."), - {'rid': rid, 'conn': api_client.ctrl_conn_to_str(conn), - 'sec': now - conn.last_used}) - conn = self._create_connection(*self._conn_params(conn)) - - conn.last_used = now - conn.priority = priority # stash current priority for release - qsize = self._conn_pool.qsize() - LOG.debug(_("[%(rid)d] Acquired connection %(conn)s. 
%(qsize)d " - "connection(s) available."), - {'rid': rid, 'conn': api_client.ctrl_conn_to_str(conn), - 'qsize': qsize}) - if auto_login and self.auth_cookie(conn) is None: - self._wait_for_login(conn, headers) - return conn - - def release_connection(self, http_conn, bad_state=False, - service_unavail=False, rid=-1): - '''Mark HTTPConnection instance as available for check-out. - - :param http_conn: An HTTPConnection instance obtained from this - instance. - :param bad_state: True if http_conn is known to be in a bad state - (e.g. connection fault.) - :service_unavail: True if http_conn returned 503 response. - :param rid: request id passed in from request eventlet. - ''' - conn_params = self._conn_params(http_conn) - if self._conn_params(http_conn) not in self._api_providers: - LOG.debug(_("[%(rid)d] Released connection %(conn)s is not an " - "API provider for the cluster"), - {'rid': rid, - 'conn': api_client.ctrl_conn_to_str(http_conn)}) - return - elif hasattr(http_conn, "no_release"): - return - - if bad_state: - # Reconnect to provider. 
- LOG.warn(_("[%(rid)d] Connection returned in bad state, " - "reconnecting to %(conn)s"), - {'rid': rid, - 'conn': api_client.ctrl_conn_to_str(http_conn)}) - http_conn = self._create_connection(*self._conn_params(http_conn)) - priority = self._next_conn_priority - self._next_conn_priority += 1 - elif service_unavail: - # http_conn returned a service unaviable response, put other - # connections to the same controller at end of priority queue, - conns = [] - while not self._conn_pool.empty(): - priority, conn = self._conn_pool.get() - if self._conn_params(conn) == conn_params: - priority = self._next_conn_priority - self._next_conn_priority += 1 - conns.append((priority, conn)) - for priority, conn in conns: - self._conn_pool.put((priority, conn)) - # put http_conn at end of queue also - priority = self._next_conn_priority - self._next_conn_priority += 1 - else: - priority = http_conn.priority - - self._conn_pool.put((priority, http_conn)) - LOG.debug(_("[%(rid)d] Released connection %(conn)s. %(qsize)d " - "connection(s) available."), - {'rid': rid, 'conn': api_client.ctrl_conn_to_str(http_conn), - 'qsize': self._conn_pool.qsize()}) - - def _wait_for_login(self, conn, headers=None): - '''Block until a login has occurred for the current API provider.''' - - data = self._get_provider_data(conn) - if data is None: - LOG.error(_("Login request for an invalid connection: '%s'"), - api_client.ctrl_conn_to_str(conn)) - return - provider_sem = data[0] - if provider_sem.acquire(blocking=False): - try: - cookie = self._login(conn, headers) - self.set_auth_cookie(conn, cookie) - finally: - provider_sem.release() - else: - LOG.debug(_("Waiting for auth to complete")) - # Wait until we can acquire then release - provider_sem.acquire(blocking=True) - provider_sem.release() - - def _get_provider_data(self, conn_or_conn_params, default=None): - """Get data for specified API provider. 
- - Args: - conn_or_conn_params: either a HTTP(S)Connection object or the - resolved conn_params tuple returned by self._conn_params(). - default: conn_params if ones passed aren't known - Returns: Data associated with specified provider - """ - conn_params = self._normalize_conn_params(conn_or_conn_params) - return self._api_provider_data.get(conn_params, default) - - def _set_provider_data(self, conn_or_conn_params, data): - """Set data for specified API provider. - - Args: - conn_or_conn_params: either a HTTP(S)Connection object or the - resolved conn_params tuple returned by self._conn_params(). - data: data to associate with API provider - """ - conn_params = self._normalize_conn_params(conn_or_conn_params) - if data is None: - del self._api_provider_data[conn_params] - else: - self._api_provider_data[conn_params] = data - - def _normalize_conn_params(self, conn_or_conn_params): - """Normalize conn_param tuple. - - Args: - conn_or_conn_params: either a HTTP(S)Connection object or the - resolved conn_params tuple returned by self._conn_params(). - - Returns: Normalized conn_param tuple - """ - if (not isinstance(conn_or_conn_params, tuple) and - not isinstance(conn_or_conn_params, httplib.HTTPConnection)): - LOG.debug(_("Invalid conn_params value: '%s'"), - str(conn_or_conn_params)) - return conn_or_conn_params - if isinstance(conn_or_conn_params, httplib.HTTPConnection): - conn_params = self._conn_params(conn_or_conn_params) - else: - conn_params = conn_or_conn_params - host, port, is_ssl = conn_params - if port is None: - port = 443 if is_ssl else 80 - return (host, port, is_ssl) diff --git a/neutron/plugins/vmware/api_client/client.py b/neutron/plugins/vmware/api_client/client.py deleted file mode 100644 index a6981a853..000000000 --- a/neutron/plugins/vmware/api_client/client.py +++ /dev/null @@ -1,143 +0,0 @@ -# Copyright 2012 VMware, Inc. 
-# -# All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -import httplib - -from neutron.openstack.common import log as logging -from neutron.plugins.vmware.api_client import base -from neutron.plugins.vmware.api_client import eventlet_client -from neutron.plugins.vmware.api_client import eventlet_request -from neutron.plugins.vmware.api_client import exception -from neutron.plugins.vmware.api_client import version - -LOG = logging.getLogger(__name__) - - -class NsxApiClient(eventlet_client.EventletApiClient): - """The Nsx API Client.""" - - def __init__(self, api_providers, user, password, - concurrent_connections=base.DEFAULT_CONCURRENT_CONNECTIONS, - gen_timeout=base.GENERATION_ID_TIMEOUT, - use_https=True, - connect_timeout=base.DEFAULT_CONNECT_TIMEOUT, - request_timeout=30, http_timeout=10, retries=2, redirects=2): - '''Constructor. Adds the following: - - :param request_timeout: all operations (including retries, redirects - from unresponsive controllers, etc) should finish within this - timeout. - :param http_timeout: how long to wait before aborting an - unresponsive controller (and allow for retries to another - controller in the cluster) - :param retries: the number of concurrent connections. - :param redirects: the number of concurrent connections. 
- ''' - super(NsxApiClient, self).__init__( - api_providers, user, password, - concurrent_connections=concurrent_connections, - gen_timeout=gen_timeout, use_https=use_https, - connect_timeout=connect_timeout) - - self._request_timeout = request_timeout - self._http_timeout = http_timeout - self._retries = retries - self._redirects = redirects - self._version = None - - # NOTE(salvatore-orlando): This method is not used anymore. Login is now - # performed automatically inside the request eventlet if necessary. - def login(self, user=None, password=None): - '''Login to NSX controller. - - Assumes same password is used for all controllers. - - :param user: controller user (usually admin). Provided for - backwards compatibility. In the normal mode of operation - this should be None. - :param password: controller password. Provided for backwards - compatibility. In the normal mode of operation this should - be None. - ''' - if user: - self._user = user - if password: - self._password = password - - return self._login() - - def request(self, method, url, body="", content_type="application/json"): - '''Issues request to controller.''' - - g = eventlet_request.GenericRequestEventlet( - self, method, url, body, content_type, auto_login=True, - request_timeout=self._request_timeout, - http_timeout=self._http_timeout, - retries=self._retries, redirects=self._redirects) - g.start() - response = g.join() - LOG.debug(_('Request returns "%s"'), response) - - # response is a modified HTTPResponse object or None. - # response.read() will not work on response as the underlying library - # request_eventlet.ApiRequestEventlet has already called this - # method in order to extract the body and headers for processing. - # ApiRequestEventlet derived classes call .read() and - # .getheaders() on the HTTPResponse objects and store the results in - # the response object's .body and .headers data members for future - # access. - - if response is None: - # Timeout. 
- LOG.error(_('Request timed out: %(method)s to %(url)s'), - {'method': method, 'url': url}) - raise exception.RequestTimeout() - - status = response.status - if status == httplib.UNAUTHORIZED: - raise exception.UnAuthorizedRequest() - - # Fail-fast: Check for exception conditions and raise the - # appropriate exceptions for known error codes. - if status in exception.ERROR_MAPPINGS: - LOG.error(_("Received error code: %s"), status) - LOG.error(_("Server Error Message: %s"), response.body) - exception.ERROR_MAPPINGS[status](response) - - # Continue processing for non-error condition. - if (status != httplib.OK and status != httplib.CREATED - and status != httplib.NO_CONTENT): - LOG.error(_("%(method)s to %(url)s, unexpected response code: " - "%(status)d (content = '%(body)s')"), - {'method': method, 'url': url, - 'status': response.status, 'body': response.body}) - return None - - if not self._version: - self._version = version.find_version(response.headers) - return response.body - - def get_version(self): - if not self._version: - # Determine the controller version by querying the - # cluster nodes. Currently, the version will be the - # one of the server that responds. - self.request('GET', '/ws.v1/control-cluster/node') - if not self._version: - LOG.error(_('Unable to determine NSX version. ' - 'Plugin might not work as expected.')) - return self._version diff --git a/neutron/plugins/vmware/api_client/eventlet_client.py b/neutron/plugins/vmware/api_client/eventlet_client.py deleted file mode 100644 index fa0cd1f3e..000000000 --- a/neutron/plugins/vmware/api_client/eventlet_client.py +++ /dev/null @@ -1,155 +0,0 @@ -# Copyright 2012 VMware, Inc. -# -# All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -import time - -import eventlet -eventlet.monkey_patch() - -from neutron.openstack.common import log as logging -from neutron.plugins.vmware.api_client import base -from neutron.plugins.vmware.api_client import eventlet_request - -LOG = logging.getLogger(__name__) - - -class EventletApiClient(base.ApiClientBase): - """Eventlet-based implementation of NSX ApiClient ABC.""" - - def __init__(self, api_providers, user, password, - concurrent_connections=base.DEFAULT_CONCURRENT_CONNECTIONS, - gen_timeout=base.GENERATION_ID_TIMEOUT, - use_https=True, - connect_timeout=base.DEFAULT_CONNECT_TIMEOUT): - '''Constructor - - :param api_providers: a list of tuples of the form: (host, port, - is_ssl). - :param user: login username. - :param password: login password. - :param concurrent_connections: total number of concurrent connections. - :param use_https: whether or not to use https for requests. - :param connect_timeout: connection timeout in seconds. 
- :param gen_timeout controls how long the generation id is kept - if set to -1 the generation id is never timed out - ''' - if not api_providers: - api_providers = [] - self._api_providers = set([tuple(p) for p in api_providers]) - self._api_provider_data = {} # tuple(semaphore, session_cookie) - for p in self._api_providers: - self._set_provider_data(p, (eventlet.semaphore.Semaphore(1), None)) - self._user = user - self._password = password - self._concurrent_connections = concurrent_connections - self._use_https = use_https - self._connect_timeout = connect_timeout - self._config_gen = None - self._config_gen_ts = None - self._gen_timeout = gen_timeout - - # Connection pool is a list of queues. - self._conn_pool = eventlet.queue.PriorityQueue() - self._next_conn_priority = 1 - for host, port, is_ssl in api_providers: - for _ in range(concurrent_connections): - conn = self._create_connection(host, port, is_ssl) - self._conn_pool.put((self._next_conn_priority, conn)) - self._next_conn_priority += 1 - - def acquire_redirect_connection(self, conn_params, auto_login=True, - headers=None): - """Check out or create connection to redirected NSX API server. - - Args: - conn_params: tuple specifying target of redirect, see - self._conn_params() - auto_login: returned connection should have valid session cookie - headers: headers to pass on if auto_login - - Returns: An available HTTPConnection instance corresponding to the - specified conn_params. If a connection did not previously - exist, new connections are created with the highest prioity - in the connection pool and one of these new connections - returned. - """ - result_conn = None - data = self._get_provider_data(conn_params) - if data: - # redirect target already exists in provider data and connections - # to the provider have been added to the connection pool. Try to - # obtain a connection from the pool, note that it's possible that - # all connection to the provider are currently in use. 
- conns = [] - while not self._conn_pool.empty(): - priority, conn = self._conn_pool.get_nowait() - if not result_conn and self._conn_params(conn) == conn_params: - conn.priority = priority - result_conn = conn - else: - conns.append((priority, conn)) - for priority, conn in conns: - self._conn_pool.put((priority, conn)) - # hack: if no free connections available, create new connection - # and stash "no_release" attribute (so that we only exceed - # self._concurrent_connections temporarily) - if not result_conn: - conn = self._create_connection(*conn_params) - conn.priority = 0 # redirect connections have highest priority - conn.no_release = True - result_conn = conn - else: - #redirect target not already known, setup provider lists - self._api_providers.update([conn_params]) - self._set_provider_data(conn_params, - (eventlet.semaphore.Semaphore(1), None)) - # redirects occur during cluster upgrades, i.e. results to old - # redirects to new, so give redirect targets highest priority - priority = 0 - for i in range(self._concurrent_connections): - conn = self._create_connection(*conn_params) - conn.priority = priority - if i == self._concurrent_connections - 1: - break - self._conn_pool.put((priority, conn)) - result_conn = conn - if result_conn: - result_conn.last_used = time.time() - if auto_login and self.auth_cookie(conn) is None: - self._wait_for_login(result_conn, headers) - return result_conn - - def _login(self, conn=None, headers=None): - '''Issue login request and update authentication cookie.''' - cookie = None - g = eventlet_request.LoginRequestEventlet( - self, self._user, self._password, conn, headers) - g.start() - ret = g.join() - if ret: - if isinstance(ret, Exception): - LOG.error(_('Login error "%s"'), ret) - raise ret - - cookie = ret.getheader("Set-Cookie") - if cookie: - LOG.debug(_("Saving new authentication cookie '%s'"), cookie) - - return cookie - -# Register as subclass. 
-base.ApiClientBase.register(EventletApiClient) diff --git a/neutron/plugins/vmware/api_client/eventlet_request.py b/neutron/plugins/vmware/api_client/eventlet_request.py deleted file mode 100644 index 26c378e0c..000000000 --- a/neutron/plugins/vmware/api_client/eventlet_request.py +++ /dev/null @@ -1,240 +0,0 @@ -# Copyright 2012 VMware, Inc. -# -# All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import eventlet -import httplib -import urllib - -from neutron.openstack.common import jsonutils as json -from neutron.openstack.common import log as logging -from neutron.plugins.vmware.api_client import request - -LOG = logging.getLogger(__name__) -USER_AGENT = "Neutron eventlet client/2.0" - - -class EventletApiRequest(request.ApiRequest): - '''Eventlet-based ApiRequest class. - - This class will form the basis for eventlet-based ApiRequest classes - ''' - - # Maximum number of green threads present in the system at one time. - API_REQUEST_POOL_SIZE = request.DEFAULT_API_REQUEST_POOL_SIZE - - # Pool of green threads. One green thread is allocated per incoming - # request. Incoming requests will block when the pool is empty. - API_REQUEST_POOL = eventlet.GreenPool(API_REQUEST_POOL_SIZE) - - # A unique id is assigned to each incoming request. When the current - # request id reaches MAXIMUM_REQUEST_ID it wraps around back to 0. - MAXIMUM_REQUEST_ID = request.DEFAULT_MAXIMUM_REQUEST_ID - - # The request id for the next incoming request. 
- CURRENT_REQUEST_ID = 0 - - def __init__(self, client_obj, url, method="GET", body=None, - headers=None, - request_timeout=request.DEFAULT_REQUEST_TIMEOUT, - retries=request.DEFAULT_RETRIES, - auto_login=True, - redirects=request.DEFAULT_REDIRECTS, - http_timeout=request.DEFAULT_HTTP_TIMEOUT, client_conn=None): - '''Constructor.''' - self._api_client = client_obj - self._url = url - self._method = method - self._body = body - self._headers = headers or {} - self._request_timeout = request_timeout - self._retries = retries - self._auto_login = auto_login - self._redirects = redirects - self._http_timeout = http_timeout - self._client_conn = client_conn - self._abort = False - - self._request_error = None - - if "User-Agent" not in self._headers: - self._headers["User-Agent"] = USER_AGENT - - self._green_thread = None - # Retrieve and store this instance's unique request id. - self._request_id = EventletApiRequest.CURRENT_REQUEST_ID - # Update the class variable that tracks request id. - # Request IDs wrap around at MAXIMUM_REQUEST_ID - next_request_id = self._request_id + 1 - next_request_id %= self.MAXIMUM_REQUEST_ID - EventletApiRequest.CURRENT_REQUEST_ID = next_request_id - - @classmethod - def _spawn(cls, func, *args, **kwargs): - '''Allocate a green thread from the class pool.''' - return cls.API_REQUEST_POOL.spawn(func, *args, **kwargs) - - def spawn(self, func, *args, **kwargs): - '''Spawn a new green thread with the supplied function and args.''' - return self.__class__._spawn(func, *args, **kwargs) - - @classmethod - def joinall(cls): - '''Wait for all outstanding requests to complete.''' - return cls.API_REQUEST_POOL.waitall() - - def join(self): - '''Wait for instance green thread to complete.''' - if self._green_thread is not None: - return self._green_thread.wait() - return Exception(_('Joining an invalid green thread')) - - def start(self): - '''Start request processing.''' - self._green_thread = self.spawn(self._run) - - def copy(self): - '''Return a 
copy of this request instance.''' - return EventletApiRequest( - self._api_client, self._url, self._method, self._body, - self._headers, self._request_timeout, self._retries, - self._auto_login, self._redirects, self._http_timeout) - - def _run(self): - '''Method executed within green thread.''' - if self._request_timeout: - # No timeout exception escapes the with block. - with eventlet.timeout.Timeout(self._request_timeout, False): - return self._handle_request() - - LOG.info(_('[%d] Request timeout.'), self._rid()) - self._request_error = Exception(_('Request timeout')) - return None - else: - return self._handle_request() - - def _handle_request(self): - '''First level request handling.''' - attempt = 0 - timeout = 0 - response = None - while response is None and attempt <= self._retries: - eventlet.greenthread.sleep(timeout) - attempt += 1 - - req = self._issue_request() - # automatically raises any exceptions returned. - if isinstance(req, httplib.HTTPResponse): - timeout = 0 - if attempt <= self._retries and not self._abort: - if req.status in (httplib.UNAUTHORIZED, httplib.FORBIDDEN): - continue - elif req.status == httplib.SERVICE_UNAVAILABLE: - timeout = 0.5 - continue - # else fall through to return the error code - - LOG.debug(_("[%(rid)d] Completed request '%(method)s %(url)s'" - ": %(status)s"), - {'rid': self._rid(), 'method': self._method, - 'url': self._url, 'status': req.status}) - self._request_error = None - response = req - else: - LOG.info(_('[%(rid)d] Error while handling request: %(req)s'), - {'rid': self._rid(), 'req': req}) - self._request_error = req - response = None - return response - - -class LoginRequestEventlet(EventletApiRequest): - '''Process a login request.''' - - def __init__(self, client_obj, user, password, client_conn=None, - headers=None): - if headers is None: - headers = {} - headers.update({"Content-Type": "application/x-www-form-urlencoded"}) - body = urllib.urlencode({"username": user, "password": password}) - 
super(LoginRequestEventlet, self).__init__( - client_obj, "/ws.v1/login", "POST", body, headers, - auto_login=False, client_conn=client_conn) - - def session_cookie(self): - if self.successful(): - return self.value.getheader("Set-Cookie") - return None - - -class GetApiProvidersRequestEventlet(EventletApiRequest): - '''Get a list of API providers.''' - - def __init__(self, client_obj): - url = "/ws.v1/control-cluster/node?fields=roles" - super(GetApiProvidersRequestEventlet, self).__init__( - client_obj, url, "GET", auto_login=True) - - def api_providers(self): - """Parse api_providers from response. - - Returns: api_providers in [(host, port, is_ssl), ...] format - """ - def _provider_from_listen_addr(addr): - # (pssl|ptcp):: => (host, port, is_ssl) - parts = addr.split(':') - return (parts[1], int(parts[2]), parts[0] == 'pssl') - - try: - if self.successful(): - ret = [] - body = json.loads(self.value.body) - for node in body.get('results', []): - for role in node.get('roles', []): - if role.get('role') == 'api_provider': - addr = role.get('listen_addr') - if addr: - ret.append(_provider_from_listen_addr(addr)) - return ret - except Exception as e: - LOG.warn(_("[%(rid)d] Failed to parse API provider: %(e)s"), - {'rid': self._rid(), 'e': e}) - # intentionally fall through - return None - - -class GenericRequestEventlet(EventletApiRequest): - '''Handle a generic request.''' - - def __init__(self, client_obj, method, url, body, content_type, - auto_login=False, - request_timeout=request.DEFAULT_REQUEST_TIMEOUT, - http_timeout=request.DEFAULT_HTTP_TIMEOUT, - retries=request.DEFAULT_RETRIES, - redirects=request.DEFAULT_REDIRECTS): - headers = {"Content-Type": content_type} - super(GenericRequestEventlet, self).__init__( - client_obj, url, method, body, headers, - request_timeout=request_timeout, retries=retries, - auto_login=auto_login, redirects=redirects, - http_timeout=http_timeout) - - def session_cookie(self): - if self.successful(): - return 
self.value.getheader("Set-Cookie") - return None - - -request.ApiRequest.register(EventletApiRequest) diff --git a/neutron/plugins/vmware/api_client/exception.py b/neutron/plugins/vmware/api_client/exception.py deleted file mode 100644 index b3facfcaa..000000000 --- a/neutron/plugins/vmware/api_client/exception.py +++ /dev/null @@ -1,121 +0,0 @@ -# Copyright 2014 VMware, Inc. -# -# All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - - -class NsxApiException(Exception): - """Base NSX API Client Exception. - - To correctly use this class, inherit from it and define - a 'message' property. That message will get printf'd - with the keyword arguments provided to the constructor. 
- - """ - message = _("An unknown exception occurred.") - - def __init__(self, **kwargs): - try: - self._error_string = self.message % kwargs - except Exception: - # at least get the core message out if something happened - self._error_string = self.message - - def __str__(self): - return self._error_string - - -class UnAuthorizedRequest(NsxApiException): - message = _("Server denied session's authentication credentials.") - - -class ResourceNotFound(NsxApiException): - message = _("An entity referenced in the request was not found.") - - -class Conflict(NsxApiException): - message = _("Request conflicts with configuration on a different " - "entity.") - - -class ServiceUnavailable(NsxApiException): - message = _("Request could not completed because the associated " - "resource could not be reached.") - - -class Forbidden(NsxApiException): - message = _("The request is forbidden from accessing the " - "referenced resource.") - - -class ReadOnlyMode(Forbidden): - message = _("Create/Update actions are forbidden when in read-only mode.") - - -class RequestTimeout(NsxApiException): - message = _("The request has timed out.") - - -class BadRequest(NsxApiException): - message = _("The server is unable to fulfill the request due " - "to a bad syntax") - - -class InvalidSecurityCertificate(BadRequest): - message = _("The backend received an invalid security certificate.") - - -def fourZeroZero(response=None): - if response and "Invalid SecurityCertificate" in response.body: - raise InvalidSecurityCertificate() - raise BadRequest() - - -def fourZeroFour(response=None): - raise ResourceNotFound() - - -def fourZeroNine(response=None): - raise Conflict() - - -def fiveZeroThree(response=None): - raise ServiceUnavailable() - - -def fourZeroThree(response=None): - if 'read-only' in response.body: - raise ReadOnlyMode() - else: - raise Forbidden() - - -def zero(self, response=None): - raise NsxApiException() - - -ERROR_MAPPINGS = { - 400: fourZeroZero, - 404: fourZeroFour, - 405: 
zero, - 409: fourZeroNine, - 503: fiveZeroThree, - 403: fourZeroThree, - 301: zero, - 307: zero, - 500: zero, - 501: zero, - 503: zero -} diff --git a/neutron/plugins/vmware/api_client/request.py b/neutron/plugins/vmware/api_client/request.py deleted file mode 100644 index 70e7dcef4..000000000 --- a/neutron/plugins/vmware/api_client/request.py +++ /dev/null @@ -1,287 +0,0 @@ -# Copyright 2012 VMware, Inc. -# -# All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -import abc -import copy -import eventlet -import httplib -import time - -import six -import six.moves.urllib.parse as urlparse - -from neutron.openstack.common import excutils -from neutron.openstack.common import log as logging -from neutron.plugins.vmware import api_client - -LOG = logging.getLogger(__name__) - -DEFAULT_REQUEST_TIMEOUT = 30 -DEFAULT_HTTP_TIMEOUT = 30 -DEFAULT_RETRIES = 2 -DEFAULT_REDIRECTS = 2 -DEFAULT_API_REQUEST_POOL_SIZE = 1000 -DEFAULT_MAXIMUM_REQUEST_ID = 4294967295 -DOWNLOAD_TIMEOUT = 180 - - -@six.add_metaclass(abc.ABCMeta) -class ApiRequest(object): - '''An abstract baseclass for all ApiRequest implementations. - - This defines the interface and property structure for both eventlet and - gevent-based ApiRequest classes. - ''' - - # List of allowed status codes. 
- ALLOWED_STATUS_CODES = [ - httplib.OK, - httplib.CREATED, - httplib.NO_CONTENT, - httplib.MOVED_PERMANENTLY, - httplib.TEMPORARY_REDIRECT, - httplib.BAD_REQUEST, - httplib.UNAUTHORIZED, - httplib.FORBIDDEN, - httplib.NOT_FOUND, - httplib.CONFLICT, - httplib.INTERNAL_SERVER_ERROR, - httplib.SERVICE_UNAVAILABLE - ] - - @abc.abstractmethod - def start(self): - pass - - @abc.abstractmethod - def join(self): - pass - - @abc.abstractmethod - def copy(self): - pass - - def _issue_request(self): - '''Issue a request to a provider.''' - conn = (self._client_conn or - self._api_client.acquire_connection(True, - copy.copy(self._headers), - rid=self._rid())) - if conn is None: - error = Exception(_("No API connections available")) - self._request_error = error - return error - - url = self._url - LOG.debug(_("[%(rid)d] Issuing - request %(conn)s"), - {'rid': self._rid(), 'conn': self._request_str(conn, url)}) - issued_time = time.time() - is_conn_error = False - is_conn_service_unavail = False - response = None - try: - redirects = 0 - while (redirects <= self._redirects): - # Update connection with user specified request timeout, - # the connect timeout is usually smaller so we only set - # the request timeout after a connection is established - if conn.sock is None: - conn.connect() - conn.sock.settimeout(self._http_timeout) - elif conn.sock.gettimeout() != self._http_timeout: - conn.sock.settimeout(self._http_timeout) - - headers = copy.copy(self._headers) - cookie = self._api_client.auth_cookie(conn) - if cookie: - headers["Cookie"] = cookie - - gen = self._api_client.config_gen - if gen: - headers["X-Nvp-Wait-For-Config-Generation"] = gen - LOG.debug(_("Setting X-Nvp-Wait-For-Config-Generation " - "request header: '%s'"), gen) - try: - conn.request(self._method, url, self._body, headers) - except Exception as e: - with excutils.save_and_reraise_exception(): - LOG.warn(_("[%(rid)d] Exception issuing request: " - "%(e)s"), - {'rid': self._rid(), 'e': e}) - - response = 
conn.getresponse() - response.body = response.read() - response.headers = response.getheaders() - elapsed_time = time.time() - issued_time - LOG.debug(_("[%(rid)d] Completed request '%(conn)s': " - "%(status)s (%(elapsed)s seconds)"), - {'rid': self._rid(), - 'conn': self._request_str(conn, url), - 'status': response.status, - 'elapsed': elapsed_time}) - - new_gen = response.getheader('X-Nvp-Config-Generation', None) - if new_gen: - LOG.debug(_("Reading X-Nvp-config-Generation response " - "header: '%s'"), new_gen) - if (self._api_client.config_gen is None or - self._api_client.config_gen < int(new_gen)): - self._api_client.config_gen = int(new_gen) - - if response.status == httplib.UNAUTHORIZED: - - if cookie is None and self._url != "/ws.v1/login": - # The connection still has no valid cookie despite - # attemps to authenticate and the request has failed - # with unauthorized status code. If this isn't a - # a request to authenticate, we should abort the - # request since there is no point in retrying. - self._abort = True - else: - # If request is unauthorized, clear the session cookie - # for the current provider so that subsequent requests - # to the same provider triggers re-authentication. 
- self._api_client.set_auth_cookie(conn, None) - - self._api_client.set_auth_cookie(conn, None) - elif response.status == httplib.SERVICE_UNAVAILABLE: - is_conn_service_unavail = True - - if response.status not in [httplib.MOVED_PERMANENTLY, - httplib.TEMPORARY_REDIRECT]: - break - elif redirects >= self._redirects: - LOG.info(_("[%d] Maximum redirects exceeded, aborting " - "request"), self._rid()) - break - redirects += 1 - - conn, url = self._redirect_params(conn, response.headers, - self._client_conn is None) - if url is None: - response.status = httplib.INTERNAL_SERVER_ERROR - break - LOG.info(_("[%(rid)d] Redirecting request to: %(conn)s"), - {'rid': self._rid(), - 'conn': self._request_str(conn, url)}) - # yield here, just in case we are not out of the loop yet - eventlet.greenthread.sleep(0) - # If we receive any of these responses, then - # our server did not process our request and may be in an - # errored state. Raise an exception, which will cause the - # the conn to be released with is_conn_error == True - # which puts the conn on the back of the client's priority - # queue. 
- if (response.status == httplib.INTERNAL_SERVER_ERROR and - response.status > httplib.NOT_IMPLEMENTED): - LOG.warn(_("[%(rid)d] Request '%(method)s %(url)s' " - "received: %(status)s"), - {'rid': self._rid(), 'method': self._method, - 'url': self._url, 'status': response.status}) - raise Exception(_('Server error return: %s'), response.status) - return response - except Exception as e: - if isinstance(e, httplib.BadStatusLine): - msg = (_("Invalid server response")) - else: - msg = unicode(e) - if response is None: - elapsed_time = time.time() - issued_time - LOG.warn(_("[%(rid)d] Failed request '%(conn)s': '%(msg)s' " - "(%(elapsed)s seconds)"), - {'rid': self._rid(), 'conn': self._request_str(conn, url), - 'msg': msg, 'elapsed': elapsed_time}) - self._request_error = e - is_conn_error = True - return e - finally: - # Make sure we release the original connection provided by the - # acquire_connection() call above. - if self._client_conn is None: - self._api_client.release_connection(conn, is_conn_error, - is_conn_service_unavail, - rid=self._rid()) - - def _redirect_params(self, conn, headers, allow_release_conn=False): - """Process redirect response, create new connection if necessary. - - Args: - conn: connection that returned the redirect response - headers: response headers of the redirect response - allow_release_conn: if redirecting to a different server, - release existing connection back to connection pool. - - Returns: Return tuple(conn, url) where conn is a connection object - to the redirect target and url is the path of the API request - """ - - url = None - for name, value in headers: - if name.lower() == "location": - url = value - break - if not url: - LOG.warn(_("[%d] Received redirect status without location header" - " field"), self._rid()) - return (conn, None) - # Accept location with the following format: - # 1. /path, redirect to same node - # 2. scheme://hostname:[port]/path where scheme is https or http - # Reject others - # 3. e.g. 
relative paths, unsupported scheme, unspecified host - result = urlparse.urlparse(url) - if not result.scheme and not result.hostname and result.path: - if result.path[0] == "/": - if result.query: - url = "%s?%s" % (result.path, result.query) - else: - url = result.path - return (conn, url) # case 1 - else: - LOG.warn(_("[%(rid)d] Received invalid redirect location: " - "'%(url)s'"), {'rid': self._rid(), 'url': url}) - return (conn, None) # case 3 - elif result.scheme not in ["http", "https"] or not result.hostname: - LOG.warn(_("[%(rid)d] Received malformed redirect " - "location: %(url)s"), {'rid': self._rid(), 'url': url}) - return (conn, None) # case 3 - # case 2, redirect location includes a scheme - # so setup a new connection and authenticate - if allow_release_conn: - self._api_client.release_connection(conn) - conn_params = (result.hostname, result.port, result.scheme == "https") - conn = self._api_client.acquire_redirect_connection(conn_params, True, - self._headers) - if result.query: - url = "%s?%s" % (result.path, result.query) - else: - url = result.path - return (conn, url) - - def _rid(self): - '''Return current request id.''' - return self._request_id - - @property - def request_error(self): - '''Return any errors associated with this instance.''' - return self._request_error - - def _request_str(self, conn, url): - '''Return string representation of connection.''' - return "%s %s/%s" % (self._method, api_client.ctrl_conn_to_str(conn), - url) diff --git a/neutron/plugins/vmware/api_client/version.py b/neutron/plugins/vmware/api_client/version.py deleted file mode 100644 index 52fcd74b4..000000000 --- a/neutron/plugins/vmware/api_client/version.py +++ /dev/null @@ -1,43 +0,0 @@ -# Copyright 2012 VMware, Inc. -# -# All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from neutron.openstack.common import log as logging - -LOG = logging.getLogger(__name__) - - -def find_version(headers): - """Retrieve NSX controller version from response headers.""" - for (header_name, header_value) in (headers or ()): - try: - if header_name == 'server': - return Version(header_value.split('/')[1]) - except IndexError: - LOG.warning(_("Unable to fetch NSX version from response " - "headers :%s"), headers) - - -class Version(object): - """Abstracts NSX version by exposing major and minor.""" - - def __init__(self, version): - self.full_version = version.split('.') - self.major = int(self.full_version[0]) - self.minor = int(self.full_version[1]) - - def __str__(self): - return '.'.join(self.full_version) diff --git a/neutron/plugins/vmware/check_nsx_config.py b/neutron/plugins/vmware/check_nsx_config.py deleted file mode 100644 index 14eca41cb..000000000 --- a/neutron/plugins/vmware/check_nsx_config.py +++ /dev/null @@ -1,163 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2013 VMware, Inc. -# All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -from __future__ import print_function - -import sys - -from oslo.config import cfg - -from neutron.common import config -from neutron.plugins.vmware.common import config as nsx_config # noqa -from neutron.plugins.vmware.common import nsx_utils -from neutron.plugins.vmware import nsxlib - -config.setup_logging(cfg.CONF) - - -def help(name): - print("Usage: %s path/to/neutron/plugin/ini/config/file" % name) - sys.exit(1) - - -def get_nsx_controllers(cluster): - return cluster.nsx_controllers - - -def config_helper(config_entity, cluster): - try: - return nsxlib.do_request('GET', - "/ws.v1/%s?fields=uuid" % config_entity, - cluster=cluster).get('results', []) - except Exception as e: - msg = (_("Error '%(err)s' when connecting to controller(s): %(ctl)s.") - % {'err': str(e), - 'ctl': ', '.join(get_nsx_controllers(cluster))}) - raise Exception(msg) - - -def get_control_cluster_nodes(cluster): - return config_helper("control-cluster/node", cluster) - - -def get_gateway_services(cluster): - ret_gw_services = {"L2GatewayServiceConfig": [], - "L3GatewayServiceConfig": []} - gw_services = config_helper("gateway-service", cluster) - for gw_service in gw_services: - ret_gw_services[gw_service['type']].append(gw_service['uuid']) - return ret_gw_services - - -def get_transport_zones(cluster): - transport_zones = config_helper("transport-zone", cluster) - return [transport_zone['uuid'] for transport_zone in transport_zones] - - -def get_transport_nodes(cluster): - transport_nodes = config_helper("transport-node", cluster) - return [transport_node['uuid'] for transport_node in transport_nodes] - - -def is_transport_node_connected(cluster, node_uuid): - try: - return nsxlib.do_request('GET', - "/ws.v1/transport-node/%s/status" % node_uuid, - cluster=cluster)['connection']['connected'] - except Exception as e: - msg = (_("Error '%(err)s' when connecting to controller(s): 
%(ctl)s.") - % {'err': str(e), - 'ctl': ', '.join(get_nsx_controllers(cluster))}) - raise Exception(msg) - - -def main(): - if len(sys.argv) != 2: - help(sys.argv[0]) - args = ['--config-file'] - args.append(sys.argv[1]) - config.init(args) - print("----------------------- Database Options -----------------------") - print("\tconnection: %s" % cfg.CONF.database.connection) - print("\tretry_interval: %d" % cfg.CONF.database.retry_interval) - print("\tmax_retries: %d" % cfg.CONF.database.max_retries) - print("----------------------- NSX Options -----------------------") - print("\tNSX Generation Timeout %d" % cfg.CONF.NSX.nsx_gen_timeout) - print("\tNumber of concurrent connections to each controller %d" % - cfg.CONF.NSX.concurrent_connections) - print("\tmax_lp_per_bridged_ls: %s" % cfg.CONF.NSX.max_lp_per_bridged_ls) - print("\tmax_lp_per_overlay_ls: %s" % cfg.CONF.NSX.max_lp_per_overlay_ls) - print("----------------------- Cluster Options -----------------------") - print("\trequested_timeout: %s" % cfg.CONF.req_timeout) - print("\tretries: %s" % cfg.CONF.retries) - print("\tredirects: %s" % cfg.CONF.redirects) - print("\thttp_timeout: %s" % cfg.CONF.http_timeout) - cluster = nsx_utils.create_nsx_cluster( - cfg.CONF, - cfg.CONF.NSX.concurrent_connections, - cfg.CONF.NSX.nsx_gen_timeout) - nsx_controllers = get_nsx_controllers(cluster) - num_controllers = len(nsx_controllers) - print("Number of controllers found: %s" % num_controllers) - if num_controllers == 0: - print("You must specify at least one controller!") - sys.exit(1) - - get_control_cluster_nodes(cluster) - for controller in nsx_controllers: - print("\tController endpoint: %s" % controller) - gateway_services = get_gateway_services(cluster) - default_gateways = { - "L2GatewayServiceConfig": cfg.CONF.default_l2_gw_service_uuid, - "L3GatewayServiceConfig": cfg.CONF.default_l3_gw_service_uuid} - errors = 0 - for svc_type in default_gateways.keys(): - for uuid in gateway_services[svc_type]: - 
print("\t\tGateway(%s) uuid: %s" % (svc_type, uuid)) - if (default_gateways[svc_type] and - default_gateways[svc_type] not in gateway_services[svc_type]): - print("\t\t\tError: specified default %s gateway (%s) is " - "missing from NSX Gateway Services!" % ( - svc_type, - default_gateways[svc_type])) - errors += 1 - transport_zones = get_transport_zones(cluster) - print("\tTransport zones: %s" % transport_zones) - if cfg.CONF.default_tz_uuid not in transport_zones: - print("\t\tError: specified default transport zone " - "(%s) is missing from NSX transport zones!" - % cfg.CONF.default_tz_uuid) - errors += 1 - transport_nodes = get_transport_nodes(cluster) - print("\tTransport nodes: %s" % transport_nodes) - node_errors = [] - for node in transport_nodes: - if not is_transport_node_connected(cluster, node): - node_errors.append(node) - - # Use different exit codes, so that we can distinguish - # between config and runtime errors - if len(node_errors): - print("\nThere are one or mode transport nodes that are " - "not connected: %s. Please, revise!" % node_errors) - sys.exit(10) - elif errors: - print("\nThere are %d errors with your configuration. " - "Please, revise!" % errors) - sys.exit(12) - else: - print("Done.") diff --git a/neutron/plugins/vmware/common/__init__.py b/neutron/plugins/vmware/common/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/neutron/plugins/vmware/common/config.py b/neutron/plugins/vmware/common/config.py deleted file mode 100644 index c75f982a6..000000000 --- a/neutron/plugins/vmware/common/config.py +++ /dev/null @@ -1,198 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2012 VMware, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo.config import cfg - -from neutron.plugins.vmware.common import exceptions as nsx_exc - - -class AgentModes: - AGENT = 'agent' - AGENTLESS = 'agentless' - COMBINED = 'combined' - - -class MetadataModes: - DIRECT = 'access_network' - INDIRECT = 'dhcp_host_route' - - -class ReplicationModes: - SERVICE = 'service' - SOURCE = 'source' - - -base_opts = [ - cfg.IntOpt('max_lp_per_bridged_ls', default=5000, - deprecated_group='NVP', - help=_("Maximum number of ports of a logical switch on a " - "bridged transport zone (default 5000)")), - cfg.IntOpt('max_lp_per_overlay_ls', default=256, - deprecated_group='NVP', - help=_("Maximum number of ports of a logical switch on an " - "overlay transport zone (default 256)")), - cfg.IntOpt('concurrent_connections', default=10, - deprecated_group='NVP', - help=_("Maximum concurrent connections to each NSX " - "controller.")), - cfg.IntOpt('nsx_gen_timeout', default=-1, - deprecated_name='nvp_gen_timeout', - deprecated_group='NVP', - help=_("Number of seconds a generation id should be valid for " - "(default -1 meaning do not time out)")), - cfg.StrOpt('metadata_mode', default=MetadataModes.DIRECT, - deprecated_group='NVP', - help=_("If set to access_network this enables a dedicated " - "connection to the metadata proxy for metadata server " - "access via Neutron router. If set to dhcp_host_route " - "this enables host route injection via the dhcp agent. 
" - "This option is only useful if running on a host that " - "does not support namespaces otherwise access_network " - "should be used.")), - cfg.StrOpt('default_transport_type', default='stt', - deprecated_group='NVP', - help=_("The default network tranport type to use (stt, gre, " - "bridge, ipsec_gre, or ipsec_stt)")), - cfg.StrOpt('agent_mode', default=AgentModes.AGENT, - deprecated_group='NVP', - help=_("The mode used to implement DHCP/metadata services.")), - cfg.StrOpt('replication_mode', default=ReplicationModes.SERVICE, - help=_("The default option leverages service nodes to perform" - " packet replication though one could set to this to " - "'source' to perform replication locally. This is useful" - " if one does not want to deploy a service node(s).")) -] - -sync_opts = [ - cfg.IntOpt('state_sync_interval', default=10, - deprecated_group='NVP_SYNC', - help=_("Interval in seconds between runs of the state " - "synchronization task. Set it to 0 to disable it")), - cfg.IntOpt('max_random_sync_delay', default=0, - deprecated_group='NVP_SYNC', - help=_("Maximum value for the additional random " - "delay in seconds between runs of the state " - "synchronization task")), - cfg.IntOpt('min_sync_req_delay', default=1, - deprecated_group='NVP_SYNC', - help=_('Minimum delay, in seconds, between two state ' - 'synchronization queries to NSX. It must not ' - 'exceed state_sync_interval')), - cfg.IntOpt('min_chunk_size', default=500, - deprecated_group='NVP_SYNC', - help=_('Minimum number of resources to be retrieved from NSX ' - 'during state synchronization')), - cfg.BoolOpt('always_read_status', default=False, - deprecated_group='NVP_SYNC', - help=_('Always read operational status from backend on show ' - 'operations. 
Enabling this option might slow down ' - 'the system.')) -] - -connection_opts = [ - cfg.StrOpt('nsx_user', - default='admin', - deprecated_name='nvp_user', - help=_('User name for NSX controllers in this cluster')), - cfg.StrOpt('nsx_password', - default='admin', - deprecated_name='nvp_password', - secret=True, - help=_('Password for NSX controllers in this cluster')), - cfg.IntOpt('req_timeout', - default=30, - help=_('Total time limit for a cluster request')), - cfg.IntOpt('http_timeout', - default=30, - help=_('Time before aborting a request')), - cfg.IntOpt('retries', - default=2, - help=_('Number of time a request should be retried')), - cfg.IntOpt('redirects', - default=2, - help=_('Number of times a redirect should be followed')), - cfg.ListOpt('nsx_controllers', - deprecated_name='nvp_controllers', - help=_("Lists the NSX controllers in this cluster")), -] - -cluster_opts = [ - cfg.StrOpt('default_tz_uuid', - help=_("This is uuid of the default NSX Transport zone that " - "will be used for creating tunneled isolated " - "\"Neutron\" networks. 
It needs to be created in NSX " - "before starting Neutron with the nsx plugin.")), - cfg.StrOpt('default_l3_gw_service_uuid', - help=_("Unique identifier of the NSX L3 Gateway service " - "which will be used for implementing routers and " - "floating IPs")), - cfg.StrOpt('default_l2_gw_service_uuid', - help=_("Unique identifier of the NSX L2 Gateway service " - "which will be used by default for network gateways")), - cfg.StrOpt('default_service_cluster_uuid', - help=_("Unique identifier of the Service Cluster which will " - "be used by logical services like dhcp and metadata")), - cfg.StrOpt('default_interface_name', default='breth0', - help=_("Name of the interface on a L2 Gateway transport node" - "which should be used by default when setting up a " - "network connection")), -] - -DEFAULT_STATUS_CHECK_INTERVAL = 2000 - -vcns_opts = [ - cfg.StrOpt('user', - default='admin', - help=_('User name for vsm')), - cfg.StrOpt('password', - default='default', - secret=True, - help=_('Password for vsm')), - cfg.StrOpt('manager_uri', - help=_('uri for vsm')), - cfg.StrOpt('datacenter_moid', - help=_('Optional parameter identifying the ID of datacenter ' - 'to deploy NSX Edges')), - cfg.StrOpt('deployment_container_id', - help=_('Optional parameter identifying the ID of datastore to ' - 'deploy NSX Edges')), - cfg.StrOpt('resource_pool_id', - help=_('Optional parameter identifying the ID of resource to ' - 'deploy NSX Edges')), - cfg.StrOpt('datastore_id', - help=_('Optional parameter identifying the ID of datastore to ' - 'deploy NSX Edges')), - cfg.StrOpt('external_network', - help=_('Network ID for physical network connectivity')), - cfg.IntOpt('task_status_check_interval', - default=DEFAULT_STATUS_CHECK_INTERVAL, - help=_("Task status check interval")) -] - -# Register the configuration options -cfg.CONF.register_opts(connection_opts) -cfg.CONF.register_opts(cluster_opts) -cfg.CONF.register_opts(vcns_opts, group="vcns") -cfg.CONF.register_opts(base_opts, group="NSX") 
-cfg.CONF.register_opts(sync_opts, group="NSX_SYNC") - - -def validate_config_options(): - if cfg.CONF.NSX.replication_mode not in (ReplicationModes.SERVICE, - ReplicationModes.SOURCE): - error = (_("Invalid replication_mode: %s") % - cfg.CONF.NSX.replication_mode) - raise nsx_exc.NsxPluginException(err_msg=error) diff --git a/neutron/plugins/vmware/common/exceptions.py b/neutron/plugins/vmware/common/exceptions.py deleted file mode 100644 index 83cc05bf4..000000000 --- a/neutron/plugins/vmware/common/exceptions.py +++ /dev/null @@ -1,126 +0,0 @@ -# Copyright 2012 VMware, Inc -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from neutron.common import exceptions as n_exc - - -class NsxPluginException(n_exc.NeutronException): - message = _("An unexpected error occurred in the NSX Plugin: %(err_msg)s") - - -class InvalidVersion(NsxPluginException): - message = _("Unable to fulfill request with version %(version)s.") - - -class InvalidConnection(NsxPluginException): - message = _("Invalid NSX connection parameters: %(conn_params)s") - - -class InvalidClusterConfiguration(NsxPluginException): - message = _("Invalid cluster values: %(invalid_attrs)s. 
Please ensure " - "that these values are specified in the [DEFAULT] " - "section of the NSX plugin ini file.") - - -class InvalidNovaZone(NsxPluginException): - message = _("Unable to find cluster config entry " - "for nova zone: %(nova_zone)s") - - -class NoMorePortsException(NsxPluginException): - message = _("Unable to create port on network %(network)s. " - "Maximum number of ports reached") - - -class NatRuleMismatch(NsxPluginException): - message = _("While retrieving NAT rules, %(actual_rules)s were found " - "whereas rules in the (%(min_rules)s,%(max_rules)s) interval " - "were expected") - - -class InvalidAttachmentType(NsxPluginException): - message = _("Invalid NSX attachment type '%(attachment_type)s'") - - -class MaintenanceInProgress(NsxPluginException): - message = _("The networking backend is currently in maintenance mode and " - "therefore unable to accept requests which modify its state. " - "Please try later.") - - -class L2GatewayAlreadyInUse(n_exc.Conflict): - message = _("Gateway Service %(gateway)s is already in use") - - -class InvalidSecurityCertificate(NsxPluginException): - message = _("An invalid security certificate was specified for the " - "gateway device. 
Certificates must be enclosed between " - "'-----BEGIN CERTIFICATE-----' and " - "'-----END CERTIFICATE-----'") - - -class ServiceOverQuota(n_exc.Conflict): - message = _("Quota exceeded for Vcns resource: %(overs)s: %(err_msg)s") - - -class RouterInUseByLBService(n_exc.InUse): - message = _("Router %(router_id)s is in use by Loadbalancer Service " - "%(vip_id)s") - - -class RouterInUseByFWService(n_exc.InUse): - message = _("Router %(router_id)s is in use by firewall Service " - "%(firewall_id)s") - - -class VcnsDriverException(NsxPluginException): - message = _("Error happened in NSX VCNS Driver: %(err_msg)s") - - -class AdvRouterServiceUnavailable(n_exc.ServiceUnavailable): - message = _("Router %(router_id)s is not in 'ACTIVE' " - "status, thus unable to provide advanced service") - - -class ServiceClusterUnavailable(NsxPluginException): - message = _("Service cluster: '%(cluster_id)s' is unavailable. Please, " - "check NSX setup and/or configuration") - - -class PortConfigurationError(NsxPluginException): - message = _("An error occurred while connecting LSN %(lsn_id)s " - "and network %(net_id)s via port %(port_id)s") - - def __init__(self, **kwargs): - super(PortConfigurationError, self).__init__(**kwargs) - self.port_id = kwargs.get('port_id') - - -class LsnNotFound(n_exc.NotFound): - message = _('Unable to find LSN for %(entity)s %(entity_id)s') - - -class LsnPortNotFound(n_exc.NotFound): - message = (_('Unable to find port for LSN %(lsn_id)s ' - 'and %(entity)s %(entity_id)s')) - - -class LsnMigrationConflict(n_exc.Conflict): - message = _("Unable to migrate network '%(net_id)s' to LSN: %(reason)s") - - -class LsnConfigurationConflict(NsxPluginException): - message = _("Configuration conflict on Logical Service Node %(lsn_id)s") diff --git a/neutron/plugins/vmware/common/nsx_utils.py b/neutron/plugins/vmware/common/nsx_utils.py deleted file mode 100644 index c2c2b7f28..000000000 --- a/neutron/plugins/vmware/common/nsx_utils.py +++ /dev/null @@ -1,249 +0,0 
@@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2013 VMware Inc. -# All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from neutron.common import exceptions as n_exc -from neutron.openstack.common import log -from neutron.plugins.vmware.api_client import client -from neutron.plugins.vmware.api_client import exception as api_exc -from neutron.plugins.vmware.dbexts import db as nsx_db -from neutron.plugins.vmware.dbexts import networkgw_db -from neutron.plugins.vmware import nsx_cluster -from neutron.plugins.vmware.nsxlib import l2gateway as l2gwlib -from neutron.plugins.vmware.nsxlib import router as routerlib -from neutron.plugins.vmware.nsxlib import secgroup as secgrouplib -from neutron.plugins.vmware.nsxlib import switch as switchlib - -LOG = log.getLogger(__name__) - - -def fetch_nsx_switches(session, cluster, neutron_net_id): - """Retrieve logical switches for a neutron network. - - This function is optimized for fetching all the lswitches always - with a single NSX query. - If there is more than 1 logical switch (chained switches use case) - NSX lswitches are queried by 'quantum_net_id' tag. Otherwise the NSX - lswitch is directly retrieved by id (more efficient). 
- """ - nsx_switch_ids = get_nsx_switch_ids(session, cluster, neutron_net_id) - if len(nsx_switch_ids) > 1: - lswitches = switchlib.get_lswitches(cluster, neutron_net_id) - else: - lswitches = [switchlib.get_lswitch_by_id( - cluster, nsx_switch_ids[0])] - return lswitches - - -def get_nsx_switch_ids(session, cluster, neutron_network_id): - """Return the NSX switch id for a given neutron network. - - First lookup for mappings in Neutron database. If no mapping is - found, query the NSX backend and add the mappings. - """ - nsx_switch_ids = nsx_db.get_nsx_switch_ids( - session, neutron_network_id) - if not nsx_switch_ids: - # Find logical switches from backend. - # This is a rather expensive query, but it won't be executed - # more than once for each network in Neutron's lifetime - nsx_switches = switchlib.get_lswitches(cluster, neutron_network_id) - if not nsx_switches: - LOG.warn(_("Unable to find NSX switches for Neutron network %s"), - neutron_network_id) - return - nsx_switch_ids = [] - with session.begin(subtransactions=True): - for nsx_switch in nsx_switches: - nsx_switch_id = nsx_switch['uuid'] - nsx_switch_ids.append(nsx_switch_id) - # Create DB mapping - nsx_db.add_neutron_nsx_network_mapping( - session, - neutron_network_id, - nsx_switch_id) - return nsx_switch_ids - - -def get_nsx_switch_and_port_id(session, cluster, neutron_port_id): - """Return the NSX switch and port uuids for a given neutron port. - - First, look up the Neutron database. If not found, execute - a query on NSX platform as the mapping might be missing because - the port was created before upgrading to grizzly. - - This routine also retrieves the identifier of the logical switch in - the backend where the port is plugged. Prior to Icehouse this - information was not available in the Neutron Database. For dealing - with pre-existing records, this routine will query the backend - for retrieving the correct switch identifier. 
- - As of Icehouse release it is not indeed anymore possible to assume - the backend logical switch identifier is equal to the neutron - network identifier. - """ - nsx_switch_id, nsx_port_id = nsx_db.get_nsx_switch_and_port_id( - session, neutron_port_id) - if not nsx_switch_id: - # Find logical switch for port from backend - # This is a rather expensive query, but it won't be executed - # more than once for each port in Neutron's lifetime - nsx_ports = switchlib.query_lswitch_lports( - cluster, '*', relations='LogicalSwitchConfig', - filters={'tag': neutron_port_id, - 'tag_scope': 'q_port_id'}) - # Only one result expected - # NOTE(salv-orlando): Not handling the case where more than one - # port is found with the same neutron port tag - if not nsx_ports: - LOG.warn(_("Unable to find NSX port for Neutron port %s"), - neutron_port_id) - # This method is supposed to return a tuple - return None, None - nsx_port = nsx_ports[0] - nsx_switch_id = (nsx_port['_relations'] - ['LogicalSwitchConfig']['uuid']) - if nsx_port_id: - # Mapping already exists. Delete before recreating - nsx_db.delete_neutron_nsx_port_mapping( - session, neutron_port_id) - else: - nsx_port_id = nsx_port['uuid'] - # (re)Create DB mapping - nsx_db.add_neutron_nsx_port_mapping( - session, neutron_port_id, - nsx_switch_id, nsx_port_id) - return nsx_switch_id, nsx_port_id - - -def get_nsx_security_group_id(session, cluster, neutron_id): - """Return the NSX sec profile uuid for a given neutron sec group. - - First, look up the Neutron database. If not found, execute - a query on NSX platform as the mapping might be missing. - NOTE: Security groups are called 'security profiles' on the NSX backend. - """ - nsx_id = nsx_db.get_nsx_security_group_id(session, neutron_id) - if not nsx_id: - # Find security profile on backend. 
- # This is a rather expensive query, but it won't be executed - # more than once for each security group in Neutron's lifetime - nsx_sec_profiles = secgrouplib.query_security_profiles( - cluster, '*', - filters={'tag': neutron_id, - 'tag_scope': 'q_sec_group_id'}) - # Only one result expected - # NOTE(salv-orlando): Not handling the case where more than one - # security profile is found with the same neutron port tag - if not nsx_sec_profiles: - LOG.warn(_("Unable to find NSX security profile for Neutron " - "security group %s"), neutron_id) - return - elif len(nsx_sec_profiles) > 1: - LOG.warn(_("Multiple NSX security profiles found for Neutron " - "security group %s"), neutron_id) - nsx_sec_profile = nsx_sec_profiles[0] - nsx_id = nsx_sec_profile['uuid'] - with session.begin(subtransactions=True): - # Create DB mapping - nsx_db.add_neutron_nsx_security_group_mapping( - session, neutron_id, nsx_id) - return nsx_id - - -def get_nsx_router_id(session, cluster, neutron_router_id): - """Return the NSX router uuid for a given neutron router. - - First, look up the Neutron database. If not found, execute - a query on NSX platform as the mapping might be missing. - """ - nsx_router_id = nsx_db.get_nsx_router_id( - session, neutron_router_id) - if not nsx_router_id: - # Find logical router from backend. 
- # This is a rather expensive query, but it won't be executed - # more than once for each router in Neutron's lifetime - nsx_routers = routerlib.query_lrouters( - cluster, '*', - filters={'tag': neutron_router_id, - 'tag_scope': 'q_router_id'}) - # Only one result expected - # NOTE(salv-orlando): Not handling the case where more than one - # port is found with the same neutron port tag - if not nsx_routers: - LOG.warn(_("Unable to find NSX router for Neutron router %s"), - neutron_router_id) - return - nsx_router = nsx_routers[0] - nsx_router_id = nsx_router['uuid'] - with session.begin(subtransactions=True): - # Create DB mapping - nsx_db.add_neutron_nsx_router_mapping( - session, - neutron_router_id, - nsx_router_id) - return nsx_router_id - - -def create_nsx_cluster(cluster_opts, concurrent_connections, gen_timeout): - cluster = nsx_cluster.NSXCluster(**cluster_opts) - - def _ctrl_split(x, y): - return (x, int(y), True) - - api_providers = [_ctrl_split(*ctrl.split(':')) - for ctrl in cluster.nsx_controllers] - cluster.api_client = client.NsxApiClient( - api_providers, cluster.nsx_user, cluster.nsx_password, - request_timeout=cluster.req_timeout, - http_timeout=cluster.http_timeout, - retries=cluster.retries, - redirects=cluster.redirects, - concurrent_connections=concurrent_connections, - gen_timeout=gen_timeout) - return cluster - - -def get_nsx_device_status(cluster, nsx_uuid): - try: - status_up = l2gwlib.get_gateway_device_status( - cluster, nsx_uuid) - if status_up: - return networkgw_db.STATUS_ACTIVE - else: - return networkgw_db.STATUS_DOWN - except api_exc.NsxApiException: - return networkgw_db.STATUS_UNKNOWN - except n_exc.NotFound: - return networkgw_db.ERROR - - -def get_nsx_device_statuses(cluster, tenant_id): - try: - status_dict = l2gwlib.get_gateway_devices_status( - cluster, tenant_id) - return dict((nsx_device_id, - networkgw_db.STATUS_ACTIVE if connected - else networkgw_db.STATUS_DOWN) for - (nsx_device_id, connected) in 
status_dict.iteritems()) - except api_exc.NsxApiException: - # Do not make a NSX API exception fatal - if tenant_id: - LOG.warn(_("Unable to retrieve operational status for gateway " - "devices belonging to tenant: %s"), tenant_id) - else: - LOG.warn(_("Unable to retrieve operational status for " - "gateway devices")) diff --git a/neutron/plugins/vmware/common/securitygroups.py b/neutron/plugins/vmware/common/securitygroups.py deleted file mode 100644 index db61b72a8..000000000 --- a/neutron/plugins/vmware/common/securitygroups.py +++ /dev/null @@ -1,134 +0,0 @@ -# Copyright 2013 VMware, Inc. -# All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from neutron.openstack.common import log -from neutron.plugins.vmware.common import nsx_utils - -LOG = log.getLogger(__name__) -# Protocol number look up for supported protocols -protocol_num_look_up = {'tcp': 6, 'icmp': 1, 'udp': 17} - - -def _convert_to_nsx_rule(session, cluster, rule, with_id=False): - """Converts a Neutron security group rule to the NSX format. - - This routine also replaces Neutron IDs with NSX UUIDs. 
- """ - nsx_rule = {} - params = ['remote_ip_prefix', 'protocol', - 'remote_group_id', 'port_range_min', - 'port_range_max', 'ethertype'] - if with_id: - params.append('id') - - for param in params: - value = rule.get(param) - if param not in rule: - nsx_rule[param] = value - elif not value: - pass - elif param == 'remote_ip_prefix': - nsx_rule['ip_prefix'] = rule['remote_ip_prefix'] - elif param == 'remote_group_id': - nsx_rule['profile_uuid'] = nsx_utils.get_nsx_security_group_id( - session, cluster, rule['remote_group_id']) - - elif param == 'protocol': - try: - nsx_rule['protocol'] = int(rule['protocol']) - except (ValueError, TypeError): - nsx_rule['protocol'] = ( - protocol_num_look_up[rule['protocol']]) - else: - nsx_rule[param] = value - return nsx_rule - - -def _convert_to_nsx_rules(session, cluster, rules, with_id=False): - """Converts a list of Neutron security group rules to the NSX format.""" - nsx_rules = {'logical_port_ingress_rules': [], - 'logical_port_egress_rules': []} - for direction in ['logical_port_ingress_rules', - 'logical_port_egress_rules']: - for rule in rules[direction]: - nsx_rules[direction].append( - _convert_to_nsx_rule(session, cluster, rule, with_id)) - return nsx_rules - - -def get_security_group_rules_nsx_format(session, cluster, - security_group_rules, with_id=False): - """Convert neutron security group rules into NSX format. - - This routine splits Neutron security group rules into two lists, one - for ingress rules and the other for egress rules. 
- """ - - def fields(rule): - _fields = ['remote_ip_prefix', 'remote_group_id', 'protocol', - 'port_range_min', 'port_range_max', 'protocol', 'ethertype'] - if with_id: - _fields.append('id') - return dict((k, v) for k, v in rule.iteritems() if k in _fields) - - ingress_rules = [] - egress_rules = [] - for rule in security_group_rules: - if rule.get('souce_group_id'): - rule['remote_group_id'] = nsx_utils.get_nsx_security_group_id( - session, cluster, rule['remote_group_id']) - - if rule['direction'] == 'ingress': - ingress_rules.append(fields(rule)) - elif rule['direction'] == 'egress': - egress_rules.append(fields(rule)) - rules = {'logical_port_ingress_rules': egress_rules, - 'logical_port_egress_rules': ingress_rules} - return _convert_to_nsx_rules(session, cluster, rules, with_id) - - -def merge_security_group_rules_with_current(session, cluster, - new_rules, current_rules): - merged_rules = get_security_group_rules_nsx_format( - session, cluster, current_rules) - for new_rule in new_rules: - rule = new_rule['security_group_rule'] - if rule['direction'] == 'ingress': - merged_rules['logical_port_egress_rules'].append( - _convert_to_nsx_rule(session, cluster, rule)) - elif rule['direction'] == 'egress': - merged_rules['logical_port_ingress_rules'].append( - _convert_to_nsx_rule(session, cluster, rule)) - return merged_rules - - -def remove_security_group_with_id_and_id_field(rules, rule_id): - """Remove rule by rule_id. - - This function receives all of the current rule associated with a - security group and then removes the rule that matches the rule_id. In - addition it removes the id field in the dict with each rule since that - should not be passed to nsx. 
- """ - for rule_direction in rules.values(): - item_to_remove = None - for port_rule in rule_direction: - if port_rule['id'] == rule_id: - item_to_remove = port_rule - else: - # remove key from dictionary for NSX - del port_rule['id'] - if item_to_remove: - rule_direction.remove(item_to_remove) diff --git a/neutron/plugins/vmware/common/sync.py b/neutron/plugins/vmware/common/sync.py deleted file mode 100644 index 76c6a27f4..000000000 --- a/neutron/plugins/vmware/common/sync.py +++ /dev/null @@ -1,669 +0,0 @@ -# Copyright 2013 VMware, Inc. -# All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import random - -from neutron.common import constants -from neutron.common import exceptions -from neutron import context -from neutron.db import external_net_db -from neutron.db import l3_db -from neutron.db import models_v2 -from neutron.extensions import l3 -from neutron.openstack.common import jsonutils -from neutron.openstack.common import log -from neutron.openstack.common import loopingcall -from neutron.openstack.common import timeutils -from neutron.plugins.vmware.api_client import exception as api_exc -from neutron.plugins.vmware.common import exceptions as nsx_exc -from neutron.plugins.vmware.common import nsx_utils -from neutron.plugins.vmware import nsxlib -from neutron.plugins.vmware.nsxlib import router as routerlib -from neutron.plugins.vmware.nsxlib import switch as switchlib - -# Maximum page size for a single request -# NOTE(salv-orlando): This might become a version-dependent map should the -# limit be raised in future versions -MAX_PAGE_SIZE = 5000 - -LOG = log.getLogger(__name__) - - -class NsxCache(object): - """A simple Cache for NSX resources. - - Associates resource id with resource hash to rapidly identify - updated resources. 
- Each entry in the cache also stores the following information: - - changed: the resource in the cache has been altered following - an update or a delete - - hit: the resource has been visited during an update (and possibly - left unchanged) - - data: current resource data - - data_bk: backup of resource data prior to its removal - """ - - def __init__(self): - # Maps a uuid to the dict containing it - self._uuid_dict_mappings = {} - # Dicts for NSX cached resources - self._lswitches = {} - self._lswitchports = {} - self._lrouters = {} - - def __getitem__(self, key): - # uuids are unique across the various types of resources - # TODO(salv-orlando): Avoid lookups over all dictionaries - # when retrieving items - # Fetch lswitches, lports, or lrouters - resources = self._uuid_dict_mappings[key] - return resources[key] - - def _update_resources(self, resources, new_resources): - # Clear the 'changed' attribute for all items - for uuid, item in resources.items(): - if item.pop('changed', None) and not item.get('data'): - # The item is not anymore in NSX, so delete it - del resources[uuid] - del self._uuid_dict_mappings[uuid] - LOG.debug("Removed item %s from NSX object cache", uuid) - - def do_hash(item): - return hash(jsonutils.dumps(item)) - - # Parse new data and identify new, deleted, and updated resources - for item in new_resources: - item_id = item['uuid'] - if resources.get(item_id): - new_hash = do_hash(item) - if new_hash != resources[item_id]['hash']: - resources[item_id]['hash'] = new_hash - resources[item_id]['changed'] = True - resources[item_id]['data_bk'] = ( - resources[item_id]['data']) - resources[item_id]['data'] = item - # Mark the item as hit in any case - resources[item_id]['hit'] = True - LOG.debug("Updating item %s in NSX object cache", item_id) - else: - resources[item_id] = {'hash': do_hash(item)} - resources[item_id]['hit'] = True - resources[item_id]['changed'] = True - resources[item_id]['data'] = item - # add a uuid to dict mapping for 
easy retrieval - # with __getitem__ - self._uuid_dict_mappings[item_id] = resources - LOG.debug("Added item %s to NSX object cache", item_id) - - def _delete_resources(self, resources): - # Mark for removal all the elements which have not been visited. - # And clear the 'hit' attribute. - for to_delete in [k for (k, v) in resources.iteritems() - if not v.pop('hit', False)]: - resources[to_delete]['changed'] = True - resources[to_delete]['data_bk'] = ( - resources[to_delete].pop('data', None)) - - def _get_resource_ids(self, resources, changed_only): - if changed_only: - return [k for (k, v) in resources.iteritems() - if v.get('changed')] - return resources.keys() - - def get_lswitches(self, changed_only=False): - return self._get_resource_ids(self._lswitches, changed_only) - - def get_lrouters(self, changed_only=False): - return self._get_resource_ids(self._lrouters, changed_only) - - def get_lswitchports(self, changed_only=False): - return self._get_resource_ids(self._lswitchports, changed_only) - - def update_lswitch(self, lswitch): - self._update_resources(self._lswitches, [lswitch]) - - def update_lrouter(self, lrouter): - self._update_resources(self._lrouters, [lrouter]) - - def update_lswitchport(self, lswitchport): - self._update_resources(self._lswitchports, [lswitchport]) - - def process_updates(self, lswitches=None, - lrouters=None, lswitchports=None): - self._update_resources(self._lswitches, lswitches) - self._update_resources(self._lrouters, lrouters) - self._update_resources(self._lswitchports, lswitchports) - return (self._get_resource_ids(self._lswitches, changed_only=True), - self._get_resource_ids(self._lrouters, changed_only=True), - self._get_resource_ids(self._lswitchports, changed_only=True)) - - def process_deletes(self): - self._delete_resources(self._lswitches) - self._delete_resources(self._lrouters) - self._delete_resources(self._lswitchports) - return (self._get_resource_ids(self._lswitches, changed_only=True), - 
self._get_resource_ids(self._lrouters, changed_only=True), - self._get_resource_ids(self._lswitchports, changed_only=True)) - - -class SyncParameters(): - """Defines attributes used by the synchronization procedure. - - chunk_size: Actual chunk size - extra_chunk_size: Additional data to fetch because of chunk size - adjustment - current_chunk: Counter of the current data chunk being synchronized - Page cursors: markers for the next resource to fetch. - 'start' means page cursor unset for fetching 1st page - init_sync_performed: True if the initial synchronization concluded - """ - - def __init__(self, min_chunk_size): - self.chunk_size = min_chunk_size - self.extra_chunk_size = 0 - self.current_chunk = 0 - self.ls_cursor = 'start' - self.lr_cursor = 'start' - self.lp_cursor = 'start' - self.init_sync_performed = False - self.total_size = 0 - - -def _start_loopingcall(min_chunk_size, state_sync_interval, func): - """Start a loopingcall for the synchronization task.""" - # Start a looping call to synchronize operational status - # for neutron resources - if not state_sync_interval: - # do not start the looping call if specified - # sync interval is 0 - return - state_synchronizer = loopingcall.DynamicLoopingCall( - func, sp=SyncParameters(min_chunk_size)) - state_synchronizer.start( - periodic_interval_max=state_sync_interval) - return state_synchronizer - - -class NsxSynchronizer(): - - LS_URI = nsxlib._build_uri_path( - switchlib.LSWITCH_RESOURCE, fields='uuid,tags,fabric_status', - relations='LogicalSwitchStatus') - LR_URI = nsxlib._build_uri_path( - routerlib.LROUTER_RESOURCE, fields='uuid,tags,fabric_status', - relations='LogicalRouterStatus') - LP_URI = nsxlib._build_uri_path( - switchlib.LSWITCHPORT_RESOURCE, - parent_resource_id='*', - fields='uuid,tags,fabric_status_up', - relations='LogicalPortStatus') - - def __init__(self, plugin, cluster, state_sync_interval, - req_delay, min_chunk_size, max_rand_delay=0): - random.seed() - self._nsx_cache = NsxCache() 
- # Store parameters as instance members - # NOTE(salv-orlando): apologies if it looks java-ish - self._plugin = plugin - self._cluster = cluster - self._req_delay = req_delay - self._sync_interval = state_sync_interval - self._max_rand_delay = max_rand_delay - # Validate parameters - if self._sync_interval < self._req_delay: - err_msg = (_("Minimum request delay:%(req_delay)s must not " - "exceed synchronization interval:%(sync_interval)s") % - {'req_delay': self._req_delay, - 'sync_interval': self._sync_interval}) - LOG.error(err_msg) - raise nsx_exc.NsxPluginException(err_msg=err_msg) - # Backoff time in case of failures while fetching sync data - self._sync_backoff = 1 - # Store the looping call in an instance variable to allow unit tests - # for controlling its lifecycle - self._sync_looping_call = _start_loopingcall( - min_chunk_size, state_sync_interval, self._synchronize_state) - - def _get_tag_dict(self, tags): - return dict((tag.get('scope'), tag['tag']) for tag in tags) - - def synchronize_network(self, context, neutron_network_data, - lswitches=None): - """Synchronize a Neutron network with its NSX counterpart. - - This routine synchronizes a set of switches when a Neutron - network is mapped to multiple lswitches. 
- """ - if not lswitches: - # Try to get logical switches from nsx - try: - lswitches = nsx_utils.fetch_nsx_switches( - context.session, self._cluster, - neutron_network_data['id']) - except exceptions.NetworkNotFound: - # TODO(salv-orlando): We should be catching - # api_exc.ResourceNotFound here - # The logical switch was not found - LOG.warning(_("Logical switch for neutron network %s not " - "found on NSX."), neutron_network_data['id']) - lswitches = [] - else: - for lswitch in lswitches: - self._nsx_cache.update_lswitch(lswitch) - # By default assume things go wrong - status = constants.NET_STATUS_ERROR - # In most cases lswitches will contain a single element - for ls in lswitches: - if not ls: - # Logical switch was deleted - break - ls_status = ls['_relations']['LogicalSwitchStatus'] - if not ls_status['fabric_status']: - status = constants.NET_STATUS_DOWN - break - else: - # No switch was down or missing. Set status to ACTIVE unless - # there were no switches in the first place! 
- if lswitches: - status = constants.NET_STATUS_ACTIVE - # Update db object - if status == neutron_network_data['status']: - # do nothing - return - - with context.session.begin(subtransactions=True): - try: - network = self._plugin._get_network(context, - neutron_network_data['id']) - except exceptions.NetworkNotFound: - pass - else: - network.status = status - LOG.debug(_("Updating status for neutron resource %(q_id)s to:" - " %(status)s"), - {'q_id': neutron_network_data['id'], - 'status': status}) - - def _synchronize_lswitches(self, ctx, ls_uuids, scan_missing=False): - if not ls_uuids and not scan_missing: - return - neutron_net_ids = set() - neutron_nsx_mappings = {} - # TODO(salvatore-orlando): Deal with the case the tag - # has been tampered with - for ls_uuid in ls_uuids: - # If the lswitch has been deleted, get backup copy of data - lswitch = (self._nsx_cache[ls_uuid].get('data') or - self._nsx_cache[ls_uuid].get('data_bk')) - tags = self._get_tag_dict(lswitch['tags']) - neutron_id = tags.get('quantum_net_id') - neutron_net_ids.add(neutron_id) - neutron_nsx_mappings[neutron_id] = ( - neutron_nsx_mappings.get(neutron_id, []) + - [self._nsx_cache[ls_uuid]]) - # Fetch neutron networks from database - filters = {'router:external': [False]} - if not scan_missing: - filters['id'] = neutron_net_ids - - networks = self._plugin._get_collection( - ctx, models_v2.Network, self._plugin._make_network_dict, - filters=filters) - - for network in networks: - lswitches = neutron_nsx_mappings.get(network['id'], []) - lswitches = [lswitch.get('data') for lswitch in lswitches] - self.synchronize_network(ctx, network, lswitches) - - def synchronize_router(self, context, neutron_router_data, - lrouter=None): - """Synchronize a neutron router with its NSX counterpart.""" - if not lrouter: - # Try to get router from nsx - try: - # This query will return the logical router status too - nsx_router_id = nsx_utils.get_nsx_router_id( - context.session, self._cluster, 
neutron_router_data['id']) - if nsx_router_id: - lrouter = routerlib.get_lrouter( - self._cluster, nsx_router_id) - except exceptions.NotFound: - # NOTE(salv-orlando): We should be catching - # api_exc.ResourceNotFound here - # The logical router was not found - LOG.warning(_("Logical router for neutron router %s not " - "found on NSX."), neutron_router_data['id']) - if lrouter: - # Update the cache - self._nsx_cache.update_lrouter(lrouter) - - # Note(salv-orlando): It might worth adding a check to verify neutron - # resource tag in nsx entity matches a Neutron id. - # By default assume things go wrong - status = constants.NET_STATUS_ERROR - if lrouter: - lr_status = (lrouter['_relations'] - ['LogicalRouterStatus'] - ['fabric_status']) - status = (lr_status and - constants.NET_STATUS_ACTIVE - or constants.NET_STATUS_DOWN) - # Update db object - if status == neutron_router_data['status']: - # do nothing - return - - with context.session.begin(subtransactions=True): - try: - router = self._plugin._get_router(context, - neutron_router_data['id']) - except l3.RouterNotFound: - pass - else: - router.status = status - LOG.debug(_("Updating status for neutron resource %(q_id)s to:" - " %(status)s"), - {'q_id': neutron_router_data['id'], - 'status': status}) - - def _synchronize_lrouters(self, ctx, lr_uuids, scan_missing=False): - if not lr_uuids and not scan_missing: - return - # TODO(salvatore-orlando): Deal with the case the tag - # has been tampered with - neutron_router_mappings = {} - for lr_uuid in lr_uuids: - lrouter = (self._nsx_cache[lr_uuid].get('data') or - self._nsx_cache[lr_uuid].get('data_bk')) - tags = self._get_tag_dict(lrouter['tags']) - neutron_router_id = tags.get('q_router_id') - if neutron_router_id: - neutron_router_mappings[neutron_router_id] = ( - self._nsx_cache[lr_uuid]) - else: - LOG.warn(_("Unable to find Neutron router id for " - "NSX logical router: %s"), lr_uuid) - # Fetch neutron routers from database - filters = ({} if scan_missing else - 
{'id': neutron_router_mappings.keys()}) - routers = self._plugin._get_collection( - ctx, l3_db.Router, self._plugin._make_router_dict, - filters=filters) - for router in routers: - lrouter = neutron_router_mappings.get(router['id']) - self.synchronize_router( - ctx, router, lrouter and lrouter.get('data')) - - def synchronize_port(self, context, neutron_port_data, - lswitchport=None, ext_networks=None): - """Synchronize a Neutron port with its NSX counterpart.""" - # Skip synchronization for ports on external networks - if not ext_networks: - ext_networks = [net['id'] for net in context.session.query( - models_v2.Network).join( - external_net_db.ExternalNetwork, - (models_v2.Network.id == - external_net_db.ExternalNetwork.network_id))] - if neutron_port_data['network_id'] in ext_networks: - with context.session.begin(subtransactions=True): - neutron_port_data['status'] = constants.PORT_STATUS_ACTIVE - return - - if not lswitchport: - # Try to get port from nsx - try: - ls_uuid, lp_uuid = nsx_utils.get_nsx_switch_and_port_id( - context.session, self._cluster, neutron_port_data['id']) - if lp_uuid: - lswitchport = switchlib.get_port( - self._cluster, ls_uuid, lp_uuid, - relations='LogicalPortStatus') - except (exceptions.PortNotFoundOnNetwork): - # NOTE(salv-orlando): We should be catching - # api_exc.ResourceNotFound here instead - # of PortNotFoundOnNetwork when the id exists but - # the logical switch port was not found - LOG.warning(_("Logical switch port for neutron port %s " - "not found on NSX."), neutron_port_data['id']) - lswitchport = None - else: - # If lswitchport is not None, update the cache. - # It could be none if the port was deleted from the backend - if lswitchport: - self._nsx_cache.update_lswitchport(lswitchport) - # Note(salv-orlando): It might worth adding a check to verify neutron - # resource tag in nsx entity matches Neutron id. 
- # By default assume things go wrong - status = constants.PORT_STATUS_ERROR - if lswitchport: - lp_status = (lswitchport['_relations'] - ['LogicalPortStatus'] - ['fabric_status_up']) - status = (lp_status and - constants.PORT_STATUS_ACTIVE - or constants.PORT_STATUS_DOWN) - - # Update db object - if status == neutron_port_data['status']: - # do nothing - return - - with context.session.begin(subtransactions=True): - try: - port = self._plugin._get_port(context, - neutron_port_data['id']) - except exceptions.PortNotFound: - pass - else: - port.status = status - LOG.debug(_("Updating status for neutron resource %(q_id)s to:" - " %(status)s"), - {'q_id': neutron_port_data['id'], - 'status': status}) - - def _synchronize_lswitchports(self, ctx, lp_uuids, scan_missing=False): - if not lp_uuids and not scan_missing: - return - # Find Neutron port id by tag - the tag is already - # loaded in memory, no reason for doing a db query - # TODO(salvatore-orlando): Deal with the case the tag - # has been tampered with - neutron_port_mappings = {} - for lp_uuid in lp_uuids: - lport = (self._nsx_cache[lp_uuid].get('data') or - self._nsx_cache[lp_uuid].get('data_bk')) - tags = self._get_tag_dict(lport['tags']) - neutron_port_id = tags.get('q_port_id') - if neutron_port_id: - neutron_port_mappings[neutron_port_id] = ( - self._nsx_cache[lp_uuid]) - # Fetch neutron ports from database - # At the first sync we need to fetch all ports - filters = ({} if scan_missing else - {'id': neutron_port_mappings.keys()}) - # TODO(salv-orlando): Work out a solution for avoiding - # this query - ext_nets = [net['id'] for net in ctx.session.query( - models_v2.Network).join( - external_net_db.ExternalNetwork, - (models_v2.Network.id == - external_net_db.ExternalNetwork.network_id))] - ports = self._plugin._get_collection( - ctx, models_v2.Port, self._plugin._make_port_dict, - filters=filters) - for port in ports: - lswitchport = neutron_port_mappings.get(port['id']) - self.synchronize_port( - ctx, 
port, lswitchport and lswitchport.get('data'), - ext_networks=ext_nets) - - def _get_chunk_size(self, sp): - # NOTE(salv-orlando): Try to use __future__ for this routine only? - ratio = ((float(sp.total_size) / float(sp.chunk_size)) / - (float(self._sync_interval) / float(self._req_delay))) - new_size = max(1.0, ratio) * float(sp.chunk_size) - return int(new_size) + (new_size - int(new_size) > 0) - - def _fetch_data(self, uri, cursor, page_size): - # If not cursor there is nothing to retrieve - if cursor: - if cursor == 'start': - cursor = None - # Chunk size tuning might, in some conditions, make it larger - # than 5,000, which is the maximum page size allowed by the NSX - # API. In this case the request should be split in multiple - # requests. This is not ideal, and therefore a log warning will - # be emitted. - num_requests = page_size / (MAX_PAGE_SIZE + 1) + 1 - if num_requests > 1: - LOG.warn(_("Requested page size is %(cur_chunk_size)d." - "It might be necessary to do %(num_requests)d " - "round-trips to NSX for fetching data. 
Please " - "tune sync parameters to ensure chunk size " - "is less than %(max_page_size)d"), - {'cur_chunk_size': page_size, - 'num_requests': num_requests, - 'max_page_size': MAX_PAGE_SIZE}) - # Only the first request might return the total size, - # subsequent requests will definetely not - results, cursor, total_size = nsxlib.get_single_query_page( - uri, self._cluster, cursor, - min(page_size, MAX_PAGE_SIZE)) - for _req in range(num_requests - 1): - # If no cursor is returned break the cycle as there is no - # actual need to perform multiple requests (all fetched) - # This happens when the overall size of resources exceeds - # the maximum page size, but the number for each single - # resource type is below this threshold - if not cursor: - break - req_results, cursor = nsxlib.get_single_query_page( - uri, self._cluster, cursor, - min(page_size, MAX_PAGE_SIZE))[:2] - results.extend(req_results) - # reset cursor before returning if we queried just to - # know the number of entities - return results, cursor if page_size else 'start', total_size - return [], cursor, None - - def _fetch_nsx_data_chunk(self, sp): - base_chunk_size = sp.chunk_size - chunk_size = base_chunk_size + sp.extra_chunk_size - LOG.info(_("Fetching up to %s resources " - "from NSX backend"), chunk_size) - fetched = ls_count = lr_count = lp_count = 0 - lswitches = lrouters = lswitchports = [] - if sp.ls_cursor or sp.ls_cursor == 'start': - (lswitches, sp.ls_cursor, ls_count) = self._fetch_data( - self.LS_URI, sp.ls_cursor, chunk_size) - fetched = len(lswitches) - if fetched < chunk_size and sp.lr_cursor or sp.lr_cursor == 'start': - (lrouters, sp.lr_cursor, lr_count) = self._fetch_data( - self.LR_URI, sp.lr_cursor, max(chunk_size - fetched, 0)) - fetched += len(lrouters) - if fetched < chunk_size and sp.lp_cursor or sp.lp_cursor == 'start': - (lswitchports, sp.lp_cursor, lp_count) = self._fetch_data( - self.LP_URI, sp.lp_cursor, max(chunk_size - fetched, 0)) - fetched += len(lswitchports) - if 
sp.current_chunk == 0: - # No cursors were provided. Then it must be possible to - # calculate the total amount of data to fetch - sp.total_size = ls_count + lr_count + lp_count - LOG.debug(_("Total data size: %d"), sp.total_size) - sp.chunk_size = self._get_chunk_size(sp) - # Calculate chunk size adjustment - sp.extra_chunk_size = sp.chunk_size - base_chunk_size - LOG.debug(_("Fetched %(num_lswitches)d logical switches, " - "%(num_lswitchports)d logical switch ports," - "%(num_lrouters)d logical routers"), - {'num_lswitches': len(lswitches), - 'num_lswitchports': len(lswitchports), - 'num_lrouters': len(lrouters)}) - return (lswitches, lrouters, lswitchports) - - def _synchronize_state(self, sp): - # If the plugin has been destroyed, stop the LoopingCall - if not self._plugin: - raise loopingcall.LoopingCallDone - start = timeutils.utcnow() - # Reset page cursor variables if necessary - if sp.current_chunk == 0: - sp.ls_cursor = sp.lr_cursor = sp.lp_cursor = 'start' - LOG.info(_("Running state synchronization task. Chunk: %s"), - sp.current_chunk) - # Fetch chunk_size data from NSX - try: - (lswitches, lrouters, lswitchports) = ( - self._fetch_nsx_data_chunk(sp)) - except (api_exc.RequestTimeout, api_exc.NsxApiException): - sleep_interval = self._sync_backoff - # Cap max back off to 64 seconds - self._sync_backoff = min(self._sync_backoff * 2, 64) - LOG.exception(_("An error occurred while communicating with " - "NSX backend. 
Will retry synchronization " - "in %d seconds"), sleep_interval) - return sleep_interval - LOG.debug(_("Time elapsed querying NSX: %s"), - timeutils.utcnow() - start) - if sp.total_size: - num_chunks = ((sp.total_size / sp.chunk_size) + - (sp.total_size % sp.chunk_size != 0)) - else: - num_chunks = 1 - LOG.debug(_("Number of chunks: %d"), num_chunks) - # Find objects which have changed on NSX side and need - # to be synchronized - LOG.debug("Processing NSX cache for updated objects") - (ls_uuids, lr_uuids, lp_uuids) = self._nsx_cache.process_updates( - lswitches, lrouters, lswitchports) - # Process removed objects only at the last chunk - scan_missing = (sp.current_chunk == num_chunks - 1 and - not sp.init_sync_performed) - if sp.current_chunk == num_chunks - 1: - LOG.debug("Processing NSX cache for deleted objects") - self._nsx_cache.process_deletes() - ls_uuids = self._nsx_cache.get_lswitches( - changed_only=not scan_missing) - lr_uuids = self._nsx_cache.get_lrouters( - changed_only=not scan_missing) - lp_uuids = self._nsx_cache.get_lswitchports( - changed_only=not scan_missing) - LOG.debug(_("Time elapsed hashing data: %s"), - timeutils.utcnow() - start) - # Get an admin context - ctx = context.get_admin_context() - # Synchronize with database - self._synchronize_lswitches(ctx, ls_uuids, - scan_missing=scan_missing) - self._synchronize_lrouters(ctx, lr_uuids, - scan_missing=scan_missing) - self._synchronize_lswitchports(ctx, lp_uuids, - scan_missing=scan_missing) - # Increase chunk counter - LOG.info(_("Synchronization for chunk %(chunk_num)d of " - "%(total_chunks)d performed"), - {'chunk_num': sp.current_chunk + 1, - 'total_chunks': num_chunks}) - sp.current_chunk = (sp.current_chunk + 1) % num_chunks - added_delay = 0 - if sp.current_chunk == 0: - # Ensure init_sync_performed is True - if not sp.init_sync_performed: - sp.init_sync_performed = True - # Add additional random delay - added_delay = random.randint(0, self._max_rand_delay) - LOG.debug(_("Time 
elapsed at end of sync: %s"), - timeutils.utcnow() - start) - return self._sync_interval / num_chunks + added_delay diff --git a/neutron/plugins/vmware/common/utils.py b/neutron/plugins/vmware/common/utils.py deleted file mode 100644 index 496fa48a3..000000000 --- a/neutron/plugins/vmware/common/utils.py +++ /dev/null @@ -1,69 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2013 VMware, Inc. -# All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import hashlib - -from neutron.api.v2 import attributes -from neutron.openstack.common import log -from neutron import version - - -LOG = log.getLogger(__name__) -MAX_DISPLAY_NAME_LEN = 40 -NEUTRON_VERSION = version.version_info.release_string() - - -# Allowed network types for the NSX Plugin -class NetworkTypes: - """Allowed provider network types for the NSX Plugin.""" - L3_EXT = 'l3_ext' - STT = 'stt' - GRE = 'gre' - FLAT = 'flat' - VLAN = 'vlan' - BRIDGE = 'bridge' - - -def get_tags(**kwargs): - tags = ([dict(tag=value, scope=key) - for key, value in kwargs.iteritems()]) - tags.append({"tag": NEUTRON_VERSION, "scope": "quantum"}) - return tags - - -def device_id_to_vm_id(device_id, obfuscate=False): - # device_id can be longer than 40 characters, for example - # a device_id for a dhcp port is like the following: - # - # dhcp83b5fdeb-e3b4-5e18-ac5f-55161...80747326-47d7-46c2-a87a-cf6d5194877c - # - # To fit it into an NSX tag we need to hash it, however device_id - # used for ports 
associated to VM's are small enough so let's skip the - # hashing - if len(device_id) > MAX_DISPLAY_NAME_LEN or obfuscate: - return hashlib.sha1(device_id).hexdigest() - else: - return device_id - - -def check_and_truncate(display_name): - if (attributes.is_attr_set(display_name) and - len(display_name) > MAX_DISPLAY_NAME_LEN): - LOG.debug(_("Specified name:'%s' exceeds maximum length. " - "It will be truncated on NSX"), display_name) - return display_name[:MAX_DISPLAY_NAME_LEN] - return display_name or '' diff --git a/neutron/plugins/vmware/dbexts/__init__.py b/neutron/plugins/vmware/dbexts/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/neutron/plugins/vmware/dbexts/db.py b/neutron/plugins/vmware/dbexts/db.py deleted file mode 100644 index 0db4f09a3..000000000 --- a/neutron/plugins/vmware/dbexts/db.py +++ /dev/null @@ -1,193 +0,0 @@ -# Copyright 2012 VMware, Inc. -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from sqlalchemy.orm import exc - -import neutron.db.api as db -from neutron.openstack.common.db import exception as db_exc -from neutron.openstack.common import excutils -from neutron.openstack.common import log as logging -from neutron.plugins.vmware.dbexts import models -from neutron.plugins.vmware.dbexts import networkgw_db - -LOG = logging.getLogger(__name__) - - -def get_network_bindings(session, network_id): - session = session or db.get_session() - return (session.query(models.TzNetworkBinding). 
- filter_by(network_id=network_id). - all()) - - -def get_network_bindings_by_vlanid(session, vlan_id): - session = session or db.get_session() - return (session.query(models.TzNetworkBinding). - filter_by(vlan_id=vlan_id). - all()) - - -def add_network_binding(session, network_id, binding_type, phy_uuid, vlan_id): - with session.begin(subtransactions=True): - binding = models.TzNetworkBinding(network_id, binding_type, - phy_uuid, vlan_id) - session.add(binding) - return binding - - -def add_neutron_nsx_network_mapping(session, neutron_id, nsx_switch_id): - with session.begin(subtransactions=True): - mapping = models.NeutronNsxNetworkMapping( - neutron_id=neutron_id, nsx_id=nsx_switch_id) - session.add(mapping) - return mapping - - -def add_neutron_nsx_port_mapping(session, neutron_id, - nsx_switch_id, nsx_port_id): - session.begin(subtransactions=True) - try: - mapping = models.NeutronNsxPortMapping( - neutron_id, nsx_switch_id, nsx_port_id) - session.add(mapping) - session.commit() - except db_exc.DBDuplicateEntry: - with excutils.save_and_reraise_exception() as ctxt: - session.rollback() - # do not complain if the same exact mapping is being added, - # otherwise re-raise because even though it is possible for the - # same neutron port to map to different back-end ports over time, - # this should not occur whilst a mapping already exists - current = get_nsx_switch_and_port_id(session, neutron_id) - if current[1] == nsx_port_id: - LOG.debug(_("Port mapping for %s already available"), - neutron_id) - ctxt.reraise = False - except db_exc.DBError: - with excutils.save_and_reraise_exception(): - # rollback for any other db error - session.rollback() - return mapping - - -def add_neutron_nsx_router_mapping(session, neutron_id, nsx_router_id): - with session.begin(subtransactions=True): - mapping = models.NeutronNsxRouterMapping( - neutron_id=neutron_id, nsx_id=nsx_router_id) - session.add(mapping) - return mapping - - -def 
add_neutron_nsx_security_group_mapping(session, neutron_id, nsx_id): - """Map a Neutron security group to a NSX security profile. - - :param session: a valid database session object - :param neutron_id: a neutron security group identifier - :param nsx_id: a nsx security profile identifier - """ - with session.begin(subtransactions=True): - mapping = models.NeutronNsxSecurityGroupMapping( - neutron_id=neutron_id, nsx_id=nsx_id) - session.add(mapping) - return mapping - - -def get_nsx_switch_ids(session, neutron_id): - # This function returns a list of NSX switch identifiers because of - # the possibility of chained logical switches - return [mapping['nsx_id'] for mapping in - session.query(models.NeutronNsxNetworkMapping).filter_by( - neutron_id=neutron_id)] - - -def get_nsx_switch_and_port_id(session, neutron_id): - try: - mapping = (session.query(models.NeutronNsxPortMapping). - filter_by(neutron_id=neutron_id). - one()) - return mapping['nsx_switch_id'], mapping['nsx_port_id'] - except exc.NoResultFound: - LOG.debug(_("NSX identifiers for neutron port %s not yet " - "stored in Neutron DB"), neutron_id) - return None, None - - -def get_nsx_router_id(session, neutron_id): - try: - mapping = (session.query(models.NeutronNsxRouterMapping). - filter_by(neutron_id=neutron_id).one()) - return mapping['nsx_id'] - except exc.NoResultFound: - LOG.debug(_("NSX identifiers for neutron router %s not yet " - "stored in Neutron DB"), neutron_id) - - -def get_nsx_security_group_id(session, neutron_id): - """Return the id of a security group in the NSX backend. - - Note: security groups are called 'security profiles' in NSX - """ - try: - mapping = (session.query(models.NeutronNsxSecurityGroupMapping). - filter_by(neutron_id=neutron_id). 
- one()) - return mapping['nsx_id'] - except exc.NoResultFound: - LOG.debug(_("NSX identifiers for neutron security group %s not yet " - "stored in Neutron DB"), neutron_id) - return None - - -def _delete_by_neutron_id(session, model, neutron_id): - return session.query(model).filter_by(neutron_id=neutron_id).delete() - - -def delete_neutron_nsx_port_mapping(session, neutron_id): - return _delete_by_neutron_id( - session, models.NeutronNsxPortMapping, neutron_id) - - -def delete_neutron_nsx_router_mapping(session, neutron_id): - return _delete_by_neutron_id( - session, models.NeutronNsxRouterMapping, neutron_id) - - -def unset_default_network_gateways(session): - with session.begin(subtransactions=True): - session.query(networkgw_db.NetworkGateway).update( - {networkgw_db.NetworkGateway.default: False}) - - -def set_default_network_gateway(session, gw_id): - with session.begin(subtransactions=True): - gw = (session.query(networkgw_db.NetworkGateway). - filter_by(id=gw_id).one()) - gw['default'] = True - - -def set_multiprovider_network(session, network_id): - with session.begin(subtransactions=True): - multiprovider_network = models.MultiProviderNetworks( - network_id) - session.add(multiprovider_network) - return multiprovider_network - - -def is_multiprovider_network(session, network_id): - with session.begin(subtransactions=True): - return bool( - session.query(models.MultiProviderNetworks).filter_by( - network_id=network_id).first()) diff --git a/neutron/plugins/vmware/dbexts/distributedrouter.py b/neutron/plugins/vmware/dbexts/distributedrouter.py deleted file mode 100644 index 5c6accbd3..000000000 --- a/neutron/plugins/vmware/dbexts/distributedrouter.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright 2013 VMware, Inc. All rights reserved. -# -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from neutron.plugins.vmware.dbexts import nsxrouter -from neutron.plugins.vmware.extensions import distributedrouter as dist_rtr - - -class DistributedRouter_mixin(nsxrouter.NsxRouterMixin): - """Mixin class to enable distributed router support.""" - - nsx_attributes = ( - nsxrouter.NsxRouterMixin.nsx_attributes + [{ - 'name': dist_rtr.DISTRIBUTED, - 'default': False - }]) diff --git a/neutron/plugins/vmware/dbexts/lsn_db.py b/neutron/plugins/vmware/dbexts/lsn_db.py deleted file mode 100644 index 25a457ac7..000000000 --- a/neutron/plugins/vmware/dbexts/lsn_db.py +++ /dev/null @@ -1,131 +0,0 @@ -# Copyright 2014 VMware, Inc. -# -# All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -from sqlalchemy import Column -from sqlalchemy import ForeignKey -from sqlalchemy import orm -from sqlalchemy import String - -from neutron.db import models_v2 -from neutron.openstack.common.db import exception as d_exc -from neutron.openstack.common import log as logging -from neutron.plugins.vmware.common import exceptions as p_exc - - -LOG = logging.getLogger(__name__) - - -class LsnPort(models_v2.model_base.BASEV2): - - __tablename__ = 'lsn_port' - - lsn_port_id = Column(String(36), primary_key=True) - - lsn_id = Column(String(36), ForeignKey('lsn.lsn_id', ondelete="CASCADE"), - nullable=False) - sub_id = Column(String(36), nullable=False, unique=True) - mac_addr = Column(String(32), nullable=False, unique=True) - - def __init__(self, lsn_port_id, subnet_id, mac_address, lsn_id): - self.lsn_port_id = lsn_port_id - self.lsn_id = lsn_id - self.sub_id = subnet_id - self.mac_addr = mac_address - - -class Lsn(models_v2.model_base.BASEV2): - __tablename__ = 'lsn' - - lsn_id = Column(String(36), primary_key=True) - net_id = Column(String(36), nullable=False) - - def __init__(self, net_id, lsn_id): - self.net_id = net_id - self.lsn_id = lsn_id - - -def lsn_add(context, network_id, lsn_id): - """Add Logical Service Node information to persistent datastore.""" - with context.session.begin(subtransactions=True): - lsn = Lsn(network_id, lsn_id) - context.session.add(lsn) - - -def lsn_remove(context, lsn_id): - """Remove Logical Service Node information from datastore given its id.""" - with context.session.begin(subtransactions=True): - context.session.query(Lsn).filter_by(lsn_id=lsn_id).delete() - - -def lsn_remove_for_network(context, network_id): - """Remove information about the Logical Service Node given its network.""" - with context.session.begin(subtransactions=True): - context.session.query(Lsn).filter_by(net_id=network_id).delete() - - -def lsn_get_for_network(context, network_id, raise_on_err=True): - """Retrieve LSN information given its network id.""" - 
query = context.session.query(Lsn) - try: - return query.filter_by(net_id=network_id).one() - except (orm.exc.NoResultFound, d_exc.DBError): - logger = raise_on_err and LOG.error or LOG.warn - logger(_('Unable to find Logical Service Node for ' - 'network %s'), network_id) - if raise_on_err: - raise p_exc.LsnNotFound(entity='network', - entity_id=network_id) - - -def lsn_port_add_for_lsn(context, lsn_port_id, subnet_id, mac, lsn_id): - """Add Logical Service Node Port information to persistent datastore.""" - with context.session.begin(subtransactions=True): - lsn_port = LsnPort(lsn_port_id, subnet_id, mac, lsn_id) - context.session.add(lsn_port) - - -def lsn_port_get_for_subnet(context, subnet_id, raise_on_err=True): - """Return Logical Service Node Port information given its subnet id.""" - with context.session.begin(subtransactions=True): - try: - return (context.session.query(LsnPort). - filter_by(sub_id=subnet_id).one()) - except (orm.exc.NoResultFound, d_exc.DBError): - if raise_on_err: - raise p_exc.LsnPortNotFound(lsn_id=None, - entity='subnet', - entity_id=subnet_id) - - -def lsn_port_get_for_mac(context, mac_address, raise_on_err=True): - """Return Logical Service Node Port information given its mac address.""" - with context.session.begin(subtransactions=True): - try: - return (context.session.query(LsnPort). - filter_by(mac_addr=mac_address).one()) - except (orm.exc.NoResultFound, d_exc.DBError): - if raise_on_err: - raise p_exc.LsnPortNotFound(lsn_id=None, - entity='mac', - entity_id=mac_address) - - -def lsn_port_remove(context, lsn_port_id): - """Remove Logical Service Node port from the given Logical Service Node.""" - with context.session.begin(subtransactions=True): - (context.session.query(LsnPort). 
- filter_by(lsn_port_id=lsn_port_id).delete()) diff --git a/neutron/plugins/vmware/dbexts/maclearning.py b/neutron/plugins/vmware/dbexts/maclearning.py deleted file mode 100644 index 6a5f73acd..000000000 --- a/neutron/plugins/vmware/dbexts/maclearning.py +++ /dev/null @@ -1,78 +0,0 @@ -# Copyright 2013 VMware, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -import sqlalchemy as sa -from sqlalchemy import orm -from sqlalchemy.orm import exc - -from neutron.api.v2 import attributes -from neutron.db import db_base_plugin_v2 -from neutron.db import model_base -from neutron.db import models_v2 -from neutron.openstack.common import log as logging -from neutron.plugins.vmware.extensions import maclearning as mac - -LOG = logging.getLogger(__name__) - - -class MacLearningState(model_base.BASEV2): - - port_id = sa.Column(sa.String(36), - sa.ForeignKey('ports.id', ondelete="CASCADE"), - primary_key=True) - mac_learning_enabled = sa.Column(sa.Boolean(), nullable=False) - - # Add a relationship to the Port model using the backref attribute. - # This will instruct SQLAlchemy to eagerly load this association. 
- port = orm.relationship( - models_v2.Port, - backref=orm.backref("mac_learning_state", lazy='joined', - uselist=False, cascade='delete')) - - -class MacLearningDbMixin(object): - """Mixin class for mac learning.""" - - def _make_mac_learning_state_dict(self, port, fields=None): - res = {'port_id': port['port_id'], - mac.MAC_LEARNING: port[mac.MAC_LEARNING]} - return self._fields(res, fields) - - def _extend_port_mac_learning_state(self, port_res, port_db): - state = port_db.mac_learning_state - if state and state.mac_learning_enabled: - port_res[mac.MAC_LEARNING] = state.mac_learning_enabled - - # Register dict extend functions for ports - db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( - attributes.PORTS, ['_extend_port_mac_learning_state']) - - def _update_mac_learning_state(self, context, port_id, enabled): - try: - query = self._model_query(context, MacLearningState) - state = query.filter(MacLearningState.port_id == port_id).one() - state.update({mac.MAC_LEARNING: enabled}) - except exc.NoResultFound: - self._create_mac_learning_state(context, - {'id': port_id, - mac.MAC_LEARNING: enabled}) - - def _create_mac_learning_state(self, context, port): - with context.session.begin(subtransactions=True): - enabled = port[mac.MAC_LEARNING] - state = MacLearningState(port_id=port['id'], - mac_learning_enabled=enabled) - context.session.add(state) - return self._make_mac_learning_state_dict(state) diff --git a/neutron/plugins/vmware/dbexts/models.py b/neutron/plugins/vmware/dbexts/models.py deleted file mode 100644 index 356332811..000000000 --- a/neutron/plugins/vmware/dbexts/models.py +++ /dev/null @@ -1,135 +0,0 @@ -# Copyright 2013 VMware, Inc. -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -from sqlalchemy import Boolean, Column, Enum, ForeignKey, Integer, String -from sqlalchemy import orm - -from neutron.db import l3_db -from neutron.db import model_base - - -class TzNetworkBinding(model_base.BASEV2): - """Represents a binding of a virtual network with a transport zone. - - This model class associates a Neutron network with a transport zone; - optionally a vlan ID might be used if the binding type is 'bridge' - """ - __tablename__ = 'tz_network_bindings' - - # TODO(arosen) - it might be worth while refactoring the how this data - # is stored later so every column does not need to be a primary key. - network_id = Column(String(36), - ForeignKey('networks.id', ondelete="CASCADE"), - primary_key=True) - # 'flat', 'vlan', stt' or 'gre' - binding_type = Column(Enum('flat', 'vlan', 'stt', 'gre', 'l3_ext', - name='tz_network_bindings_binding_type'), - nullable=False, primary_key=True) - phy_uuid = Column(String(36), primary_key=True, nullable=True) - vlan_id = Column(Integer, primary_key=True, nullable=True, - autoincrement=False) - - def __init__(self, network_id, binding_type, phy_uuid, vlan_id): - self.network_id = network_id - self.binding_type = binding_type - self.phy_uuid = phy_uuid - self.vlan_id = vlan_id - - def __repr__(self): - return "" % (self.network_id, - self.binding_type, - self.phy_uuid, - self.vlan_id) - - -class NeutronNsxNetworkMapping(model_base.BASEV2): - """Maps neutron network identifiers to NSX identifiers. - - Because of chained logical switches more than one mapping might exist - for a single Neutron network. 
- """ - __tablename__ = 'neutron_nsx_network_mappings' - neutron_id = Column(String(36), - ForeignKey('networks.id', ondelete='CASCADE'), - primary_key=True) - nsx_id = Column(String(36), primary_key=True) - - -class NeutronNsxSecurityGroupMapping(model_base.BASEV2): - """Backend mappings for Neutron Security Group identifiers. - - This class maps a neutron security group identifier to the corresponding - NSX security profile identifier. - """ - - __tablename__ = 'neutron_nsx_security_group_mappings' - neutron_id = Column(String(36), - ForeignKey('securitygroups.id', ondelete="CASCADE"), - primary_key=True) - nsx_id = Column(String(36), primary_key=True) - - -class NeutronNsxPortMapping(model_base.BASEV2): - """Represents the mapping between neutron and nsx port uuids.""" - - __tablename__ = 'neutron_nsx_port_mappings' - neutron_id = Column(String(36), - ForeignKey('ports.id', ondelete="CASCADE"), - primary_key=True) - nsx_switch_id = Column(String(36)) - nsx_port_id = Column(String(36), nullable=False) - - def __init__(self, neutron_id, nsx_switch_id, nsx_port_id): - self.neutron_id = neutron_id - self.nsx_switch_id = nsx_switch_id - self.nsx_port_id = nsx_port_id - - -class NeutronNsxRouterMapping(model_base.BASEV2): - """Maps neutron router identifiers to NSX identifiers.""" - __tablename__ = 'neutron_nsx_router_mappings' - neutron_id = Column(String(36), - ForeignKey('routers.id', ondelete='CASCADE'), - primary_key=True) - nsx_id = Column(String(36)) - - -class MultiProviderNetworks(model_base.BASEV2): - """Networks provisioned through multiprovider extension.""" - - __tablename__ = 'multi_provider_networks' - network_id = Column(String(36), - ForeignKey('networks.id', ondelete="CASCADE"), - primary_key=True) - - def __init__(self, network_id): - self.network_id = network_id - - -class NSXRouterExtAttributes(model_base.BASEV2): - """Router attributes managed by NSX plugin extensions.""" - router_id = Column(String(36), - ForeignKey('routers.id', 
ondelete="CASCADE"), - primary_key=True) - distributed = Column(Boolean, default=False, nullable=False) - service_router = Column(Boolean, default=False, nullable=False) - # Add a relationship to the Router model in order to instruct - # SQLAlchemy to eagerly load this association - router = orm.relationship( - l3_db.Router, - backref=orm.backref("nsx_attributes", lazy='joined', - uselist=False, cascade='delete')) diff --git a/neutron/plugins/vmware/dbexts/networkgw_db.py b/neutron/plugins/vmware/dbexts/networkgw_db.py deleted file mode 100644 index fb5eb6268..000000000 --- a/neutron/plugins/vmware/dbexts/networkgw_db.py +++ /dev/null @@ -1,499 +0,0 @@ -# Copyright 2013 VMware, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import sqlalchemy as sa - -from sqlalchemy import orm -from sqlalchemy.orm import exc as sa_orm_exc - -from neutron.api.v2 import attributes -from neutron.common import exceptions -from neutron.db import model_base -from neutron.db import models_v2 -from neutron.openstack.common import log as logging -from neutron.openstack.common import uuidutils -from neutron.plugins.vmware.extensions import networkgw - - -LOG = logging.getLogger(__name__) -DEVICE_OWNER_NET_GW_INTF = 'network:gateway-interface' -NETWORK_ID = 'network_id' -SEGMENTATION_TYPE = 'segmentation_type' -SEGMENTATION_ID = 'segmentation_id' -ALLOWED_CONNECTION_ATTRIBUTES = set((NETWORK_ID, - SEGMENTATION_TYPE, - SEGMENTATION_ID)) -# Constants for gateway device operational status -STATUS_UNKNOWN = "UNKNOWN" -STATUS_ERROR = "ERROR" -STATUS_ACTIVE = "ACTIVE" -STATUS_DOWN = "DOWN" - - -class GatewayInUse(exceptions.InUse): - message = _("Network Gateway '%(gateway_id)s' still has active mappings " - "with one or more neutron networks.") - - -class GatewayNotFound(exceptions.NotFound): - message = _("Network Gateway %(gateway_id)s could not be found") - - -class GatewayDeviceInUse(exceptions.InUse): - message = _("Network Gateway Device '%(device_id)s' is still used by " - "one or more network gateways.") - - -class GatewayDeviceNotFound(exceptions.NotFound): - message = _("Network Gateway Device %(device_id)s could not be found.") - - -class NetworkGatewayPortInUse(exceptions.InUse): - message = _("Port '%(port_id)s' is owned by '%(device_owner)s' and " - "therefore cannot be deleted directly via the port API.") - - -class GatewayConnectionInUse(exceptions.InUse): - message = _("The specified mapping '%(mapping)s' is already in use on " - "network gateway '%(gateway_id)s'.") - - -class MultipleGatewayConnections(exceptions.Conflict): - message = _("Multiple network connections found on '%(gateway_id)s' " - "with provided criteria.") - - -class GatewayConnectionNotFound(exceptions.NotFound): - message = 
_("The connection %(network_mapping_info)s was not found on the " - "network gateway '%(network_gateway_id)s'") - - -class NetworkGatewayUnchangeable(exceptions.InUse): - message = _("The network gateway %(gateway_id)s " - "cannot be updated or deleted") - - -class NetworkConnection(model_base.BASEV2, models_v2.HasTenant): - """Defines a connection between a network gateway and a network.""" - # We use port_id as the primary key as one can connect a gateway - # to a network in multiple ways (and we cannot use the same port form - # more than a single gateway) - network_gateway_id = sa.Column(sa.String(36), - sa.ForeignKey('networkgateways.id', - ondelete='CASCADE')) - network_id = sa.Column(sa.String(36), - sa.ForeignKey('networks.id', ondelete='CASCADE')) - segmentation_type = sa.Column( - sa.Enum('flat', 'vlan', - name='networkconnections_segmentation_type')) - segmentation_id = sa.Column(sa.Integer) - __table_args__ = (sa.UniqueConstraint(network_gateway_id, - segmentation_type, - segmentation_id),) - # Also, storing port id comes back useful when disconnecting a network - # from a gateway - port_id = sa.Column(sa.String(36), - sa.ForeignKey('ports.id', ondelete='CASCADE'), - primary_key=True) - - -class NetworkGatewayDeviceReference(model_base.BASEV2): - id = sa.Column(sa.String(36), primary_key=True) - network_gateway_id = sa.Column(sa.String(36), - sa.ForeignKey('networkgateways.id', - ondelete='CASCADE'), - primary_key=True) - interface_name = sa.Column(sa.String(64), primary_key=True) - - -class NetworkGatewayDevice(model_base.BASEV2, models_v2.HasId, - models_v2.HasTenant): - nsx_id = sa.Column(sa.String(36)) - # Optional name for the gateway device - name = sa.Column(sa.String(255)) - # Transport connector type. 
Not using enum as range of - # connector types might vary with backend version - connector_type = sa.Column(sa.String(10)) - # Transport connector IP Address - connector_ip = sa.Column(sa.String(64)) - # operational status - status = sa.Column(sa.String(16)) - - -class NetworkGateway(model_base.BASEV2, models_v2.HasId, - models_v2.HasTenant): - """Defines the data model for a network gateway.""" - name = sa.Column(sa.String(255)) - # Tenant id is nullable for this resource - tenant_id = sa.Column(sa.String(36)) - default = sa.Column(sa.Boolean()) - devices = orm.relationship(NetworkGatewayDeviceReference, - backref='networkgateways', - cascade='all,delete') - network_connections = orm.relationship(NetworkConnection, lazy='joined') - - -class NetworkGatewayMixin(networkgw.NetworkGatewayPluginBase): - - gateway_resource = networkgw.GATEWAY_RESOURCE_NAME - device_resource = networkgw.DEVICE_RESOURCE_NAME - - def _get_network_gateway(self, context, gw_id): - try: - gw = self._get_by_id(context, NetworkGateway, gw_id) - except sa_orm_exc.NoResultFound: - raise GatewayNotFound(gateway_id=gw_id) - return gw - - def _make_gw_connection_dict(self, gw_conn): - return {'port_id': gw_conn['port_id'], - 'segmentation_type': gw_conn['segmentation_type'], - 'segmentation_id': gw_conn['segmentation_id']} - - def _make_network_gateway_dict(self, network_gateway, fields=None): - device_list = [] - for d in network_gateway['devices']: - device_list.append({'id': d['id'], - 'interface_name': d['interface_name']}) - res = {'id': network_gateway['id'], - 'name': network_gateway['name'], - 'default': network_gateway['default'], - 'devices': device_list, - 'tenant_id': network_gateway['tenant_id']} - # Query gateway connections only if needed - if (fields and 'ports' in fields) or not fields: - res['ports'] = [self._make_gw_connection_dict(conn) - for conn in network_gateway.network_connections] - return self._fields(res, fields) - - def _set_mapping_info_defaults(self, mapping_info): - 
if not mapping_info.get('segmentation_type'): - mapping_info['segmentation_type'] = 'flat' - if not mapping_info.get('segmentation_id'): - mapping_info['segmentation_id'] = 0 - - def _validate_network_mapping_info(self, network_mapping_info): - self._set_mapping_info_defaults(network_mapping_info) - network_id = network_mapping_info.get(NETWORK_ID) - if not network_id: - raise exceptions.InvalidInput( - error_message=_("A network identifier must be specified " - "when connecting a network to a network " - "gateway. Unable to complete operation")) - connection_attrs = set(network_mapping_info.keys()) - if not connection_attrs.issubset(ALLOWED_CONNECTION_ATTRIBUTES): - raise exceptions.InvalidInput( - error_message=(_("Invalid keys found among the ones provided " - "in request body: %(connection_attrs)s."), - connection_attrs)) - seg_type = network_mapping_info.get(SEGMENTATION_TYPE) - seg_id = network_mapping_info.get(SEGMENTATION_ID) - if not seg_type and seg_id: - msg = _("In order to specify a segmentation id the " - "segmentation type must be specified as well") - raise exceptions.InvalidInput(error_message=msg) - elif seg_type and seg_type.lower() == 'flat' and seg_id: - msg = _("Cannot specify a segmentation id when " - "the segmentation type is flat") - raise exceptions.InvalidInput(error_message=msg) - return network_id - - def _retrieve_gateway_connections(self, context, gateway_id, - mapping_info={}, only_one=False): - filters = {'network_gateway_id': [gateway_id]} - for k, v in mapping_info.iteritems(): - if v and k != NETWORK_ID: - filters[k] = [v] - query = self._get_collection_query(context, - NetworkConnection, - filters) - return only_one and query.one() or query.all() - - def _unset_default_network_gateways(self, context): - with context.session.begin(subtransactions=True): - context.session.query(NetworkGateway).update( - {NetworkGateway.default: False}) - - def _set_default_network_gateway(self, context, gw_id): - with 
context.session.begin(subtransactions=True): - gw = (context.session.query(NetworkGateway). - filter_by(id=gw_id).one()) - gw['default'] = True - - def prevent_network_gateway_port_deletion(self, context, port): - """Pre-deletion check. - - Ensures a port will not be deleted if is being used by a network - gateway. In that case an exception will be raised. - """ - if port['device_owner'] == DEVICE_OWNER_NET_GW_INTF: - raise NetworkGatewayPortInUse(port_id=port['id'], - device_owner=port['device_owner']) - - def create_network_gateway(self, context, network_gateway): - gw_data = network_gateway[self.gateway_resource] - tenant_id = self._get_tenant_id_for_create(context, gw_data) - with context.session.begin(subtransactions=True): - gw_db = NetworkGateway( - id=gw_data.get('id', uuidutils.generate_uuid()), - tenant_id=tenant_id, - name=gw_data.get('name')) - # Device list is guaranteed to be a valid list - device_query = self._query_gateway_devices( - context, filters={'id': [device['id'] - for device in gw_data['devices']]}) - for device in device_query: - if device['tenant_id'] != tenant_id: - raise GatewayDeviceNotFound(device_id=device['id']) - gw_db.devices.extend([NetworkGatewayDeviceReference(**device) - for device in gw_data['devices']]) - context.session.add(gw_db) - LOG.debug(_("Created network gateway with id:%s"), gw_db['id']) - return self._make_network_gateway_dict(gw_db) - - def update_network_gateway(self, context, id, network_gateway): - gw_data = network_gateway[self.gateway_resource] - with context.session.begin(subtransactions=True): - gw_db = self._get_network_gateway(context, id) - if gw_db.default: - raise NetworkGatewayUnchangeable(gateway_id=id) - # Ensure there is something to update before doing it - if any([gw_db[k] != gw_data[k] for k in gw_data]): - gw_db.update(gw_data) - LOG.debug(_("Updated network gateway with id:%s"), id) - return self._make_network_gateway_dict(gw_db) - - def get_network_gateway(self, context, id, fields=None): - 
gw_db = self._get_network_gateway(context, id) - return self._make_network_gateway_dict(gw_db, fields) - - def delete_network_gateway(self, context, id): - with context.session.begin(subtransactions=True): - gw_db = self._get_network_gateway(context, id) - if gw_db.network_connections: - raise GatewayInUse(gateway_id=id) - if gw_db.default: - raise NetworkGatewayUnchangeable(gateway_id=id) - context.session.delete(gw_db) - LOG.debug(_("Network gateway '%s' was destroyed."), id) - - def get_network_gateways(self, context, filters=None, fields=None, - sorts=None, limit=None, marker=None, - page_reverse=False): - marker_obj = self._get_marker_obj( - context, 'network_gateway', limit, marker) - return self._get_collection(context, NetworkGateway, - self._make_network_gateway_dict, - filters=filters, fields=fields, - sorts=sorts, limit=limit, - marker_obj=marker_obj, - page_reverse=page_reverse) - - def connect_network(self, context, network_gateway_id, - network_mapping_info): - network_id = self._validate_network_mapping_info(network_mapping_info) - LOG.debug(_("Connecting network '%(network_id)s' to gateway " - "'%(network_gateway_id)s'"), - {'network_id': network_id, - 'network_gateway_id': network_gateway_id}) - with context.session.begin(subtransactions=True): - gw_db = self._get_network_gateway(context, network_gateway_id) - tenant_id = self._get_tenant_id_for_create(context, gw_db) - # TODO(salvatore-orlando): Leverage unique constraint instead - # of performing another query! - if self._retrieve_gateway_connections(context, - network_gateway_id, - network_mapping_info): - raise GatewayConnectionInUse(mapping=network_mapping_info, - gateway_id=network_gateway_id) - # TODO(salvatore-orlando): Creating a port will give it an IP, - # but we actually do not need any. 
Instead of wasting an IP we - # should have a way to say a port shall not be associated with - # any subnet - try: - # We pass the segmentation type and id too - the plugin - # might find them useful as the network connection object - # does not exist yet. - # NOTE: they're not extended attributes, rather extra data - # passed in the port structure to the plugin - # TODO(salvatore-orlando): Verify optimal solution for - # ownership of the gateway port - port = self.create_port(context, { - 'port': - {'tenant_id': tenant_id, - 'network_id': network_id, - 'mac_address': attributes.ATTR_NOT_SPECIFIED, - 'admin_state_up': True, - 'fixed_ips': [], - 'device_id': network_gateway_id, - 'device_owner': DEVICE_OWNER_NET_GW_INTF, - 'name': '', - 'gw:segmentation_type': - network_mapping_info.get('segmentation_type'), - 'gw:segmentation_id': - network_mapping_info.get('segmentation_id')}}) - except exceptions.NetworkNotFound: - err_msg = (_("Requested network '%(network_id)s' not found." - "Unable to create network connection on " - "gateway '%(network_gateway_id)s") % - {'network_id': network_id, - 'network_gateway_id': network_gateway_id}) - LOG.error(err_msg) - raise exceptions.InvalidInput(error_message=err_msg) - port_id = port['id'] - LOG.debug(_("Gateway port for '%(network_gateway_id)s' " - "created on network '%(network_id)s':%(port_id)s"), - {'network_gateway_id': network_gateway_id, - 'network_id': network_id, - 'port_id': port_id}) - # Create NetworkConnection record - network_mapping_info['port_id'] = port_id - network_mapping_info['tenant_id'] = tenant_id - gw_db.network_connections.append( - NetworkConnection(**network_mapping_info)) - port_id = port['id'] - # now deallocate and recycle ip from the port - for fixed_ip in port.get('fixed_ips', []): - self._delete_ip_allocation(context, network_id, - fixed_ip['subnet_id'], - fixed_ip['ip_address']) - LOG.debug(_("Ensured no Ip addresses are configured on port %s"), - port_id) - return {'connection_info': - 
{'network_gateway_id': network_gateway_id, - 'network_id': network_id, - 'port_id': port_id}} - - def disconnect_network(self, context, network_gateway_id, - network_mapping_info): - network_id = self._validate_network_mapping_info(network_mapping_info) - LOG.debug(_("Disconnecting network '%(network_id)s' from gateway " - "'%(network_gateway_id)s'"), - {'network_id': network_id, - 'network_gateway_id': network_gateway_id}) - with context.session.begin(subtransactions=True): - # Uniquely identify connection, otherwise raise - try: - net_connection = self._retrieve_gateway_connections( - context, network_gateway_id, - network_mapping_info, only_one=True) - except sa_orm_exc.NoResultFound: - raise GatewayConnectionNotFound( - network_mapping_info=network_mapping_info, - network_gateway_id=network_gateway_id) - except sa_orm_exc.MultipleResultsFound: - raise MultipleGatewayConnections( - gateway_id=network_gateway_id) - # Remove gateway port from network - # FIXME(salvatore-orlando): Ensure state of port in NSX is - # consistent with outcome of transaction - self.delete_port(context, net_connection['port_id'], - nw_gw_port_check=False) - # Remove NetworkConnection record - context.session.delete(net_connection) - - def _make_gateway_device_dict(self, gateway_device, fields=None, - include_nsx_id=False): - res = {'id': gateway_device['id'], - 'name': gateway_device['name'], - 'status': gateway_device['status'], - 'connector_type': gateway_device['connector_type'], - 'connector_ip': gateway_device['connector_ip'], - 'tenant_id': gateway_device['tenant_id']} - if include_nsx_id: - # Return the NSX mapping as well. This attribute will not be - # returned in the API response anyway. Ensure it will not be - # filtered out in field selection. 
- if fields: - fields.append('nsx_id') - res['nsx_id'] = gateway_device['nsx_id'] - return self._fields(res, fields) - - def _get_gateway_device(self, context, device_id): - try: - return self._get_by_id(context, NetworkGatewayDevice, device_id) - except sa_orm_exc.NoResultFound: - raise GatewayDeviceNotFound(device_id=device_id) - - def _is_device_in_use(self, context, device_id): - query = self._get_collection_query( - context, NetworkGatewayDeviceReference, {'id': [device_id]}) - return query.first() - - def get_gateway_device(self, context, device_id, fields=None, - include_nsx_id=False): - return self._make_gateway_device_dict( - self._get_gateway_device(context, device_id), - fields, include_nsx_id) - - def _query_gateway_devices(self, context, - filters=None, sorts=None, - limit=None, marker=None, - page_reverse=None): - marker_obj = self._get_marker_obj( - context, 'gateway_device', limit, marker) - return self._get_collection_query(context, - NetworkGatewayDevice, - filters=filters, - sorts=sorts, - limit=limit, - marker_obj=marker_obj, - page_reverse=page_reverse) - - def get_gateway_devices(self, context, filters=None, fields=None, - sorts=None, limit=None, marker=None, - page_reverse=False, include_nsx_id=False): - query = self._query_gateway_devices(context, filters, sorts, limit, - marker, page_reverse) - return [self._make_gateway_device_dict(row, fields, include_nsx_id) - for row in query] - - def create_gateway_device(self, context, gateway_device, - initial_status=STATUS_UNKNOWN): - device_data = gateway_device[self.device_resource] - tenant_id = self._get_tenant_id_for_create(context, device_data) - with context.session.begin(subtransactions=True): - device_db = NetworkGatewayDevice( - id=device_data.get('id', uuidutils.generate_uuid()), - tenant_id=tenant_id, - name=device_data.get('name'), - connector_type=device_data['connector_type'], - connector_ip=device_data['connector_ip'], - status=initial_status) - context.session.add(device_db) - 
LOG.debug(_("Created network gateway device: %s"), device_db['id']) - return self._make_gateway_device_dict(device_db) - - def update_gateway_device(self, context, gateway_device_id, - gateway_device, include_nsx_id=False): - device_data = gateway_device[self.device_resource] - with context.session.begin(subtransactions=True): - device_db = self._get_gateway_device(context, gateway_device_id) - # Ensure there is something to update before doing it - if any([device_db[k] != device_data[k] for k in device_data]): - device_db.update(device_data) - LOG.debug(_("Updated network gateway device: %s"), - gateway_device_id) - return self._make_gateway_device_dict( - device_db, include_nsx_id=include_nsx_id) - - def delete_gateway_device(self, context, device_id): - with context.session.begin(subtransactions=True): - # A gateway device should not be deleted - # if it is used in any network gateway service - if self._is_device_in_use(context, device_id): - raise GatewayDeviceInUse(device_id=device_id) - device_db = self._get_gateway_device(context, device_id) - context.session.delete(device_db) - LOG.debug(_("Deleted network gateway device: %s."), device_id) diff --git a/neutron/plugins/vmware/dbexts/nsxrouter.py b/neutron/plugins/vmware/dbexts/nsxrouter.py deleted file mode 100644 index 48aa61266..000000000 --- a/neutron/plugins/vmware/dbexts/nsxrouter.py +++ /dev/null @@ -1,66 +0,0 @@ -# Copyright 2013 VMware, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from neutron.db import db_base_plugin_v2 -from neutron.extensions import l3 -from neutron.openstack.common import log as logging -from neutron.plugins.vmware.dbexts import models - -LOG = logging.getLogger(__name__) - - -class NsxRouterMixin(object): - """Mixin class to enable nsx router support.""" - - nsx_attributes = [] - - def _extend_nsx_router_dict(self, router_res, router_db): - nsx_attrs = router_db['nsx_attributes'] - # Return False if nsx attributes are not definied for this - # neutron router - for attr in self.nsx_attributes: - name = attr['name'] - default = attr['default'] - router_res[name] = ( - nsx_attrs and nsx_attrs[name] or default) - - def _process_nsx_router_create( - self, context, router_db, router_req): - if not router_db['nsx_attributes']: - kwargs = {} - for attr in self.nsx_attributes: - name = attr['name'] - default = attr['default'] - kwargs[name] = router_req.get(name, default) - nsx_attributes = models.NSXRouterExtAttributes( - router_id=router_db['id'], **kwargs) - context.session.add(nsx_attributes) - router_db['nsx_attributes'] = nsx_attributes - else: - # The situation where the record already exists will - # be likely once the NSXRouterExtAttributes model - # will allow for defining several attributes pertaining - # to different extensions - for attr in self.nsx_attributes: - name = attr['name'] - default = attr['default'] - router_db['nsx_attributes'][name] = router_req.get( - name, default) - LOG.debug(_("Nsx router extension successfully processed " - "for router:%s"), router_db['id']) - - # Register dict extend functions for ports - db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( - l3.ROUTERS, ['_extend_nsx_router_dict']) diff --git a/neutron/plugins/vmware/dbexts/qos_db.py b/neutron/plugins/vmware/dbexts/qos_db.py deleted file mode 100644 index b094a2293..000000000 --- 
a/neutron/plugins/vmware/dbexts/qos_db.py +++ /dev/null @@ -1,297 +0,0 @@ -# Copyright 2013 VMware, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -import sqlalchemy as sa -from sqlalchemy import orm -from sqlalchemy.orm import exc - -from neutron.api.v2 import attributes as attr -from neutron.db import db_base_plugin_v2 -from neutron.db import model_base -from neutron.db import models_v2 -from neutron.openstack.common import log -from neutron.openstack.common import uuidutils -from neutron.plugins.vmware.extensions import qos - - -LOG = log.getLogger(__name__) - - -class QoSQueue(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant): - name = sa.Column(sa.String(255)) - default = sa.Column(sa.Boolean, default=False) - min = sa.Column(sa.Integer, nullable=False) - max = sa.Column(sa.Integer, nullable=True) - qos_marking = sa.Column(sa.Enum('untrusted', 'trusted', - name='qosqueues_qos_marking')) - dscp = sa.Column(sa.Integer) - - -class PortQueueMapping(model_base.BASEV2): - port_id = sa.Column(sa.String(36), - sa.ForeignKey("ports.id", ondelete="CASCADE"), - primary_key=True) - - queue_id = sa.Column(sa.String(36), sa.ForeignKey("qosqueues.id"), - primary_key=True) - - # Add a relationship to the Port model adding a backref which will - # allow SQLAlchemy for eagerly load the queue binding - port = orm.relationship( - models_v2.Port, - backref=orm.backref("qos_queue", uselist=False, - cascade='delete', lazy='joined')) - - -class 
NetworkQueueMapping(model_base.BASEV2): - network_id = sa.Column(sa.String(36), - sa.ForeignKey("networks.id", ondelete="CASCADE"), - primary_key=True) - - queue_id = sa.Column(sa.String(36), sa.ForeignKey("qosqueues.id", - ondelete="CASCADE")) - - # Add a relationship to the Network model adding a backref which will - # allow SQLAlcremy for eagerly load the queue binding - network = orm.relationship( - models_v2.Network, - backref=orm.backref("qos_queue", uselist=False, - cascade='delete', lazy='joined')) - - -class QoSDbMixin(qos.QueuePluginBase): - """Mixin class to add queues.""" - - def create_qos_queue(self, context, qos_queue): - q = qos_queue['qos_queue'] - with context.session.begin(subtransactions=True): - qos_queue = QoSQueue(id=q.get('id', uuidutils.generate_uuid()), - name=q.get('name'), - tenant_id=q['tenant_id'], - default=q.get('default'), - min=q.get('min'), - max=q.get('max'), - qos_marking=q.get('qos_marking'), - dscp=q.get('dscp')) - context.session.add(qos_queue) - return self._make_qos_queue_dict(qos_queue) - - def get_qos_queue(self, context, queue_id, fields=None): - return self._make_qos_queue_dict( - self._get_qos_queue(context, queue_id), fields) - - def _get_qos_queue(self, context, queue_id): - try: - return self._get_by_id(context, QoSQueue, queue_id) - except exc.NoResultFound: - raise qos.QueueNotFound(id=queue_id) - - def get_qos_queues(self, context, filters=None, fields=None, sorts=None, - limit=None, marker=None, page_reverse=False): - marker_obj = self._get_marker_obj(context, 'qos_queue', limit, marker) - return self._get_collection(context, QoSQueue, - self._make_qos_queue_dict, - filters=filters, fields=fields, - sorts=sorts, limit=limit, - marker_obj=marker_obj, - page_reverse=page_reverse) - - def delete_qos_queue(self, context, queue_id): - qos_queue = self._get_qos_queue(context, queue_id) - with context.session.begin(subtransactions=True): - context.session.delete(qos_queue) - - def _process_port_queue_mapping(self, 
context, port_data, queue_id): - port_data[qos.QUEUE] = queue_id - if not queue_id: - return - with context.session.begin(subtransactions=True): - context.session.add(PortQueueMapping(port_id=port_data['id'], - queue_id=queue_id)) - - def _get_port_queue_bindings(self, context, filters=None, fields=None): - return self._get_collection(context, PortQueueMapping, - self._make_port_queue_binding_dict, - filters=filters, fields=fields) - - def _delete_port_queue_mapping(self, context, port_id): - query = self._model_query(context, PortQueueMapping) - try: - binding = query.filter(PortQueueMapping.port_id == port_id).one() - except exc.NoResultFound: - # return since this can happen if we are updating a port that - # did not already have a queue on it. There is no need to check - # if there is one before deleting if we return here. - return - with context.session.begin(subtransactions=True): - context.session.delete(binding) - - def _process_network_queue_mapping(self, context, net_data, queue_id): - net_data[qos.QUEUE] = queue_id - if not queue_id: - return - with context.session.begin(subtransactions=True): - context.session.add( - NetworkQueueMapping(network_id=net_data['id'], - queue_id=queue_id)) - - def _get_network_queue_bindings(self, context, filters=None, fields=None): - return self._get_collection(context, NetworkQueueMapping, - self._make_network_queue_binding_dict, - filters=filters, fields=fields) - - def _delete_network_queue_mapping(self, context, network_id): - query = self._model_query(context, NetworkQueueMapping) - with context.session.begin(subtransactions=True): - binding = query.filter_by(network_id=network_id).first() - if binding: - context.session.delete(binding) - - def _extend_dict_qos_queue(self, obj_res, obj_db): - queue_mapping = obj_db['qos_queue'] - if queue_mapping: - obj_res[qos.QUEUE] = queue_mapping.get('queue_id') - return obj_res - - def _extend_port_dict_qos_queue(self, port_res, port_db): - self._extend_dict_qos_queue(port_res, 
port_db) - - def _extend_network_dict_qos_queue(self, network_res, network_db): - self._extend_dict_qos_queue(network_res, network_db) - - # Register dict extend functions for networks and ports - db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( - attr.NETWORKS, ['_extend_network_dict_qos_queue']) - db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( - attr.PORTS, ['_extend_port_dict_qos_queue']) - - def _make_qos_queue_dict(self, queue, fields=None): - res = {'id': queue['id'], - 'name': queue.get('name'), - 'default': queue.get('default'), - 'tenant_id': queue['tenant_id'], - 'min': queue.get('min'), - 'max': queue.get('max'), - 'qos_marking': queue.get('qos_marking'), - 'dscp': queue.get('dscp')} - return self._fields(res, fields) - - def _make_port_queue_binding_dict(self, queue, fields=None): - res = {'port_id': queue['port_id'], - 'queue_id': queue['queue_id']} - return self._fields(res, fields) - - def _make_network_queue_binding_dict(self, queue, fields=None): - res = {'network_id': queue['network_id'], - 'queue_id': queue['queue_id']} - return self._fields(res, fields) - - def _check_for_queue_and_create(self, context, port): - """Check for queue and create. - - This function determines if a port should be associated with a - queue. It works by first querying NetworkQueueMapping to determine - if the network is associated with a queue. If so, then it queries - NetworkQueueMapping for all the networks that are associated with - this queue. Next, it queries against all the ports on these networks - with the port device_id. Finally it queries PortQueueMapping. If that - query returns a queue_id that is returned. Otherwise a queue is - created that is the size of the queue associated with the network and - that queue_id is returned. - - If the network is not associated with a queue we then query to see - if there is a default queue in the system. If so, a copy of that is - created and the queue_id is returned. 
- - Otherwise None is returned. None is also returned if the port does not - have a device_id or if the device_owner is network: - """ - - queue_to_create = None - # If there is no device_id don't create a queue. The queue will be - # created on update port when the device_id is present. Also don't - # apply QoS to network ports. - if (not port.get('device_id') or - port['device_owner'].startswith('network:')): - return - - # Check if there is a queue assocated with the network - filters = {'network_id': [port['network_id']]} - network_queue_id = self._get_network_queue_bindings( - context, filters, ['queue_id']) - if network_queue_id: - # get networks that queue is assocated with - filters = {'queue_id': [network_queue_id[0]['queue_id']]} - networks_with_same_queue = self._get_network_queue_bindings( - context, filters) - - # get the ports on these networks with the same_queue and device_id - filters = {'device_id': [port.get('device_id')], - 'network_id': [network['network_id'] for - network in networks_with_same_queue]} - query = self._model_query(context, models_v2.Port.id) - query = self._apply_filters_to_query(query, models_v2.Port, - filters) - ports_ids = [p[0] for p in query] - if ports_ids: - # shared queue already exists find the queue id - queues = self._get_port_queue_bindings(context, - {'port_id': ports_ids}, - ['queue_id']) - if queues: - return queues[0]['queue_id'] - - # get the size of the queue we want to create - queue_to_create = self._get_qos_queue( - context, network_queue_id[0]['queue_id']) - - else: - # check for default queue - filters = {'default': [True]} - # context is elevated since default queue is owned by admin - queue_to_create = self.get_qos_queues(context.elevated(), filters) - if not queue_to_create: - return - queue_to_create = queue_to_create[0] - - # create the queue - tenant_id = self._get_tenant_id_for_create(context, port) - if port.get(qos.RXTX_FACTOR) and queue_to_create.get('max'): - queue_to_create['max'] *= 
int(port[qos.RXTX_FACTOR]) - queue = {'qos_queue': {'name': queue_to_create.get('name'), - 'min': queue_to_create.get('min'), - 'max': queue_to_create.get('max'), - 'dscp': queue_to_create.get('dscp'), - 'qos_marking': - queue_to_create.get('qos_marking'), - 'tenant_id': tenant_id}} - return self.create_qos_queue(context, queue, False)['id'] - - def _validate_qos_queue(self, context, qos_queue): - if qos_queue.get('default'): - if context.is_admin: - if self.get_qos_queues(context, filters={'default': [True]}): - raise qos.DefaultQueueAlreadyExists() - else: - raise qos.DefaultQueueCreateNotAdmin() - if qos_queue.get('qos_marking') == 'trusted': - dscp = qos_queue.pop('dscp') - LOG.info(_("DSCP value (%s) will be ignored with 'trusted' " - "marking"), dscp) - max = qos_queue.get('max') - min = qos_queue.get('min') - # Max can be None - if max and min > max: - raise qos.QueueMinGreaterMax() diff --git a/neutron/plugins/vmware/dbexts/servicerouter.py b/neutron/plugins/vmware/dbexts/servicerouter.py deleted file mode 100644 index bc34cd4c3..000000000 --- a/neutron/plugins/vmware/dbexts/servicerouter.py +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright 2013 VMware, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -from neutron.plugins.vmware.dbexts import distributedrouter as dist_rtr -from neutron.plugins.vmware.extensions import servicerouter - - -class ServiceRouter_mixin(dist_rtr.DistributedRouter_mixin): - """Mixin class to enable service router support.""" - - nsx_attributes = ( - dist_rtr.DistributedRouter_mixin.nsx_attributes + [{ - 'name': servicerouter.SERVICE_ROUTER, - 'default': False - }]) diff --git a/neutron/plugins/vmware/dbexts/vcns_db.py b/neutron/plugins/vmware/dbexts/vcns_db.py deleted file mode 100644 index 24b3e5b8a..000000000 --- a/neutron/plugins/vmware/dbexts/vcns_db.py +++ /dev/null @@ -1,202 +0,0 @@ -# Copyright 2013 VMware, Inc. -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from sqlalchemy.orm import exc - -from neutron.openstack.common import log as logging -from neutron.plugins.vmware.common import exceptions as nsx_exc -from neutron.plugins.vmware.dbexts import vcns_models -from neutron.plugins.vmware.vshield.common import ( - exceptions as vcns_exc) - -LOG = logging.getLogger(__name__) - - -def add_vcns_router_binding(session, router_id, vse_id, lswitch_id, status): - with session.begin(subtransactions=True): - binding = vcns_models.VcnsRouterBinding( - router_id=router_id, - edge_id=vse_id, - lswitch_id=lswitch_id, - status=status) - session.add(binding) - return binding - - -def get_vcns_router_binding(session, router_id): - with session.begin(subtransactions=True): - return (session.query(vcns_models.VcnsRouterBinding). - filter_by(router_id=router_id).first()) - - -def update_vcns_router_binding(session, router_id, **kwargs): - with session.begin(subtransactions=True): - binding = (session.query(vcns_models.VcnsRouterBinding). - filter_by(router_id=router_id).one()) - for key, value in kwargs.iteritems(): - binding[key] = value - - -def delete_vcns_router_binding(session, router_id): - with session.begin(subtransactions=True): - binding = (session.query(vcns_models.VcnsRouterBinding). - filter_by(router_id=router_id).one()) - session.delete(binding) - - -# -# Edge Firewall binding methods -# -def add_vcns_edge_firewallrule_binding(session, map_info): - with session.begin(subtransactions=True): - binding = vcns_models.VcnsEdgeFirewallRuleBinding( - rule_id=map_info['rule_id'], - rule_vseid=map_info['rule_vseid'], - edge_id=map_info['edge_id']) - session.add(binding) - return binding - - -def delete_vcns_edge_firewallrule_binding(session, id, edge_id): - with session.begin(subtransactions=True): - if not (session.query(vcns_models.VcnsEdgeFirewallRuleBinding). 
- filter_by(rule_id=id, edge_id=edge_id).delete()): - msg = _("Rule Resource binding with id:%s not found!") % id - raise nsx_exc.NsxPluginException(err_msg=msg) - - -def get_vcns_edge_firewallrule_binding(session, id, edge_id): - with session.begin(subtransactions=True): - return (session.query(vcns_models.VcnsEdgeFirewallRuleBinding). - filter_by(rule_id=id, edge_id=edge_id).first()) - - -def get_vcns_edge_firewallrule_binding_by_vseid( - session, edge_id, rule_vseid): - with session.begin(subtransactions=True): - try: - return (session.query(vcns_models.VcnsEdgeFirewallRuleBinding). - filter_by(edge_id=edge_id, rule_vseid=rule_vseid).one()) - except exc.NoResultFound: - msg = _("Rule Resource binding not found!") - raise nsx_exc.NsxPluginException(err_msg=msg) - - -def cleanup_vcns_edge_firewallrule_binding(session, edge_id): - with session.begin(subtransactions=True): - session.query( - vcns_models.VcnsEdgeFirewallRuleBinding).filter_by( - edge_id=edge_id).delete() - - -def add_vcns_edge_vip_binding(session, map_info): - with session.begin(subtransactions=True): - binding = vcns_models.VcnsEdgeVipBinding( - vip_id=map_info['vip_id'], - edge_id=map_info['edge_id'], - vip_vseid=map_info['vip_vseid'], - app_profileid=map_info['app_profileid']) - session.add(binding) - - return binding - - -def get_vcns_edge_vip_binding(session, id): - with session.begin(subtransactions=True): - try: - qry = session.query(vcns_models.VcnsEdgeVipBinding) - return qry.filter_by(vip_id=id).one() - except exc.NoResultFound: - msg = _("VIP Resource binding with id:%s not found!") % id - LOG.exception(msg) - raise vcns_exc.VcnsNotFound( - resource='router_service_binding', msg=msg) - - -def delete_vcns_edge_vip_binding(session, id): - with session.begin(subtransactions=True): - qry = session.query(vcns_models.VcnsEdgeVipBinding) - if not qry.filter_by(vip_id=id).delete(): - msg = _("VIP Resource binding with id:%s not found!") % id - LOG.exception(msg) - raise 
nsx_exc.NsxPluginException(err_msg=msg) - - -def add_vcns_edge_pool_binding(session, map_info): - with session.begin(subtransactions=True): - binding = vcns_models.VcnsEdgePoolBinding( - pool_id=map_info['pool_id'], - edge_id=map_info['edge_id'], - pool_vseid=map_info['pool_vseid']) - session.add(binding) - - return binding - - -def get_vcns_edge_pool_binding(session, id, edge_id): - with session.begin(subtransactions=True): - return (session.query(vcns_models.VcnsEdgePoolBinding). - filter_by(pool_id=id, edge_id=edge_id).first()) - - -def get_vcns_edge_pool_binding_by_vseid(session, edge_id, pool_vseid): - with session.begin(subtransactions=True): - try: - qry = session.query(vcns_models.VcnsEdgePoolBinding) - binding = qry.filter_by(edge_id=edge_id, - pool_vseid=pool_vseid).one() - except exc.NoResultFound: - msg = (_("Pool Resource binding with edge_id:%(edge_id)s " - "pool_vseid:%(pool_vseid)s not found!") % - {'edge_id': edge_id, 'pool_vseid': pool_vseid}) - LOG.exception(msg) - raise nsx_exc.NsxPluginException(err_msg=msg) - return binding - - -def delete_vcns_edge_pool_binding(session, id, edge_id): - with session.begin(subtransactions=True): - qry = session.query(vcns_models.VcnsEdgePoolBinding) - if not qry.filter_by(pool_id=id, edge_id=edge_id).delete(): - msg = _("Pool Resource binding with id:%s not found!") % id - LOG.exception(msg) - raise nsx_exc.NsxPluginException(err_msg=msg) - - -def add_vcns_edge_monitor_binding(session, map_info): - with session.begin(subtransactions=True): - binding = vcns_models.VcnsEdgeMonitorBinding( - monitor_id=map_info['monitor_id'], - edge_id=map_info['edge_id'], - monitor_vseid=map_info['monitor_vseid']) - session.add(binding) - - return binding - - -def get_vcns_edge_monitor_binding(session, id, edge_id): - with session.begin(subtransactions=True): - return (session.query(vcns_models.VcnsEdgeMonitorBinding). 
- filter_by(monitor_id=id, edge_id=edge_id).first()) - - -def delete_vcns_edge_monitor_binding(session, id, edge_id): - with session.begin(subtransactions=True): - qry = session.query(vcns_models.VcnsEdgeMonitorBinding) - if not qry.filter_by(monitor_id=id, edge_id=edge_id).delete(): - msg = _("Monitor Resource binding with id:%s not found!") % id - LOG.exception(msg) - raise nsx_exc.NsxPluginException(err_msg=msg) diff --git a/neutron/plugins/vmware/dbexts/vcns_models.py b/neutron/plugins/vmware/dbexts/vcns_models.py deleted file mode 100644 index 847161358..000000000 --- a/neutron/plugins/vmware/dbexts/vcns_models.py +++ /dev/null @@ -1,90 +0,0 @@ -# Copyright 2013 VMware, Inc. -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -import sqlalchemy as sa - -from neutron.db import model_base -from neutron.db import models_v2 - - -class VcnsRouterBinding(model_base.BASEV2, models_v2.HasStatusDescription): - """Represents the mapping between neutron router and vShield Edge.""" - - __tablename__ = 'vcns_router_bindings' - - # no ForeignKey to routers.id because for now, a router can be removed - # from routers when delete_router is executed, but the binding is only - # removed after the Edge is deleted - router_id = sa.Column(sa.String(36), - primary_key=True) - edge_id = sa.Column(sa.String(16), - nullable=True) - lswitch_id = sa.Column(sa.String(36), - nullable=False) - - -# -# VCNS Edge FW mapping tables -# -class VcnsEdgeFirewallRuleBinding(model_base.BASEV2): - """1:1 mapping between firewall rule and edge firewall rule_id.""" - - __tablename__ = 'vcns_firewall_rule_bindings' - - rule_id = sa.Column(sa.String(36), - sa.ForeignKey("firewall_rules.id"), - primary_key=True) - edge_id = sa.Column(sa.String(36), primary_key=True) - rule_vseid = sa.Column(sa.String(36)) - - -class VcnsEdgePoolBinding(model_base.BASEV2): - """Represents the mapping between neutron pool and Edge pool.""" - - __tablename__ = 'vcns_edge_pool_bindings' - - pool_id = sa.Column(sa.String(36), - sa.ForeignKey("pools.id", ondelete="CASCADE"), - primary_key=True) - edge_id = sa.Column(sa.String(36), primary_key=True) - pool_vseid = sa.Column(sa.String(36)) - - -class VcnsEdgeVipBinding(model_base.BASEV2): - """Represents the mapping between neutron vip and Edge vip.""" - - __tablename__ = 'vcns_edge_vip_bindings' - - vip_id = sa.Column(sa.String(36), - sa.ForeignKey("vips.id", ondelete="CASCADE"), - primary_key=True) - edge_id = sa.Column(sa.String(36)) - vip_vseid = sa.Column(sa.String(36)) - app_profileid = sa.Column(sa.String(36)) - - -class VcnsEdgeMonitorBinding(model_base.BASEV2): - """Represents the mapping between neutron monitor and Edge monitor.""" - - __tablename__ = 'vcns_edge_monitor_bindings' - - 
monitor_id = sa.Column(sa.String(36), - sa.ForeignKey("healthmonitors.id", - ondelete="CASCADE"), - primary_key=True) - edge_id = sa.Column(sa.String(36), primary_key=True) - monitor_vseid = sa.Column(sa.String(36)) diff --git a/neutron/plugins/vmware/dhcp_meta/__init__.py b/neutron/plugins/vmware/dhcp_meta/__init__.py deleted file mode 100644 index c020e3bcd..000000000 --- a/neutron/plugins/vmware/dhcp_meta/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2013 VMware, Inc. -# All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. diff --git a/neutron/plugins/vmware/dhcp_meta/combined.py b/neutron/plugins/vmware/dhcp_meta/combined.py deleted file mode 100644 index 36ba563e8..000000000 --- a/neutron/plugins/vmware/dhcp_meta/combined.py +++ /dev/null @@ -1,95 +0,0 @@ -# Copyright 2014 VMware, Inc. -# -# All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api -from neutron.common import constants as const -from neutron.common import topics -from neutron.plugins.vmware.dhcp_meta import nsx as nsx_svc -from neutron.plugins.vmware.dhcp_meta import rpc as nsx_rpc - - -class DhcpAgentNotifyAPI(dhcp_rpc_agent_api.DhcpAgentNotifyAPI): - - def __init__(self, plugin, manager): - super(DhcpAgentNotifyAPI, self).__init__(topic=topics.DHCP_AGENT) - self.agentless_notifier = nsx_svc.DhcpAgentNotifyAPI(plugin, manager) - - def notify(self, context, data, methodname): - [resource, action, _e] = methodname.split('.') - lsn_manager = self.agentless_notifier.plugin.lsn_manager - plugin = self.agentless_notifier.plugin - if resource == 'network': - net_id = data['network']['id'] - elif resource in ['port', 'subnet']: - net_id = data[resource]['network_id'] - else: - # no valid resource - return - lsn_exists = lsn_manager.lsn_exists(context, net_id) - treat_dhcp_owner_specially = False - if lsn_exists: - # if lsn exists, the network is one created with the new model - if (resource == 'subnet' and action == 'create' and - const.DEVICE_OWNER_DHCP not in plugin.port_special_owners): - # network/subnet provisioned in the new model have a plain - # nsx lswitch port, no vif attachment - plugin.port_special_owners.append(const.DEVICE_OWNER_DHCP) - treat_dhcp_owner_specially = True - if (resource == 'port' and action == 'update' or - resource == 'subnet'): - self.agentless_notifier.notify(context, data, methodname) - elif not lsn_exists and resource in ['port', 'subnet']: - # call notifier for the agent-based mode - super(DhcpAgentNotifyAPI, self).notify(context, data, methodname) - if treat_dhcp_owner_specially: - # if subnets belong to networks created with the old model - # dhcp port does not need to be special cased, so put things - # back, since they were modified - plugin.port_special_owners.remove(const.DEVICE_OWNER_DHCP) - - -def handle_network_dhcp_access(plugin, context, 
network, action): - nsx_svc.handle_network_dhcp_access(plugin, context, network, action) - - -def handle_port_dhcp_access(plugin, context, port, action): - if plugin.lsn_manager.lsn_exists(context, port['network_id']): - nsx_svc.handle_port_dhcp_access(plugin, context, port, action) - else: - nsx_rpc.handle_port_dhcp_access(plugin, context, port, action) - - -def handle_port_metadata_access(plugin, context, port, is_delete=False): - if plugin.lsn_manager.lsn_exists(context, port['network_id']): - nsx_svc.handle_port_metadata_access(plugin, context, port, is_delete) - else: - nsx_rpc.handle_port_metadata_access(plugin, context, port, is_delete) - - -def handle_router_metadata_access(plugin, context, router_id, interface=None): - if interface: - subnet = plugin.get_subnet(context, interface['subnet_id']) - network_id = subnet['network_id'] - if plugin.lsn_manager.lsn_exists(context, network_id): - nsx_svc.handle_router_metadata_access( - plugin, context, router_id, interface) - else: - nsx_rpc.handle_router_metadata_access( - plugin, context, router_id, interface) - else: - nsx_rpc.handle_router_metadata_access( - plugin, context, router_id, interface) diff --git a/neutron/plugins/vmware/dhcp_meta/constants.py b/neutron/plugins/vmware/dhcp_meta/constants.py deleted file mode 100644 index 1e9476a5b..000000000 --- a/neutron/plugins/vmware/dhcp_meta/constants.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright 2014 VMware, Inc. -# -# All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -# -from neutron.common import constants as const -from neutron.db import l3_db - -# A unique MAC to quickly identify the LSN port used for metadata services -# when dhcp on the subnet is off. Inspired by leet-speak for 'metadata'. -METADATA_MAC = "fa:15:73:74:d4:74" -METADATA_PORT_ID = 'metadata:id' -METADATA_PORT_NAME = 'metadata:name' -METADATA_DEVICE_ID = 'metadata:device' -SPECIAL_OWNERS = (const.DEVICE_OWNER_DHCP, - const.DEVICE_OWNER_ROUTER_GW, - l3_db.DEVICE_OWNER_ROUTER_INTF) diff --git a/neutron/plugins/vmware/dhcp_meta/lsnmanager.py b/neutron/plugins/vmware/dhcp_meta/lsnmanager.py deleted file mode 100644 index 1e8f9cb55..000000000 --- a/neutron/plugins/vmware/dhcp_meta/lsnmanager.py +++ /dev/null @@ -1,462 +0,0 @@ -# Copyright 2014 VMware, Inc. -# -# All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -from oslo.config import cfg - -from neutron.common import exceptions as n_exc -from neutron.openstack.common.db import exception as db_exc -from neutron.openstack.common import excutils -from neutron.openstack.common import log as logging -from neutron.plugins.vmware.api_client import exception as api_exc -from neutron.plugins.vmware.common import exceptions as p_exc -from neutron.plugins.vmware.common import nsx_utils -from neutron.plugins.vmware.dbexts import lsn_db -from neutron.plugins.vmware.dhcp_meta import constants as const -from neutron.plugins.vmware.nsxlib import lsn as lsn_api -from neutron.plugins.vmware.nsxlib import switch as switch_api - -LOG = logging.getLogger(__name__) - -META_CONF = 'metadata-proxy' -DHCP_CONF = 'dhcp' - - -lsn_opts = [ - cfg.BoolOpt('sync_on_missing_data', default=False, - help=_('Pull LSN information from NSX in case it is missing ' - 'from the local data store. This is useful to rebuild ' - 'the local store in case of server recovery.')) -] - - -def register_lsn_opts(config): - config.CONF.register_opts(lsn_opts, "NSX_LSN") - - -class LsnManager(object): - """Manage LSN entities associated with networks.""" - - def __init__(self, plugin): - self.plugin = plugin - - @property - def cluster(self): - return self.plugin.cluster - - def lsn_exists(self, context, network_id): - """Return True if a Logical Service Node exists for the network.""" - return self.lsn_get( - context, network_id, raise_on_err=False) is not None - - def lsn_get(self, context, network_id, raise_on_err=True): - """Retrieve the LSN id associated to the network.""" - try: - return lsn_api.lsn_for_network_get(self.cluster, network_id) - except (n_exc.NotFound, api_exc.NsxApiException): - logger = raise_on_err and LOG.error or LOG.warn - logger(_('Unable to find Logical Service Node for ' - 'network %s'), network_id) - if raise_on_err: - raise p_exc.LsnNotFound(entity='network', - entity_id=network_id) - - def lsn_create(self, context, network_id): - 
"""Create a LSN associated to the network.""" - try: - return lsn_api.lsn_for_network_create(self.cluster, network_id) - except api_exc.NsxApiException: - err_msg = _('Unable to create LSN for network %s') % network_id - raise p_exc.NsxPluginException(err_msg=err_msg) - - def lsn_delete(self, context, lsn_id): - """Delete a LSN given its id.""" - try: - lsn_api.lsn_delete(self.cluster, lsn_id) - except (n_exc.NotFound, api_exc.NsxApiException): - LOG.warn(_('Unable to delete Logical Service Node %s'), lsn_id) - - def lsn_delete_by_network(self, context, network_id): - """Delete a LSN associated to the network.""" - lsn_id = self.lsn_get(context, network_id, raise_on_err=False) - if lsn_id: - self.lsn_delete(context, lsn_id) - - def lsn_port_get(self, context, network_id, subnet_id, raise_on_err=True): - """Retrieve LSN and LSN port for the network and the subnet.""" - lsn_id = self.lsn_get(context, network_id, raise_on_err=raise_on_err) - if lsn_id: - try: - lsn_port_id = lsn_api.lsn_port_by_subnet_get( - self.cluster, lsn_id, subnet_id) - except (n_exc.NotFound, api_exc.NsxApiException): - logger = raise_on_err and LOG.error or LOG.warn - logger(_('Unable to find Logical Service Node Port for ' - 'LSN %(lsn_id)s and subnet %(subnet_id)s') - % {'lsn_id': lsn_id, 'subnet_id': subnet_id}) - if raise_on_err: - raise p_exc.LsnPortNotFound(lsn_id=lsn_id, - entity='subnet', - entity_id=subnet_id) - return (lsn_id, None) - else: - return (lsn_id, lsn_port_id) - else: - return (None, None) - - def lsn_port_get_by_mac(self, context, network_id, mac, raise_on_err=True): - """Retrieve LSN and LSN port given network and mac address.""" - lsn_id = self.lsn_get(context, network_id, raise_on_err=raise_on_err) - if lsn_id: - try: - lsn_port_id = lsn_api.lsn_port_by_mac_get( - self.cluster, lsn_id, mac) - except (n_exc.NotFound, api_exc.NsxApiException): - logger = raise_on_err and LOG.error or LOG.warn - logger(_('Unable to find Logical Service Node Port for ' - 'LSN %(lsn_id)s 
and mac address %(mac)s') - % {'lsn_id': lsn_id, 'mac': mac}) - if raise_on_err: - raise p_exc.LsnPortNotFound(lsn_id=lsn_id, - entity='MAC', - entity_id=mac) - return (lsn_id, None) - else: - return (lsn_id, lsn_port_id) - else: - return (None, None) - - def lsn_port_create(self, context, lsn_id, subnet_info): - """Create and return LSN port for associated subnet.""" - try: - return lsn_api.lsn_port_create(self.cluster, lsn_id, subnet_info) - except n_exc.NotFound: - raise p_exc.LsnNotFound(entity='', entity_id=lsn_id) - except api_exc.NsxApiException: - err_msg = _('Unable to create port for LSN %s') % lsn_id - raise p_exc.NsxPluginException(err_msg=err_msg) - - def lsn_port_delete(self, context, lsn_id, lsn_port_id): - """Delete a LSN port from the Logical Service Node.""" - try: - lsn_api.lsn_port_delete(self.cluster, lsn_id, lsn_port_id) - except (n_exc.NotFound, api_exc.NsxApiException): - LOG.warn(_('Unable to delete LSN Port %s'), lsn_port_id) - - def lsn_port_dispose(self, context, network_id, mac_address): - """Delete a LSN port given the network and the mac address.""" - lsn_id, lsn_port_id = self.lsn_port_get_by_mac( - context, network_id, mac_address, raise_on_err=False) - if lsn_port_id: - self.lsn_port_delete(context, lsn_id, lsn_port_id) - if mac_address == const.METADATA_MAC: - try: - lswitch_port_id = switch_api.get_port_by_neutron_tag( - self.cluster, network_id, - const.METADATA_PORT_ID)['uuid'] - switch_api.delete_port( - self.cluster, network_id, lswitch_port_id) - except (n_exc.PortNotFoundOnNetwork, - api_exc.NsxApiException): - LOG.warn(_("Metadata port not found while attempting " - "to delete it from network %s"), network_id) - else: - LOG.warn(_("Unable to find Logical Services Node " - "Port with MAC %s"), mac_address) - - def lsn_port_dhcp_setup( - self, context, network_id, port_id, port_data, subnet_config=None): - """Connect network to LSN via specified port and port_data.""" - try: - lsn_id = None - switch_id = 
nsx_utils.get_nsx_switch_ids( - context.session, self.cluster, network_id)[0] - lswitch_port_id = switch_api.get_port_by_neutron_tag( - self.cluster, switch_id, port_id)['uuid'] - lsn_id = self.lsn_get(context, network_id) - lsn_port_id = self.lsn_port_create(context, lsn_id, port_data) - except (n_exc.NotFound, p_exc.NsxPluginException): - raise p_exc.PortConfigurationError( - net_id=network_id, lsn_id=lsn_id, port_id=port_id) - else: - try: - lsn_api.lsn_port_plug_network( - self.cluster, lsn_id, lsn_port_id, lswitch_port_id) - except p_exc.LsnConfigurationConflict: - self.lsn_port_delete(context, lsn_id, lsn_port_id) - raise p_exc.PortConfigurationError( - net_id=network_id, lsn_id=lsn_id, port_id=port_id) - if subnet_config: - self.lsn_port_dhcp_configure( - context, lsn_id, lsn_port_id, subnet_config) - else: - return (lsn_id, lsn_port_id) - - def lsn_port_metadata_setup(self, context, lsn_id, subnet): - """Connect subnet to specified LSN.""" - data = { - "mac_address": const.METADATA_MAC, - "ip_address": subnet['cidr'], - "subnet_id": subnet['id'] - } - network_id = subnet['network_id'] - tenant_id = subnet['tenant_id'] - lswitch_port_id = None - try: - switch_id = nsx_utils.get_nsx_switch_ids( - context.session, self.cluster, network_id)[0] - lswitch_port_id = switch_api.create_lport( - self.cluster, switch_id, tenant_id, - const.METADATA_PORT_ID, const.METADATA_PORT_NAME, - const.METADATA_DEVICE_ID, True)['uuid'] - lsn_port_id = self.lsn_port_create(context, lsn_id, data) - except (n_exc.NotFound, p_exc.NsxPluginException, - api_exc.NsxApiException): - raise p_exc.PortConfigurationError( - net_id=network_id, lsn_id=lsn_id, port_id=lswitch_port_id) - else: - try: - lsn_api.lsn_port_plug_network( - self.cluster, lsn_id, lsn_port_id, lswitch_port_id) - except p_exc.LsnConfigurationConflict: - self.lsn_port_delete(self.cluster, lsn_id, lsn_port_id) - switch_api.delete_port( - self.cluster, network_id, lswitch_port_id) - raise p_exc.PortConfigurationError( - 
net_id=network_id, lsn_id=lsn_id, port_id=lsn_port_id) - - def lsn_port_dhcp_configure(self, context, lsn_id, lsn_port_id, subnet): - """Enable/disable dhcp services with the given config options.""" - is_enabled = subnet["enable_dhcp"] - dhcp_options = { - "domain_name": cfg.CONF.NSX_DHCP.domain_name, - "default_lease_time": cfg.CONF.NSX_DHCP.default_lease_time, - } - dns_servers = cfg.CONF.NSX_DHCP.extra_domain_name_servers or [] - dns_servers.extend(subnet["dns_nameservers"]) - if subnet['gateway_ip']: - dhcp_options["routers"] = subnet["gateway_ip"] - if dns_servers: - dhcp_options["domain_name_servers"] = ",".join(dns_servers) - if subnet["host_routes"]: - dhcp_options["classless_static_routes"] = ( - ",".join(subnet["host_routes"]) - ) - try: - lsn_api.lsn_port_dhcp_configure( - self.cluster, lsn_id, lsn_port_id, is_enabled, dhcp_options) - except (n_exc.NotFound, api_exc.NsxApiException): - err_msg = (_('Unable to configure dhcp for Logical Service ' - 'Node %(lsn_id)s and port %(lsn_port_id)s') - % {'lsn_id': lsn_id, 'lsn_port_id': lsn_port_id}) - LOG.error(err_msg) - raise p_exc.NsxPluginException(err_msg=err_msg) - - def lsn_metadata_configure(self, context, subnet_id, is_enabled): - """Configure metadata service for the specified subnet.""" - subnet = self.plugin.get_subnet(context, subnet_id) - network_id = subnet['network_id'] - meta_conf = cfg.CONF.NSX_METADATA - metadata_options = { - 'metadata_server_ip': meta_conf.metadata_server_address, - 'metadata_server_port': meta_conf.metadata_server_port, - 'metadata_proxy_shared_secret': meta_conf.metadata_shared_secret - } - try: - lsn_id = self.lsn_get(context, network_id) - lsn_api.lsn_metadata_configure( - self.cluster, lsn_id, is_enabled, metadata_options) - except (p_exc.LsnNotFound, api_exc.NsxApiException): - err_msg = (_('Unable to configure metadata ' - 'for subnet %s') % subnet_id) - LOG.error(err_msg) - raise p_exc.NsxPluginException(err_msg=err_msg) - if is_enabled: - try: - # test that the lsn 
port exists - self.lsn_port_get(context, network_id, subnet_id) - except p_exc.LsnPortNotFound: - # this might happen if subnet had dhcp off when created - # so create one, and wire it - self.lsn_port_metadata_setup(context, lsn_id, subnet) - else: - self.lsn_port_dispose(context, network_id, const.METADATA_MAC) - - def _lsn_port_host_conf(self, context, network_id, subnet_id, data, hdlr): - lsn_id, lsn_port_id = self.lsn_port_get( - context, network_id, subnet_id, raise_on_err=False) - try: - if lsn_id and lsn_port_id: - hdlr(self.cluster, lsn_id, lsn_port_id, data) - except (n_exc.NotFound, api_exc.NsxApiException): - LOG.error(_('Error while configuring LSN ' - 'port %s'), lsn_port_id) - raise p_exc.PortConfigurationError( - net_id=network_id, lsn_id=lsn_id, port_id=lsn_port_id) - - def lsn_port_dhcp_host_add(self, context, network_id, subnet_id, host): - """Add dhcp host entry to LSN port configuration.""" - self._lsn_port_host_conf(context, network_id, subnet_id, host, - lsn_api.lsn_port_dhcp_host_add) - - def lsn_port_dhcp_host_remove(self, context, network_id, subnet_id, host): - """Remove dhcp host entry from LSN port configuration.""" - self._lsn_port_host_conf(context, network_id, subnet_id, host, - lsn_api.lsn_port_dhcp_host_remove) - - def lsn_port_meta_host_add(self, context, network_id, subnet_id, host): - """Add dhcp host entry to LSN port configuration.""" - self._lsn_port_host_conf(context, network_id, subnet_id, host, - lsn_api.lsn_port_metadata_host_add) - - def lsn_port_meta_host_remove(self, context, network_id, subnet_id, host): - """Remove dhcp host entry from LSN port configuration.""" - self._lsn_port_host_conf(context, network_id, subnet_id, host, - lsn_api.lsn_port_metadata_host_remove) - - def lsn_port_update( - self, context, network_id, subnet_id, dhcp=None, meta=None): - """Update the specified configuration for the LSN port.""" - if not dhcp and not meta: - return - try: - lsn_id, lsn_port_id = self.lsn_port_get( - context, 
network_id, subnet_id, raise_on_err=False) - if dhcp and lsn_id and lsn_port_id: - lsn_api.lsn_port_host_entries_update( - self.cluster, lsn_id, lsn_port_id, DHCP_CONF, dhcp) - if meta and lsn_id and lsn_port_id: - lsn_api.lsn_port_host_entries_update( - self.cluster, lsn_id, lsn_port_id, META_CONF, meta) - except api_exc.NsxApiException: - raise p_exc.PortConfigurationError( - net_id=network_id, lsn_id=lsn_id, port_id=lsn_port_id) - - -class PersistentLsnManager(LsnManager): - """Add local persistent state to LSN Manager.""" - - def __init__(self, plugin): - super(PersistentLsnManager, self).__init__(plugin) - self.sync_on_missing = cfg.CONF.NSX_LSN.sync_on_missing_data - - def lsn_get(self, context, network_id, raise_on_err=True): - try: - obj = lsn_db.lsn_get_for_network( - context, network_id, raise_on_err=raise_on_err) - return obj.lsn_id if obj else None - except p_exc.LsnNotFound: - with excutils.save_and_reraise_exception() as ctxt: - ctxt.reraise = False - if self.sync_on_missing: - lsn_id = super(PersistentLsnManager, self).lsn_get( - context, network_id, raise_on_err=raise_on_err) - self.lsn_save(context, network_id, lsn_id) - return lsn_id - if raise_on_err: - ctxt.reraise = True - - def lsn_save(self, context, network_id, lsn_id): - """Save LSN-Network mapping to the DB.""" - try: - lsn_db.lsn_add(context, network_id, lsn_id) - except db_exc.DBError: - err_msg = _('Unable to save LSN for network %s') % network_id - LOG.exception(err_msg) - raise p_exc.NsxPluginException(err_msg=err_msg) - - def lsn_create(self, context, network_id): - lsn_id = super(PersistentLsnManager, - self).lsn_create(context, network_id) - try: - self.lsn_save(context, network_id, lsn_id) - except p_exc.NsxPluginException: - with excutils.save_and_reraise_exception(): - super(PersistentLsnManager, self).lsn_delete(context, lsn_id) - return lsn_id - - def lsn_delete(self, context, lsn_id): - lsn_db.lsn_remove(context, lsn_id) - super(PersistentLsnManager, self).lsn_delete(context, 
lsn_id) - - def lsn_port_get(self, context, network_id, subnet_id, raise_on_err=True): - try: - obj = lsn_db.lsn_port_get_for_subnet( - context, subnet_id, raise_on_err=raise_on_err) - return (obj.lsn_id, obj.lsn_port_id) if obj else (None, None) - except p_exc.LsnPortNotFound: - with excutils.save_and_reraise_exception() as ctxt: - ctxt.reraise = False - if self.sync_on_missing: - lsn_id, lsn_port_id = ( - super(PersistentLsnManager, self).lsn_port_get( - context, network_id, subnet_id, - raise_on_err=raise_on_err)) - mac_addr = lsn_api.lsn_port_info_get( - self.cluster, lsn_id, lsn_port_id)['mac_address'] - self.lsn_port_save( - context, lsn_port_id, subnet_id, mac_addr, lsn_id) - return (lsn_id, lsn_port_id) - if raise_on_err: - ctxt.reraise = True - - def lsn_port_get_by_mac(self, context, network_id, mac, raise_on_err=True): - try: - obj = lsn_db.lsn_port_get_for_mac( - context, mac, raise_on_err=raise_on_err) - return (obj.lsn_id, obj.lsn_port_id) if obj else (None, None) - except p_exc.LsnPortNotFound: - with excutils.save_and_reraise_exception() as ctxt: - ctxt.reraise = False - if self.sync_on_missing: - lsn_id, lsn_port_id = ( - super(PersistentLsnManager, self).lsn_port_get_by_mac( - context, network_id, mac, - raise_on_err=raise_on_err)) - subnet_id = lsn_api.lsn_port_info_get( - self.cluster, lsn_id, lsn_port_id).get('subnet_id') - self.lsn_port_save( - context, lsn_port_id, subnet_id, mac, lsn_id) - return (lsn_id, lsn_port_id) - if raise_on_err: - ctxt.reraise = True - - def lsn_port_save(self, context, lsn_port_id, subnet_id, mac_addr, lsn_id): - """Save LSN Port information to the DB.""" - try: - lsn_db.lsn_port_add_for_lsn( - context, lsn_port_id, subnet_id, mac_addr, lsn_id) - except db_exc.DBError: - err_msg = _('Unable to save LSN port for subnet %s') % subnet_id - LOG.exception(err_msg) - raise p_exc.NsxPluginException(err_msg=err_msg) - - def lsn_port_create(self, context, lsn_id, subnet_info): - lsn_port_id = super(PersistentLsnManager, - 
self).lsn_port_create(context, lsn_id, subnet_info) - try: - self.lsn_port_save(context, lsn_port_id, subnet_info['subnet_id'], - subnet_info['mac_address'], lsn_id) - except p_exc.NsxPluginException: - with excutils.save_and_reraise_exception(): - super(PersistentLsnManager, self).lsn_port_delete( - context, lsn_id, lsn_port_id) - return lsn_port_id - - def lsn_port_delete(self, context, lsn_id, lsn_port_id): - lsn_db.lsn_port_remove(context, lsn_port_id) - super(PersistentLsnManager, self).lsn_port_delete( - context, lsn_id, lsn_port_id) diff --git a/neutron/plugins/vmware/dhcp_meta/migration.py b/neutron/plugins/vmware/dhcp_meta/migration.py deleted file mode 100644 index 0f1b32b77..000000000 --- a/neutron/plugins/vmware/dhcp_meta/migration.py +++ /dev/null @@ -1,180 +0,0 @@ -# Copyright 2014 VMware, Inc. -# -# All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -from neutron.common import constants as const -from neutron.common import exceptions as n_exc -from neutron.extensions import external_net -from neutron.openstack.common import log as logging -from neutron.plugins.vmware.common import exceptions as p_exc -from neutron.plugins.vmware.dhcp_meta import nsx -from neutron.plugins.vmware.dhcp_meta import rpc - -LOG = logging.getLogger(__name__) - - -class DhcpMetadataBuilder(object): - - def __init__(self, plugin, agent_notifier): - self.plugin = plugin - self.notifier = agent_notifier - - def dhcp_agent_get_all(self, context, network_id): - """Return the agents managing the network.""" - return self.plugin.list_dhcp_agents_hosting_network( - context, network_id)['agents'] - - def dhcp_port_get_all(self, context, network_id): - """Return the dhcp ports allocated for the network.""" - filters = { - 'network_id': [network_id], - 'device_owner': [const.DEVICE_OWNER_DHCP] - } - return self.plugin.get_ports(context, filters=filters) - - def router_id_get(self, context, subnet=None): - """Return the router and interface used for the subnet.""" - if not subnet: - return - network_id = subnet['network_id'] - filters = { - 'network_id': [network_id], - 'device_owner': [const.DEVICE_OWNER_ROUTER_INTF] - } - ports = self.plugin.get_ports(context, filters=filters) - for port in ports: - if port['fixed_ips'][0]['subnet_id'] == subnet['id']: - return port['device_id'] - - def metadata_deallocate(self, context, router_id, subnet_id): - """Deallocate metadata services for the subnet.""" - interface = {'subnet_id': subnet_id} - self.plugin.remove_router_interface(context, router_id, interface) - - def metadata_allocate(self, context, router_id, subnet_id): - """Allocate metadata resources for the subnet via the router.""" - interface = {'subnet_id': subnet_id} - self.plugin.add_router_interface(context, router_id, interface) - - def dhcp_deallocate(self, context, network_id, agents, ports): - """Deallocate dhcp resources for the 
network.""" - for agent in agents: - self.plugin.remove_network_from_dhcp_agent( - context, agent['id'], network_id) - for port in ports: - try: - self.plugin.delete_port(context, port['id']) - except n_exc.PortNotFound: - LOG.error(_('Port %s is already gone'), port['id']) - - def dhcp_allocate(self, context, network_id, subnet): - """Allocate dhcp resources for the subnet.""" - # Create LSN resources - network_data = {'id': network_id} - nsx.handle_network_dhcp_access(self.plugin, context, - network_data, 'create_network') - if subnet: - subnet_data = {'subnet': subnet} - self.notifier.notify(context, subnet_data, 'subnet.create.end') - # Get DHCP host and metadata entries created for the LSN - port = { - 'network_id': network_id, - 'fixed_ips': [{'subnet_id': subnet['id']}] - } - self.notifier.notify(context, {'port': port}, 'port.update.end') - - -class MigrationManager(object): - - def __init__(self, plugin, lsn_manager, agent_notifier): - self.plugin = plugin - self.manager = lsn_manager - self.builder = DhcpMetadataBuilder(plugin, agent_notifier) - - def validate(self, context, network_id): - """Validate and return subnet's dhcp info for migration.""" - network = self.plugin.get_network(context, network_id) - - if self.manager.lsn_exists(context, network_id): - reason = _("LSN already exist") - raise p_exc.LsnMigrationConflict(net_id=network_id, reason=reason) - - if network[external_net.EXTERNAL]: - reason = _("Cannot migrate an external network") - raise n_exc.BadRequest(resource='network', msg=reason) - - filters = {'network_id': [network_id]} - subnets = self.plugin.get_subnets(context, filters=filters) - count = len(subnets) - if count == 0: - return None - elif count == 1 and subnets[0]['cidr'] == rpc.METADATA_SUBNET_CIDR: - reason = _("Cannot migrate a 'metadata' network") - raise n_exc.BadRequest(resource='network', msg=reason) - elif count > 1: - reason = _("Unable to support multiple subnets per network") - raise 
p_exc.LsnMigrationConflict(net_id=network_id, reason=reason) - else: - return subnets[0] - - def migrate(self, context, network_id, subnet=None): - """Migrate subnet resources to LSN.""" - router_id = self.builder.router_id_get(context, subnet) - if router_id and subnet: - # Deallocate resources taken for the router, if any - self.builder.metadata_deallocate(context, router_id, subnet['id']) - if subnet: - # Deallocate reources taken for the agent, if any - agents = self.builder.dhcp_agent_get_all(context, network_id) - ports = self.builder.dhcp_port_get_all(context, network_id) - self.builder.dhcp_deallocate(context, network_id, agents, ports) - # (re)create the configuration for LSN - self.builder.dhcp_allocate(context, network_id, subnet) - if router_id and subnet: - # Allocate resources taken for the router, if any - self.builder.metadata_allocate(context, router_id, subnet['id']) - - def report(self, context, network_id, subnet_id=None): - """Return a report of the dhcp and metadata resources in use.""" - if subnet_id: - lsn_id, lsn_port_id = self.manager.lsn_port_get( - context, network_id, subnet_id, raise_on_err=False) - else: - filters = {'network_id': [network_id]} - subnets = self.plugin.get_subnets(context, filters=filters) - if subnets: - lsn_id, lsn_port_id = self.manager.lsn_port_get( - context, network_id, subnets[0]['id'], raise_on_err=False) - else: - lsn_id = self.manager.lsn_get(context, network_id, - raise_on_err=False) - lsn_port_id = None - if lsn_id: - ports = [lsn_port_id] if lsn_port_id else [] - report = { - 'type': 'lsn', - 'services': [lsn_id], - 'ports': ports - } - else: - agents = self.builder.dhcp_agent_get_all(context, network_id) - ports = self.builder.dhcp_port_get_all(context, network_id) - report = { - 'type': 'agent', - 'services': [a['id'] for a in agents], - 'ports': [p['id'] for p in ports] - } - return report diff --git a/neutron/plugins/vmware/dhcp_meta/nsx.py b/neutron/plugins/vmware/dhcp_meta/nsx.py deleted file mode 
100644 index 5c1f3971a..000000000 --- a/neutron/plugins/vmware/dhcp_meta/nsx.py +++ /dev/null @@ -1,321 +0,0 @@ -# Copyright 2013 VMware, Inc. - -# All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from oslo.config import cfg - -from neutron.api.v2 import attributes as attr -from neutron.common import constants as const -from neutron.common import exceptions as n_exc -from neutron.db import db_base_plugin_v2 -from neutron.db import l3_db -from neutron.extensions import external_net -from neutron.openstack.common import excutils -from neutron.openstack.common import log as logging -from neutron.plugins.vmware.common import exceptions as p_exc -from neutron.plugins.vmware.dhcp_meta import constants as d_const -from neutron.plugins.vmware.nsxlib import lsn as lsn_api - -LOG = logging.getLogger(__name__) - - -dhcp_opts = [ - cfg.ListOpt('extra_domain_name_servers', - deprecated_group='NVP_DHCP', - default=[], - help=_('Comma separated list of additional ' - 'domain name servers')), - cfg.StrOpt('domain_name', - deprecated_group='NVP_DHCP', - default='openstacklocal', - help=_('Domain to use for building the hostnames')), - cfg.IntOpt('default_lease_time', default=43200, - deprecated_group='NVP_DHCP', - help=_("Default DHCP lease time")), -] - - -metadata_opts = [ - cfg.StrOpt('metadata_server_address', - deprecated_group='NVP_METADATA', - default='127.0.0.1', - help=_("IP address used by Metadata server.")), - cfg.IntOpt('metadata_server_port', - 
deprecated_group='NVP_METADATA', - default=8775, - help=_("TCP Port used by Metadata server.")), - cfg.StrOpt('metadata_shared_secret', - deprecated_group='NVP_METADATA', - default='', - help=_('Shared secret to sign instance-id request'), - secret=True) -] - - -def register_dhcp_opts(config): - config.CONF.register_opts(dhcp_opts, group="NSX_DHCP") - - -def register_metadata_opts(config): - config.CONF.register_opts(metadata_opts, group="NSX_METADATA") - - -class DhcpAgentNotifyAPI(object): - - def __init__(self, plugin, lsn_manager): - self.plugin = plugin - self.lsn_manager = lsn_manager - self._handle_subnet_dhcp_access = {'create': self._subnet_create, - 'update': self._subnet_update, - 'delete': self._subnet_delete} - - def notify(self, context, data, methodname): - [resource, action, _e] = methodname.split('.') - if resource == 'subnet': - self._handle_subnet_dhcp_access[action](context, data['subnet']) - elif resource == 'port' and action == 'update': - self._port_update(context, data['port']) - - def _port_update(self, context, port): - # With no fixed IP's there's nothing that can be updated - if not port["fixed_ips"]: - return - network_id = port['network_id'] - subnet_id = port["fixed_ips"][0]['subnet_id'] - filters = {'network_id': [network_id]} - # Because NSX does not support updating a single host entry we - # got to build the whole list from scratch and update in bulk - ports = self.plugin.get_ports(context, filters) - if not ports: - return - dhcp_conf = [ - {'mac_address': p['mac_address'], - 'ip_address': p["fixed_ips"][0]['ip_address']} - for p in ports if is_user_port(p) - ] - meta_conf = [ - {'instance_id': p['device_id'], - 'ip_address': p["fixed_ips"][0]['ip_address']} - for p in ports if is_user_port(p, check_dev_id=True) - ] - self.lsn_manager.lsn_port_update( - context, network_id, subnet_id, dhcp=dhcp_conf, meta=meta_conf) - - def _subnet_create(self, context, subnet, clean_on_err=True): - if subnet['enable_dhcp']: - network_id = 
subnet['network_id'] - # Create port for DHCP service - dhcp_port = { - "name": "", - "admin_state_up": True, - "device_id": "", - "device_owner": const.DEVICE_OWNER_DHCP, - "network_id": network_id, - "tenant_id": subnet["tenant_id"], - "mac_address": attr.ATTR_NOT_SPECIFIED, - "fixed_ips": [{"subnet_id": subnet['id']}] - } - try: - # This will end up calling handle_port_dhcp_access - # down below as well as handle_port_metadata_access - self.plugin.create_port(context, {'port': dhcp_port}) - except p_exc.PortConfigurationError as e: - err_msg = (_("Error while creating subnet %(cidr)s for " - "network %(network)s. Please, contact " - "administrator") % - {"cidr": subnet["cidr"], - "network": network_id}) - LOG.error(err_msg) - db_base_plugin_v2.NeutronDbPluginV2.delete_port( - self.plugin, context, e.port_id) - if clean_on_err: - self.plugin.delete_subnet(context, subnet['id']) - raise n_exc.Conflict() - - def _subnet_update(self, context, subnet): - network_id = subnet['network_id'] - try: - lsn_id, lsn_port_id = self.lsn_manager.lsn_port_get( - context, network_id, subnet['id']) - self.lsn_manager.lsn_port_dhcp_configure( - context, lsn_id, lsn_port_id, subnet) - except p_exc.LsnPortNotFound: - # It's possible that the subnet was created with dhcp off; - # check if the subnet was uplinked onto a router, and if so - # remove the patch attachment between the metadata port and - # the lsn port, in favor on the one we'll be creating during - # _subnet_create - self.lsn_manager.lsn_port_dispose( - context, network_id, d_const.METADATA_MAC) - # also, check that a dhcp port exists first and provision it - # accordingly - filters = dict(network_id=[network_id], - device_owner=[const.DEVICE_OWNER_DHCP]) - ports = self.plugin.get_ports(context, filters=filters) - if ports: - handle_port_dhcp_access( - self.plugin, context, ports[0], 'create_port') - else: - self._subnet_create(context, subnet, clean_on_err=False) - - def _subnet_delete(self, context, subnet): - # 
FIXME(armando-migliaccio): it looks like that a subnet filter - # is ineffective; so filter by network for now. - network_id = subnet['network_id'] - filters = dict(network_id=[network_id], - device_owner=[const.DEVICE_OWNER_DHCP]) - # FIXME(armando-migliaccio): this may be race-y - ports = self.plugin.get_ports(context, filters=filters) - if ports: - # This will end up calling handle_port_dhcp_access - # down below as well as handle_port_metadata_access - self.plugin.delete_port(context, ports[0]['id']) - - -def is_user_port(p, check_dev_id=False): - usable = p['fixed_ips'] and p['device_owner'] not in d_const.SPECIAL_OWNERS - return usable if not check_dev_id else usable and p['device_id'] - - -def check_services_requirements(cluster): - ver = cluster.api_client.get_version() - # It sounds like 4.1 is the first one where DHCP in NSX - # will have the experimental feature - if ver.major >= 4 and ver.minor >= 1: - cluster_id = cfg.CONF.default_service_cluster_uuid - if not lsn_api.service_cluster_exists(cluster, cluster_id): - raise p_exc.ServiceClusterUnavailable(cluster_id=cluster_id) - else: - raise p_exc.InvalidVersion(version=ver) - - -def handle_network_dhcp_access(plugin, context, network, action): - LOG.info(_("Performing DHCP %(action)s for resource: %(resource)s") - % {"action": action, "resource": network}) - if action == 'create_network': - network_id = network['id'] - if network.get(external_net.EXTERNAL): - LOG.info(_("Network %s is external: no LSN to create"), network_id) - return - plugin.lsn_manager.lsn_create(context, network_id) - elif action == 'delete_network': - # NOTE(armando-migliaccio): on delete_network, network - # is just the network id - network_id = network - plugin.lsn_manager.lsn_delete_by_network(context, network_id) - LOG.info(_("Logical Services Node for network " - "%s configured successfully"), network_id) - - -def handle_port_dhcp_access(plugin, context, port, action): - LOG.info(_("Performing DHCP %(action)s for resource: 
%(resource)s") - % {"action": action, "resource": port}) - if port["device_owner"] == const.DEVICE_OWNER_DHCP: - network_id = port["network_id"] - if action == "create_port": - # at this point the port must have a subnet and a fixed ip - subnet_id = port["fixed_ips"][0]['subnet_id'] - subnet = plugin.get_subnet(context, subnet_id) - subnet_data = { - "mac_address": port["mac_address"], - "ip_address": subnet['cidr'], - "subnet_id": subnet['id'] - } - try: - plugin.lsn_manager.lsn_port_dhcp_setup( - context, network_id, port['id'], subnet_data, subnet) - except p_exc.PortConfigurationError: - err_msg = (_("Error while configuring DHCP for " - "port %s"), port['id']) - LOG.error(err_msg) - raise n_exc.NeutronException() - elif action == "delete_port": - plugin.lsn_manager.lsn_port_dispose(context, network_id, - port['mac_address']) - elif port["device_owner"] != const.DEVICE_OWNER_DHCP: - if port.get("fixed_ips"): - # do something only if there are IP's and dhcp is enabled - subnet_id = port["fixed_ips"][0]['subnet_id'] - if not plugin.get_subnet(context, subnet_id)['enable_dhcp']: - LOG.info(_("DHCP is disabled for subnet %s: nothing " - "to do"), subnet_id) - return - host_data = { - "mac_address": port["mac_address"], - "ip_address": port["fixed_ips"][0]['ip_address'] - } - network_id = port["network_id"] - if action == "create_port": - handler = plugin.lsn_manager.lsn_port_dhcp_host_add - elif action == "delete_port": - handler = plugin.lsn_manager.lsn_port_dhcp_host_remove - try: - handler(context, network_id, subnet_id, host_data) - except p_exc.PortConfigurationError: - with excutils.save_and_reraise_exception(): - if action == 'create_port': - db_base_plugin_v2.NeutronDbPluginV2.delete_port( - plugin, context, port['id']) - LOG.info(_("DHCP for port %s configured successfully"), port['id']) - - -def handle_port_metadata_access(plugin, context, port, is_delete=False): - if is_user_port(port, check_dev_id=True): - network_id = port["network_id"] - network = 
plugin.get_network(context, network_id) - if network[external_net.EXTERNAL]: - LOG.info(_("Network %s is external: nothing to do"), network_id) - return - subnet_id = port["fixed_ips"][0]['subnet_id'] - host_data = { - "instance_id": port["device_id"], - "tenant_id": port["tenant_id"], - "ip_address": port["fixed_ips"][0]['ip_address'] - } - LOG.info(_("Configuring metadata entry for port %s"), port) - if not is_delete: - handler = plugin.lsn_manager.lsn_port_meta_host_add - else: - handler = plugin.lsn_manager.lsn_port_meta_host_remove - try: - handler(context, network_id, subnet_id, host_data) - except p_exc.PortConfigurationError: - with excutils.save_and_reraise_exception(): - if not is_delete: - db_base_plugin_v2.NeutronDbPluginV2.delete_port( - plugin, context, port['id']) - LOG.info(_("Metadata for port %s configured successfully"), port['id']) - - -def handle_router_metadata_access(plugin, context, router_id, interface=None): - LOG.info(_("Handle metadata access via router: %(r)s and " - "interface %(i)s") % {'r': router_id, 'i': interface}) - if interface: - try: - plugin.get_port(context, interface['port_id']) - is_enabled = True - except n_exc.NotFound: - is_enabled = False - subnet_id = interface['subnet_id'] - try: - plugin.lsn_manager.lsn_metadata_configure( - context, subnet_id, is_enabled) - except p_exc.NsxPluginException: - with excutils.save_and_reraise_exception(): - if is_enabled: - l3_db.L3_NAT_db_mixin.remove_router_interface( - plugin, context, router_id, interface) - LOG.info(_("Metadata for router %s handled successfully"), router_id) diff --git a/neutron/plugins/vmware/dhcp_meta/rpc.py b/neutron/plugins/vmware/dhcp_meta/rpc.py deleted file mode 100644 index 9d409d01a..000000000 --- a/neutron/plugins/vmware/dhcp_meta/rpc.py +++ /dev/null @@ -1,222 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2013 VMware, Inc. 
-# All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from eventlet import greenthread -import netaddr -from oslo.config import cfg - -from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api -from neutron.api.v2 import attributes -from neutron.common import constants as const -from neutron.common import exceptions as ntn_exc -from neutron.common import rpc_compat -from neutron.db import db_base_plugin_v2 -from neutron.db import dhcp_rpc_base -from neutron.db import l3_db -from neutron.db import models_v2 -from neutron.openstack.common import log as logging -from neutron.plugins.vmware.api_client import exception as api_exc -from neutron.plugins.vmware.common import config -from neutron.plugins.vmware.common import exceptions as nsx_exc - -LOG = logging.getLogger(__name__) - -METADATA_DEFAULT_PREFIX = 30 -METADATA_SUBNET_CIDR = '169.254.169.252/%d' % METADATA_DEFAULT_PREFIX -METADATA_GATEWAY_IP = '169.254.169.253' -METADATA_DHCP_ROUTE = '169.254.169.254/32' - - -class NSXRpcCallbacks(rpc_compat.RpcCallback, - dhcp_rpc_base.DhcpRpcCallbackMixin): - - RPC_API_VERSION = '1.1' - - -def handle_network_dhcp_access(plugin, context, network, action): - pass - - -def handle_port_dhcp_access(plugin, context, port_data, action): - active_port = (cfg.CONF.NSX.metadata_mode == config.MetadataModes.INDIRECT - and port_data.get('device_owner') == const.DEVICE_OWNER_DHCP - and port_data.get('fixed_ips', [])) - if active_port: - subnet_id = 
port_data['fixed_ips'][0]['subnet_id'] - subnet = plugin.get_subnet(context, subnet_id) - _notify_rpc_agent(context, {'subnet': subnet}, 'subnet.update.end') - - -def handle_port_metadata_access(plugin, context, port, is_delete=False): - if (cfg.CONF.NSX.metadata_mode == config.MetadataModes.INDIRECT and - port.get('device_owner') == const.DEVICE_OWNER_DHCP): - if port.get('fixed_ips', []) or is_delete: - fixed_ip = port['fixed_ips'][0] - query = context.session.query(models_v2.Subnet) - subnet = query.filter( - models_v2.Subnet.id == fixed_ip['subnet_id']).one() - # If subnet does not have a gateway do not create metadata - # route. This is done via the enable_isolated_metadata - # option if desired. - if not subnet.get('gateway_ip'): - LOG.info(_('Subnet %s does not have a gateway, the metadata ' - 'route will not be created'), subnet['id']) - return - metadata_routes = [r for r in subnet.routes - if r['destination'] == METADATA_DHCP_ROUTE] - if metadata_routes: - # We should have only a single metadata route at any time - # because the route logic forbids two routes with the same - # destination. 
Update next hop with the provided IP address - if not is_delete: - metadata_routes[0].nexthop = fixed_ip['ip_address'] - else: - context.session.delete(metadata_routes[0]) - else: - # add the metadata route - route = models_v2.SubnetRoute( - subnet_id=subnet.id, - destination=METADATA_DHCP_ROUTE, - nexthop=fixed_ip['ip_address']) - context.session.add(route) - - -def handle_router_metadata_access(plugin, context, router_id, interface=None): - if cfg.CONF.NSX.metadata_mode != config.MetadataModes.DIRECT: - LOG.debug(_("Metadata access network is disabled")) - return - if not cfg.CONF.allow_overlapping_ips: - LOG.warn(_("Overlapping IPs must be enabled in order to setup " - "the metadata access network")) - return - ctx_elevated = context.elevated() - device_filter = {'device_id': [router_id], - 'device_owner': [l3_db.DEVICE_OWNER_ROUTER_INTF]} - # Retrieve ports calling database plugin - ports = db_base_plugin_v2.NeutronDbPluginV2.get_ports( - plugin, ctx_elevated, filters=device_filter) - try: - if ports: - if (interface and - not _find_metadata_port(plugin, ctx_elevated, ports)): - _create_metadata_access_network( - plugin, ctx_elevated, router_id) - elif len(ports) == 1: - # The only port left might be the metadata port - _destroy_metadata_access_network( - plugin, ctx_elevated, router_id, ports) - else: - LOG.debug(_("No router interface found for router '%s'. 
" - "No metadata access network should be " - "created or destroyed"), router_id) - # TODO(salvatore-orlando): A better exception handling in the - # NSX plugin would allow us to improve error handling here - except (ntn_exc.NeutronException, nsx_exc.NsxPluginException, - api_exc.NsxApiException): - # Any exception here should be regarded as non-fatal - LOG.exception(_("An error occurred while operating on the " - "metadata access network for router:'%s'"), - router_id) - - -def _find_metadata_port(plugin, context, ports): - for port in ports: - for fixed_ip in port['fixed_ips']: - cidr = netaddr.IPNetwork( - plugin.get_subnet(context, fixed_ip['subnet_id'])['cidr']) - if cidr in netaddr.IPNetwork(METADATA_SUBNET_CIDR): - return port - - -def _create_metadata_access_network(plugin, context, router_id): - # Add network - # Network name is likely to be truncated on NSX - net_data = {'name': 'meta-%s' % router_id, - 'tenant_id': '', # intentionally not set - 'admin_state_up': True, - 'port_security_enabled': False, - 'shared': False, - 'status': const.NET_STATUS_ACTIVE} - meta_net = plugin.create_network(context, - {'network': net_data}) - greenthread.sleep(0) # yield - plugin.schedule_network(context, meta_net) - greenthread.sleep(0) # yield - # From this point on there will be resources to garbage-collect - # in case of failures - meta_sub = None - try: - # Add subnet - subnet_data = {'network_id': meta_net['id'], - 'tenant_id': '', # intentionally not set - 'name': 'meta-%s' % router_id, - 'ip_version': 4, - 'shared': False, - 'cidr': METADATA_SUBNET_CIDR, - 'enable_dhcp': True, - # Ensure default allocation pool is generated - 'allocation_pools': attributes.ATTR_NOT_SPECIFIED, - 'gateway_ip': METADATA_GATEWAY_IP, - 'dns_nameservers': [], - 'host_routes': []} - meta_sub = plugin.create_subnet(context, - {'subnet': subnet_data}) - greenthread.sleep(0) # yield - plugin.add_router_interface(context, router_id, - {'subnet_id': meta_sub['id']}) - greenthread.sleep(0) # 
yield - # Tell to start the metadata agent proxy, only if we had success - _notify_rpc_agent(context, {'subnet': meta_sub}, 'subnet.create.end') - except (ntn_exc.NeutronException, - nsx_exc.NsxPluginException, - api_exc.NsxApiException): - # It is not necessary to explicitly delete the subnet - # as it will be removed with the network - plugin.delete_network(context, meta_net['id']) - - -def _destroy_metadata_access_network(plugin, context, router_id, ports): - if not ports: - return - meta_port = _find_metadata_port(plugin, context, ports) - if not meta_port: - return - meta_net_id = meta_port['network_id'] - meta_sub_id = meta_port['fixed_ips'][0]['subnet_id'] - plugin.remove_router_interface( - context, router_id, {'port_id': meta_port['id']}) - greenthread.sleep(0) # yield - context.session.expunge_all() - try: - # Remove network (this will remove the subnet too) - plugin.delete_network(context, meta_net_id) - greenthread.sleep(0) # yield - except (ntn_exc.NeutronException, nsx_exc.NsxPluginException, - api_exc.NsxApiException): - # must re-add the router interface - plugin.add_router_interface(context, router_id, - {'subnet_id': meta_sub_id}) - # Tell to stop the metadata agent proxy - _notify_rpc_agent( - context, {'network': {'id': meta_net_id}}, 'network.delete.end') - - -def _notify_rpc_agent(context, payload, event): - if cfg.CONF.dhcp_agent_notification: - dhcp_notifier = dhcp_rpc_agent_api.DhcpAgentNotifyAPI() - dhcp_notifier.notify(context, payload, event) diff --git a/neutron/plugins/vmware/dhcpmeta_modes.py b/neutron/plugins/vmware/dhcpmeta_modes.py deleted file mode 100644 index 0ce2112f6..000000000 --- a/neutron/plugins/vmware/dhcpmeta_modes.py +++ /dev/null @@ -1,163 +0,0 @@ -# Copyright 2013 VMware, Inc. -# -# All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from oslo.config import cfg - -from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api -from neutron.common import constants as const -from neutron.common import rpc_compat -from neutron.common import topics -from neutron.db import agents_db -from neutron.openstack.common import importutils -from neutron.openstack.common import log as logging -from neutron.plugins.vmware.common import config -from neutron.plugins.vmware.common import exceptions as nsx_exc -from neutron.plugins.vmware.dhcp_meta import combined -from neutron.plugins.vmware.dhcp_meta import lsnmanager -from neutron.plugins.vmware.dhcp_meta import migration -from neutron.plugins.vmware.dhcp_meta import nsx as nsx_svc -from neutron.plugins.vmware.dhcp_meta import rpc as nsx_rpc -from neutron.plugins.vmware.extensions import lsn - -LOG = logging.getLogger(__name__) - - -class DhcpMetadataAccess(object): - - def setup_dhcpmeta_access(self): - """Initialize support for DHCP and Metadata services.""" - self._init_extensions() - if cfg.CONF.NSX.agent_mode == config.AgentModes.AGENT: - self._setup_rpc_dhcp_metadata() - mod = nsx_rpc - elif cfg.CONF.NSX.agent_mode == config.AgentModes.AGENTLESS: - self._setup_nsx_dhcp_metadata() - mod = nsx_svc - elif cfg.CONF.NSX.agent_mode == config.AgentModes.COMBINED: - notifier = self._setup_nsx_dhcp_metadata() - self._setup_rpc_dhcp_metadata(notifier=notifier) - mod = combined - else: - error = _("Invalid agent_mode: %s") % cfg.CONF.NSX.agent_mode - LOG.error(error) - raise nsx_exc.NsxPluginException(err_msg=error) - 
self.handle_network_dhcp_access_delegate = ( - mod.handle_network_dhcp_access - ) - self.handle_port_dhcp_access_delegate = ( - mod.handle_port_dhcp_access - ) - self.handle_port_metadata_access_delegate = ( - mod.handle_port_metadata_access - ) - self.handle_metadata_access_delegate = ( - mod.handle_router_metadata_access - ) - - def _setup_rpc_dhcp_metadata(self, notifier=None): - self.topic = topics.PLUGIN - self.conn = rpc_compat.create_connection(new=True) - self.endpoints = [nsx_rpc.NSXRpcCallbacks(), - agents_db.AgentExtRpcCallback()] - self.conn.create_consumer(self.topic, self.endpoints, fanout=False) - self.agent_notifiers[const.AGENT_TYPE_DHCP] = ( - notifier or dhcp_rpc_agent_api.DhcpAgentNotifyAPI()) - self.conn.consume_in_threads() - self.network_scheduler = importutils.import_object( - cfg.CONF.network_scheduler_driver - ) - self.supported_extension_aliases.extend( - ['agent', 'dhcp_agent_scheduler']) - - def _setup_nsx_dhcp_metadata(self): - self._check_services_requirements() - nsx_svc.register_dhcp_opts(cfg) - nsx_svc.register_metadata_opts(cfg) - lsnmanager.register_lsn_opts(cfg) - lsn_manager = lsnmanager.PersistentLsnManager(self.safe_reference) - self.lsn_manager = lsn_manager - if cfg.CONF.NSX.agent_mode == config.AgentModes.AGENTLESS: - notifier = nsx_svc.DhcpAgentNotifyAPI(self.safe_reference, - lsn_manager) - self.agent_notifiers[const.AGENT_TYPE_DHCP] = notifier - # In agentless mode, ports whose owner is DHCP need to - # be special cased; so add it to the list of special - # owners list - if const.DEVICE_OWNER_DHCP not in self.port_special_owners: - self.port_special_owners.append(const.DEVICE_OWNER_DHCP) - elif cfg.CONF.NSX.agent_mode == config.AgentModes.COMBINED: - # This becomes ineffective, as all new networks creations - # are handled by Logical Services Nodes in NSX - cfg.CONF.set_override('network_auto_schedule', False) - LOG.warn(_('network_auto_schedule has been disabled')) - notifier = 
combined.DhcpAgentNotifyAPI(self.safe_reference, - lsn_manager) - self.supported_extension_aliases.append(lsn.EXT_ALIAS) - # Add the capability to migrate dhcp and metadata services over - self.migration_manager = ( - migration.MigrationManager( - self.safe_reference, lsn_manager, notifier)) - return notifier - - def _init_extensions(self): - extensions = (lsn.EXT_ALIAS, 'agent', 'dhcp_agent_scheduler') - for ext in extensions: - if ext in self.supported_extension_aliases: - self.supported_extension_aliases.remove(ext) - - def _check_services_requirements(self): - try: - error = None - nsx_svc.check_services_requirements(self.cluster) - except nsx_exc.InvalidVersion: - error = _("Unable to run Neutron with config option '%s', as NSX " - "does not support it") % cfg.CONF.NSX.agent_mode - except nsx_exc.ServiceClusterUnavailable: - error = _("Unmet dependency for config option " - "'%s'") % cfg.CONF.NSX.agent_mode - if error: - LOG.exception(error) - raise nsx_exc.NsxPluginException(err_msg=error) - - def get_lsn(self, context, network_id, fields=None): - report = self.migration_manager.report(context, network_id) - return {'network': network_id, 'report': report} - - def create_lsn(self, context, lsn): - network_id = lsn['lsn']['network'] - subnet = self.migration_manager.validate(context, network_id) - subnet_id = None if not subnet else subnet['id'] - self.migration_manager.migrate(context, network_id, subnet) - r = self.migration_manager.report(context, network_id, subnet_id) - return {'network': network_id, 'report': r} - - def handle_network_dhcp_access(self, context, network, action): - self.handle_network_dhcp_access_delegate(self.safe_reference, context, - network, action) - - def handle_port_dhcp_access(self, context, port_data, action): - self.handle_port_dhcp_access_delegate(self.safe_reference, context, - port_data, action) - - def handle_port_metadata_access(self, context, port, is_delete=False): - 
self.handle_port_metadata_access_delegate(self.safe_reference, context, - port, is_delete) - - def handle_router_metadata_access(self, context, - router_id, interface=None): - self.handle_metadata_access_delegate(self.safe_reference, context, - router_id, interface) diff --git a/neutron/plugins/vmware/extensions/__init__.py b/neutron/plugins/vmware/extensions/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/neutron/plugins/vmware/extensions/distributedrouter.py b/neutron/plugins/vmware/extensions/distributedrouter.py deleted file mode 100644 index aa6949b82..000000000 --- a/neutron/plugins/vmware/extensions/distributedrouter.py +++ /dev/null @@ -1,70 +0,0 @@ -# Copyright 2013 VMware, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from neutron.api.v2 import attributes - - -def convert_to_boolean_if_not_none(data): - if data is not None: - return attributes.convert_to_boolean(data) - return data - - -DISTRIBUTED = 'distributed' -EXTENDED_ATTRIBUTES_2_0 = { - 'routers': { - DISTRIBUTED: {'allow_post': True, 'allow_put': False, - 'convert_to': convert_to_boolean_if_not_none, - 'default': attributes.ATTR_NOT_SPECIFIED, - 'is_visible': True}, - } -} - - -class Distributedrouter(object): - """Extension class supporting distributed router.""" - - @classmethod - def get_name(cls): - return "Distributed Router" - - @classmethod - def get_alias(cls): - return "dist-router" - - @classmethod - def get_description(cls): - return "Enables configuration of NSX Distributed routers." - - @classmethod - def get_namespace(cls): - return "http://docs.openstack.org/ext/dist-router/api/v1.0" - - @classmethod - def get_updated(cls): - return "2013-08-1T10:00:00-00:00" - - def get_required_extensions(self): - return ["router"] - - @classmethod - def get_resources(cls): - """Returns Ext Resources.""" - return [] - - def get_extended_resources(self, version): - if version == "2.0": - return EXTENDED_ATTRIBUTES_2_0 - else: - return {} diff --git a/neutron/plugins/vmware/extensions/lsn.py b/neutron/plugins/vmware/extensions/lsn.py deleted file mode 100644 index 4a7d3ca3d..000000000 --- a/neutron/plugins/vmware/extensions/lsn.py +++ /dev/null @@ -1,82 +0,0 @@ -# Copyright 2014 VMware, Inc. -# -# All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from neutron.api import extensions -from neutron.api.v2 import base -from neutron import manager - - -EXT_ALIAS = 'lsn' -COLLECTION_NAME = "%ss" % EXT_ALIAS - -RESOURCE_ATTRIBUTE_MAP = { - COLLECTION_NAME: { - 'network': {'allow_post': True, 'allow_put': False, - 'validate': {'type:string': None}, - 'is_visible': True}, - 'report': {'allow_post': False, 'allow_put': False, - 'is_visible': True}, - 'tenant_id': {'allow_post': True, 'allow_put': False, - 'required_by_policy': True, - 'validate': {'type:string': None}, 'is_visible': True}, - }, -} - - -class Lsn(object): - """Enable LSN configuration for Neutron NSX networks.""" - - @classmethod - def get_name(cls): - return "Logical Service Node configuration" - - @classmethod - def get_alias(cls): - return EXT_ALIAS - - @classmethod - def get_description(cls): - return "Enables configuration of NSX Logical Services Node." - - @classmethod - def get_namespace(cls): - return "http://docs.openstack.org/ext/%s/api/v2.0" % EXT_ALIAS - - @classmethod - def get_updated(cls): - return "2013-10-05T10:00:00-00:00" - - @classmethod - def get_resources(cls): - """Returns Ext Resources.""" - exts = [] - plugin = manager.NeutronManager.get_plugin() - resource_name = EXT_ALIAS - collection_name = resource_name.replace('_', '-') + "s" - params = RESOURCE_ATTRIBUTE_MAP.get(COLLECTION_NAME, dict()) - controller = base.create_resource(collection_name, - resource_name, - plugin, params, allow_bulk=False) - ex = extensions.ResourceExtension(collection_name, controller) - exts.append(ex) - return exts - - def get_extended_resources(self, version): - if version == "2.0": - return RESOURCE_ATTRIBUTE_MAP - else: - return {} diff --git a/neutron/plugins/vmware/extensions/maclearning.py b/neutron/plugins/vmware/extensions/maclearning.py deleted file mode 100644 index 21c669150..000000000 --- 
a/neutron/plugins/vmware/extensions/maclearning.py +++ /dev/null @@ -1,61 +0,0 @@ -# Copyright 2013 VMware, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from neutron.api.v2 import attributes - - -MAC_LEARNING = 'mac_learning_enabled' -EXTENDED_ATTRIBUTES_2_0 = { - 'ports': { - MAC_LEARNING: {'allow_post': True, 'allow_put': True, - 'convert_to': attributes.convert_to_boolean, - 'default': attributes.ATTR_NOT_SPECIFIED, - 'is_visible': True}, - } -} - - -class Maclearning(object): - """Extension class supporting port mac learning.""" - - @classmethod - def get_name(cls): - return "MAC Learning" - - @classmethod - def get_alias(cls): - return "mac-learning" - - @classmethod - def get_description(cls): - return "Provides MAC learning capabilities." 
- - @classmethod - def get_namespace(cls): - return "http://docs.openstack.org/ext/maclearning/api/v1.0" - - @classmethod - def get_updated(cls): - return "2013-05-1T10:00:00-00:00" - - @classmethod - def get_resources(cls): - """Returns Ext Resources.""" - return [] - - def get_extended_resources(self, version): - if version == "2.0": - return EXTENDED_ATTRIBUTES_2_0 - else: - return {} diff --git a/neutron/plugins/vmware/extensions/networkgw.py b/neutron/plugins/vmware/extensions/networkgw.py deleted file mode 100644 index 2cb650b40..000000000 --- a/neutron/plugins/vmware/extensions/networkgw.py +++ /dev/null @@ -1,251 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2013 VMware. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -import abc - -from oslo.config import cfg - -from neutron.api.v2 import attributes -from neutron.api.v2 import resource_helper -from neutron.plugins.vmware.common import utils - -GATEWAY_RESOURCE_NAME = "network_gateway" -DEVICE_RESOURCE_NAME = "gateway_device" -# Use dash for alias and collection name -EXT_ALIAS = GATEWAY_RESOURCE_NAME.replace('_', '-') -NETWORK_GATEWAYS = "%ss" % EXT_ALIAS -GATEWAY_DEVICES = "%ss" % DEVICE_RESOURCE_NAME.replace('_', '-') -DEVICE_ID_ATTR = 'id' -IFACE_NAME_ATTR = 'interface_name' - -# Attribute Map for Network Gateway Resource -# TODO(salvatore-orlando): add admin state as other neutron resources -RESOURCE_ATTRIBUTE_MAP = { - NETWORK_GATEWAYS: { - 'id': {'allow_post': False, 'allow_put': False, - 'is_visible': True}, - 'name': {'allow_post': True, 'allow_put': True, - 'validate': {'type:string': None}, - 'is_visible': True, 'default': ''}, - 'default': {'allow_post': False, 'allow_put': False, - 'is_visible': True}, - 'devices': {'allow_post': True, 'allow_put': False, - 'validate': {'type:device_list': None}, - 'is_visible': True}, - 'ports': {'allow_post': False, 'allow_put': False, - 'default': [], - 'is_visible': True}, - 'tenant_id': {'allow_post': True, 'allow_put': False, - 'validate': {'type:string': None}, - 'required_by_policy': True, - 'is_visible': True} - }, - GATEWAY_DEVICES: { - 'id': {'allow_post': False, 'allow_put': False, - 'is_visible': True}, - 'name': {'allow_post': True, 'allow_put': True, - 'validate': {'type:string': None}, - 'is_visible': True, 'default': ''}, - 'client_certificate': {'allow_post': True, 'allow_put': True, - 'validate': {'type:string': None}, - 'is_visible': True}, - 'connector_type': {'allow_post': True, 'allow_put': True, - 'validate': {'type:connector_type': None}, - 'is_visible': True}, - 'connector_ip': {'allow_post': True, 'allow_put': True, - 'validate': {'type:ip_address': None}, - 'is_visible': True}, - 'tenant_id': {'allow_post': True, 'allow_put': False, - 'validate': 
{'type:string': None}, - 'required_by_policy': True, - 'is_visible': True}, - 'status': {'allow_post': False, 'allow_put': False, - 'is_visible': True}, - } -} - - -def _validate_device_list(data, valid_values=None): - """Validate the list of service definitions.""" - if not data: - # Devices must be provided - msg = _("Cannot create a gateway with an empty device list") - return msg - try: - for device in data: - key_specs = {DEVICE_ID_ATTR: - {'type:regex': attributes.UUID_PATTERN, - 'required': True}, - IFACE_NAME_ATTR: - {'type:string': None, - 'required': False}} - err_msg = attributes._validate_dict( - device, key_specs=key_specs) - if err_msg: - return err_msg - unexpected_keys = [key for key in device if key not in key_specs] - if unexpected_keys: - err_msg = (_("Unexpected keys found in device description:%s") - % ",".join(unexpected_keys)) - return err_msg - except TypeError: - return (_("%s: provided data are not iterable") % - _validate_device_list.__name__) - - -def _validate_connector_type(data, valid_values=None): - if not data: - # A connector type is compulsory - msg = _("A connector type is required to create a gateway device") - return msg - connector_types = (valid_values if valid_values else - [utils.NetworkTypes.GRE, - utils.NetworkTypes.STT, - utils.NetworkTypes.BRIDGE, - 'ipsec%s' % utils.NetworkTypes.GRE, - 'ipsec%s' % utils.NetworkTypes.STT]) - if data not in connector_types: - msg = _("Unknown connector type: %s") % data - return msg - - -nw_gw_quota_opts = [ - cfg.IntOpt('quota_network_gateway', - default=5, - help=_('Number of network gateways allowed per tenant, ' - '-1 for unlimited')) -] - -cfg.CONF.register_opts(nw_gw_quota_opts, 'QUOTAS') - -attributes.validators['type:device_list'] = _validate_device_list -attributes.validators['type:connector_type'] = _validate_connector_type - - -class Networkgw(object): - """API extension for Layer-2 Gateway support. 
- - The Layer-2 gateway feature allows for connecting neutron networks - with external networks at the layer-2 level. No assumption is made on - the location of the external network, which might not even be directly - reachable from the hosts where the VMs are deployed. - - This is achieved by instantiating 'network gateways', and then connecting - Neutron network to them. - """ - - @classmethod - def get_name(cls): - return "Network Gateway" - - @classmethod - def get_alias(cls): - return EXT_ALIAS - - @classmethod - def get_description(cls): - return "Connects Neutron networks with external networks at layer 2." - - @classmethod - def get_namespace(cls): - return "http://docs.openstack.org/ext/network-gateway/api/v1.0" - - @classmethod - def get_updated(cls): - return "2014-01-01T00:00:00-00:00" - - @classmethod - def get_resources(cls): - """Returns Ext Resources.""" - - member_actions = { - GATEWAY_RESOURCE_NAME.replace('_', '-'): { - 'connect_network': 'PUT', - 'disconnect_network': 'PUT'}} - - plural_mappings = resource_helper.build_plural_mappings( - {}, RESOURCE_ATTRIBUTE_MAP) - - return resource_helper.build_resource_info(plural_mappings, - RESOURCE_ATTRIBUTE_MAP, - None, - action_map=member_actions, - register_quota=True, - translate_name=True) - - def get_extended_resources(self, version): - if version == "2.0": - return RESOURCE_ATTRIBUTE_MAP - else: - return {} - - -class NetworkGatewayPluginBase(object): - - @abc.abstractmethod - def create_network_gateway(self, context, network_gateway): - pass - - @abc.abstractmethod - def update_network_gateway(self, context, id, network_gateway): - pass - - @abc.abstractmethod - def get_network_gateway(self, context, id, fields=None): - pass - - @abc.abstractmethod - def delete_network_gateway(self, context, id): - pass - - @abc.abstractmethod - def get_network_gateways(self, context, filters=None, fields=None, - sorts=None, limit=None, marker=None, - page_reverse=False): - pass - - @abc.abstractmethod - def 
connect_network(self, context, network_gateway_id, - network_mapping_info): - pass - - @abc.abstractmethod - def disconnect_network(self, context, network_gateway_id, - network_mapping_info): - pass - - @abc.abstractmethod - def create_gateway_device(self, context, gateway_device): - pass - - @abc.abstractmethod - def update_gateway_device(self, context, id, gateway_device): - pass - - @abc.abstractmethod - def delete_gateway_device(self, context, id): - pass - - @abc.abstractmethod - def get_gateway_device(self, context, id, fields=None): - pass - - @abc.abstractmethod - def get_gateway_devices(self, context, filters=None, fields=None, - sorts=None, limit=None, marker=None, - page_reverse=False): - pass diff --git a/neutron/plugins/vmware/extensions/nvp_qos.py b/neutron/plugins/vmware/extensions/nvp_qos.py deleted file mode 100644 index 470f267b5..000000000 --- a/neutron/plugins/vmware/extensions/nvp_qos.py +++ /dev/null @@ -1,40 +0,0 @@ -# Copyright 2013 VMware, Inc. -# -# All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# TODO(arosen): This is deprecated in Juno, and -# to be removed in Kxxxx. - -from neutron.plugins.vmware.extensions import qos - - -class Nvp_qos(qos.Qos): - """(Deprecated) Port Queue extension.""" - - @classmethod - def get_name(cls): - return "nvp-qos" - - @classmethod - def get_alias(cls): - return "nvp-qos" - - @classmethod - def get_description(cls): - return "NVP QoS extension (deprecated)." 
- - @classmethod - def get_namespace(cls): - return "http://docs.openstack.org/ext/nvp-qos/api/v2.0" diff --git a/neutron/plugins/vmware/extensions/qos.py b/neutron/plugins/vmware/extensions/qos.py deleted file mode 100644 index 45b343a1e..000000000 --- a/neutron/plugins/vmware/extensions/qos.py +++ /dev/null @@ -1,223 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2013 VMware, Inc. -# All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -import abc - -from neutron.api import extensions -from neutron.api.v2 import attributes as attr -from neutron.api.v2 import base -from neutron.common import exceptions as qexception -from neutron import manager - - -# For policy.json/Auth -qos_queue_create = "create_qos_queue" -qos_queue_delete = "delete_qos_queue" -qos_queue_get = "get_qos_queue" -qos_queue_list = "get_qos_queues" - - -class DefaultQueueCreateNotAdmin(qexception.InUse): - message = _("Need to be admin in order to create queue called default") - - -class DefaultQueueAlreadyExists(qexception.InUse): - message = _("Default queue already exists.") - - -class QueueInvalidDscp(qexception.InvalidInput): - message = _("Invalid value for dscp %(data)s must be integer value" - " between 0 and 63.") - - -class QueueMinGreaterMax(qexception.InvalidInput): - message = _("Invalid bandwidth rate, min greater than max.") - - -class QueueInvalidBandwidth(qexception.InvalidInput): - message = _("Invalid bandwidth rate, %(data)s must be a non 
negative" - " integer.") - - -class QueueNotFound(qexception.NotFound): - message = _("Queue %(id)s does not exist") - - -class QueueInUseByPort(qexception.InUse): - message = _("Unable to delete queue attached to port.") - - -class QueuePortBindingNotFound(qexception.NotFound): - message = _("Port is not associated with lqueue") - - -def convert_to_unsigned_int_or_none(val): - if val is None: - return - try: - val = int(val) - if val < 0: - raise ValueError - except (ValueError, TypeError): - msg = _("'%s' must be a non negative integer.") % val - raise qexception.InvalidInput(error_message=msg) - return val - - -def convert_to_unsigned_int_or_none_max_63(val): - val = convert_to_unsigned_int_or_none(val) - if val > 63: - raise QueueInvalidDscp(data=val) - return val - -# As per NSX API, if a queue is trusted, DSCP must be omitted; if a queue is -# untrusted, DSCP must be specified. Whichever default values we choose for -# the tuple (qos_marking, dscp), there will be at least one combination of a -# request with conflicting values: for instance, with the following default: -# -# qos_marking = 'untrusted', dscp = '0' -# -# requests with qos_marking = 'trusted' and a default dscp will fail. Since -# it is convoluted to ask the admin to specify a None value for dscp when -# qos_marking is 'trusted', it is best to ignore the dscp value, regardless -# of whether it has been specified or not. This preserves the chosen default -# and keeps backward compatibility with the API. 
A warning will be logged, as -# the server is overriding a potentially conflicting request from the admin -RESOURCE_ATTRIBUTE_MAP = { - 'qos_queues': { - 'id': {'allow_post': False, 'allow_put': False, - 'is_visible': True}, - 'default': {'allow_post': True, 'allow_put': False, - 'convert_to': attr.convert_to_boolean, - 'is_visible': True, 'default': False}, - 'name': {'allow_post': True, 'allow_put': False, - 'validate': {'type:string': None}, - 'is_visible': True, 'default': ''}, - 'min': {'allow_post': True, 'allow_put': False, - 'is_visible': True, 'default': '0', - 'convert_to': convert_to_unsigned_int_or_none}, - 'max': {'allow_post': True, 'allow_put': False, - 'is_visible': True, 'default': None, - 'convert_to': convert_to_unsigned_int_or_none}, - 'qos_marking': {'allow_post': True, 'allow_put': False, - 'validate': {'type:values': ['untrusted', 'trusted']}, - 'default': 'untrusted', 'is_visible': True}, - 'dscp': {'allow_post': True, 'allow_put': False, - 'is_visible': True, 'default': '0', - 'convert_to': convert_to_unsigned_int_or_none_max_63}, - 'tenant_id': {'allow_post': True, 'allow_put': False, - 'required_by_policy': True, - 'validate': {'type:string': None}, - 'is_visible': True}, - }, -} - - -QUEUE = 'queue_id' -RXTX_FACTOR = 'rxtx_factor' -EXTENDED_ATTRIBUTES_2_0 = { - 'ports': { - RXTX_FACTOR: {'allow_post': True, - # FIXME(arosen): the plugin currently does not - # implement updating rxtx factor on port. 
- 'allow_put': True, - 'is_visible': False, - 'default': 1, - 'enforce_policy': True, - 'convert_to': convert_to_unsigned_int_or_none}, - - QUEUE: {'allow_post': False, - 'allow_put': False, - 'is_visible': True, - 'default': False, - 'enforce_policy': True}}, - 'networks': {QUEUE: {'allow_post': True, - 'allow_put': True, - 'is_visible': True, - 'default': False, - 'enforce_policy': True}} - -} - - -class Qos(object): - """Port Queue extension.""" - - @classmethod - def get_name(cls): - return "QoS Queue" - - @classmethod - def get_alias(cls): - return "qos-queue" - - @classmethod - def get_description(cls): - return "NSX QoS extension." - - @classmethod - def get_namespace(cls): - return "http://docs.openstack.org/ext/qos-queue/api/v2.0" - - @classmethod - def get_updated(cls): - return "2014-01-01T00:00:00-00:00" - - @classmethod - def get_resources(cls): - """Returns Ext Resources.""" - exts = [] - plugin = manager.NeutronManager.get_plugin() - resource_name = 'qos_queue' - collection_name = resource_name.replace('_', '-') + "s" - params = RESOURCE_ATTRIBUTE_MAP.get(resource_name + "s", dict()) - controller = base.create_resource(collection_name, - resource_name, - plugin, params, allow_bulk=False) - - ex = extensions.ResourceExtension(collection_name, - controller) - exts.append(ex) - - return exts - - def get_extended_resources(self, version): - if version == "2.0": - return dict(EXTENDED_ATTRIBUTES_2_0.items() + - RESOURCE_ATTRIBUTE_MAP.items()) - else: - return {} - - -class QueuePluginBase(object): - @abc.abstractmethod - def create_qos_queue(self, context, queue): - pass - - @abc.abstractmethod - def delete_qos_queue(self, context, id): - pass - - @abc.abstractmethod - def get_qos_queue(self, context, id, fields=None): - pass - - @abc.abstractmethod - def get_qos_queues(self, context, filters=None, fields=None, sorts=None, - limit=None, marker=None, page_reverse=False): - pass diff --git a/neutron/plugins/vmware/extensions/servicerouter.py 
b/neutron/plugins/vmware/extensions/servicerouter.py deleted file mode 100644 index ea5382407..000000000 --- a/neutron/plugins/vmware/extensions/servicerouter.py +++ /dev/null @@ -1,59 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2013 VMware, Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from neutron.api import extensions -from neutron.api.v2 import attributes - - -SERVICE_ROUTER = 'service_router' -EXTENDED_ATTRIBUTES_2_0 = { - 'routers': { - SERVICE_ROUTER: {'allow_post': True, 'allow_put': False, - 'convert_to': attributes.convert_to_boolean, - 'default': False, 'is_visible': True}, - } -} - - -class Servicerouter(extensions.ExtensionDescriptor): - """Extension class supporting advanced service router.""" - - @classmethod - def get_name(cls): - return "Service Router" - - @classmethod - def get_alias(cls): - return "service-router" - - @classmethod - def get_description(cls): - return "Provides service router." 
- - @classmethod - def get_namespace(cls): - return "http://docs.openstack.org/ext/service-router/api/v1.0" - - @classmethod - def get_updated(cls): - return "2013-08-08T00:00:00-00:00" - - def get_extended_resources(self, version): - if version == "2.0": - return EXTENDED_ATTRIBUTES_2_0 - else: - return {} diff --git a/neutron/plugins/vmware/nsx_cluster.py b/neutron/plugins/vmware/nsx_cluster.py deleted file mode 100644 index 1c564385d..000000000 --- a/neutron/plugins/vmware/nsx_cluster.py +++ /dev/null @@ -1,97 +0,0 @@ -# Copyright 2012 VMware, Inc. -# All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo.config import cfg - -from neutron.openstack.common import log as logging -from neutron.plugins.vmware.common import exceptions - -LOG = logging.getLogger(__name__) -DEFAULT_PORT = 443 -# Raise if one of those attributes is not specified -REQUIRED_ATTRIBUTES = ['default_tz_uuid', 'nsx_user', - 'nsx_password', 'nsx_controllers'] -# Emit a INFO log if one of those attributes is not specified -IMPORTANT_ATTRIBUTES = ['default_l3_gw_service_uuid'] -# Deprecated attributes -DEPRECATED_ATTRIBUTES = ['metadata_dhcp_host_route', - 'nvp_user', 'nvp_password', 'nvp_controllers'] - - -class NSXCluster(object): - """NSX cluster class. - - Encapsulates controller connections and the API client for a NSX cluster. - - Controller-specific parameters, such as timeouts are stored in the - elements of the controllers attribute, which are dicts. 
- """ - - def __init__(self, **kwargs): - self._required_attributes = REQUIRED_ATTRIBUTES[:] - self._important_attributes = IMPORTANT_ATTRIBUTES[:] - self._deprecated_attributes = {} - self._sanity_check(kwargs) - - for opt, val in self._deprecated_attributes.iteritems(): - LOG.deprecated(_("Attribute '%s' has been deprecated or moved " - "to a new section. See new configuration file " - "for details."), opt) - depr_func = getattr(self, '_process_%s' % opt, None) - if depr_func: - depr_func(val) - - # If everything went according to plan these two lists should be empty - if self._required_attributes: - raise exceptions.InvalidClusterConfiguration( - invalid_attrs=self._required_attributes) - if self._important_attributes: - LOG.info(_("The following cluster attributes were " - "not specified: %s'"), self._important_attributes) - # The API client will be explicitly created by users of this class - self.api_client = None - - def _sanity_check(self, options): - # Iterating this way ensures the conf parameters also - # define the structure of this class - for arg in cfg.CONF: - if arg not in DEPRECATED_ATTRIBUTES: - setattr(self, arg, options.get(arg, cfg.CONF.get(arg))) - self._process_attribute(arg) - elif options.get(arg) is not None: - # Process deprecated attributes only if specified - self._deprecated_attributes[arg] = options.get(arg) - - def _process_attribute(self, attribute): - # Process the attribute only if it's not empty! 
- if getattr(self, attribute, None): - if attribute in self._required_attributes: - self._required_attributes.remove(attribute) - if attribute in self._important_attributes: - self._important_attributes.remove(attribute) - handler_func = getattr(self, '_process_%s' % attribute, None) - if handler_func: - handler_func() - - def _process_nsx_controllers(self): - # If this raises something is not right, so let it bubble up - # TODO(salvatore-orlando): Also validate attribute here - for i, ctrl in enumerate(self.nsx_controllers or []): - if len(ctrl.split(':')) == 1: - self.nsx_controllers[i] = '%s:%s' % (ctrl, DEFAULT_PORT) - - def _process_nvp_controllers(self): - self.nsx_controllers = self.nvp_controllers - self._process_nsx_controllers() diff --git a/neutron/plugins/vmware/nsxlib/__init__.py b/neutron/plugins/vmware/nsxlib/__init__.py deleted file mode 100644 index b09460b59..000000000 --- a/neutron/plugins/vmware/nsxlib/__init__.py +++ /dev/null @@ -1,141 +0,0 @@ -# Copyright 2014 VMware, Inc. -# All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from neutron.common import exceptions as exception -from neutron.openstack.common import jsonutils as json -from neutron.openstack.common import log -from neutron.plugins.vmware.api_client import exception as api_exc -from neutron.plugins.vmware.common import exceptions as nsx_exc -from neutron import version - -HTTP_GET = "GET" -HTTP_POST = "POST" -HTTP_DELETE = "DELETE" -HTTP_PUT = "PUT" -# Prefix to be used for all NSX API calls -URI_PREFIX = "/ws.v1" -NEUTRON_VERSION = version.version_info.release_string() - -LOG = log.getLogger(__name__) - - -def _build_uri_path(resource, - resource_id=None, - parent_resource_id=None, - fields=None, - relations=None, - filters=None, - types=None, - is_attachment=False, - extra_action=None): - resources = resource.split('/') - res_path = resources[0] + (resource_id and "/%s" % resource_id or '') - if len(resources) > 1: - # There is also a parent resource to account for in the uri - res_path = "%s/%s/%s" % (resources[1], - parent_resource_id, - res_path) - if is_attachment: - res_path = "%s/attachment" % res_path - elif extra_action: - res_path = "%s/%s" % (res_path, extra_action) - params = [] - params.append(fields and "fields=%s" % fields) - params.append(relations and "relations=%s" % relations) - params.append(types and "types=%s" % types) - if filters: - params.extend(['%s=%s' % (k, v) for (k, v) in filters.iteritems()]) - uri_path = "%s/%s" % (URI_PREFIX, res_path) - non_empty_params = [x for x in params if x is not None] - if non_empty_params: - query_string = '&'.join(non_empty_params) - if query_string: - uri_path += "?%s" % query_string - return uri_path - - -def format_exception(etype, e, exception_locals): - """Consistent formatting for exceptions. - - :param etype: a string describing the exception type. - :param e: the exception. - :param execption_locals: calling context local variable dict. - :returns: a formatted string. - """ - msg = [_("Error. 
%(type)s exception: %(exc)s.") % - {'type': etype, 'exc': e}] - l = dict((k, v) for k, v in exception_locals.iteritems() - if k != 'request') - msg.append(_("locals=[%s]") % str(l)) - return ' '.join(msg) - - -def do_request(*args, **kwargs): - """Issue a request to the cluster specified in kwargs. - - :param args: a list of positional arguments. - :param kwargs: a list of keyworkds arguments. - :returns: the result of the operation loaded into a python - object or None. - """ - cluster = kwargs["cluster"] - try: - res = cluster.api_client.request(*args) - if res: - return json.loads(res) - except api_exc.ResourceNotFound: - raise exception.NotFound() - except api_exc.ReadOnlyMode: - raise nsx_exc.MaintenanceInProgress() - - -def get_single_query_page(path, cluster, page_cursor=None, - page_length=1000, neutron_only=True): - params = [] - if page_cursor: - params.append("_page_cursor=%s" % page_cursor) - params.append("_page_length=%s" % page_length) - # NOTE(salv-orlando): On the NSX backend the 'Quantum' tag is still - # used for marking Neutron entities in order to preserve compatibility - if neutron_only: - params.append("tag_scope=quantum") - query_params = "&".join(params) - path = "%s%s%s" % (path, "&" if (path.find("?") != -1) else "?", - query_params) - body = do_request(HTTP_GET, path, cluster=cluster) - # Result_count won't be returned if _page_cursor is supplied - return body['results'], body.get('page_cursor'), body.get('result_count') - - -def get_all_query_pages(path, cluster): - need_more_results = True - result_list = [] - page_cursor = None - while need_more_results: - results, page_cursor = get_single_query_page( - path, cluster, page_cursor)[:2] - if not page_cursor: - need_more_results = False - result_list.extend(results) - return result_list - - -def mk_body(**kwargs): - """Convenience function creates and dumps dictionary to string. - - :param kwargs: the key/value pirs to be dumped into a json string. - :returns: a json string. 
- """ - return json.dumps(kwargs, ensure_ascii=False) diff --git a/neutron/plugins/vmware/nsxlib/l2gateway.py b/neutron/plugins/vmware/nsxlib/l2gateway.py deleted file mode 100644 index bd261f922..000000000 --- a/neutron/plugins/vmware/nsxlib/l2gateway.py +++ /dev/null @@ -1,211 +0,0 @@ -# Copyright 2014 VMware, Inc. -# All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from neutron.openstack.common import jsonutils as json -from neutron.openstack.common import log -from neutron.plugins.vmware.api_client import exception as api_exc -from neutron.plugins.vmware.common import exceptions as nsx_exc -from neutron.plugins.vmware.common import utils -from neutron.plugins.vmware import nsxlib -from neutron.plugins.vmware.nsxlib import switch - -HTTP_GET = "GET" -HTTP_POST = "POST" -HTTP_DELETE = "DELETE" -HTTP_PUT = "PUT" - -GWSERVICE_RESOURCE = "gateway-service" -TRANSPORTNODE_RESOURCE = "transport-node" - -LOG = log.getLogger(__name__) - - -def create_l2_gw_service(cluster, tenant_id, display_name, devices): - """Create a NSX Layer-2 Network Gateway Service. - - :param cluster: The target NSX cluster - :param tenant_id: Identifier of the Openstack tenant for which - the gateway service. 
- :param display_name: Descriptive name of this gateway service - :param devices: List of transport node uuids (and network - interfaces on them) to use for the network gateway service - :raise NsxApiException: if there is a problem while communicating - with the NSX controller - """ - # NOTE(salvatore-orlando): This is a little confusing, but device_id in - # NSX is actually the identifier a physical interface on the gateway - # device, which in the Neutron API is referred as interface_name - gateways = [{"transport_node_uuid": device['id'], - "device_id": device['interface_name'], - "type": "L2Gateway"} for device in devices] - gwservice_obj = { - "display_name": utils.check_and_truncate(display_name), - "tags": utils.get_tags(os_tid=tenant_id), - "gateways": gateways, - "type": "L2GatewayServiceConfig" - } - return nsxlib.do_request( - HTTP_POST, nsxlib._build_uri_path(GWSERVICE_RESOURCE), - json.dumps(gwservice_obj), cluster=cluster) - - -def plug_l2_gw_service(cluster, lswitch_id, lport_id, - gateway_id, vlan_id=None): - """Plug a Layer-2 Gateway Attachment object in a logical port.""" - att_obj = {'type': 'L2GatewayAttachment', - 'l2_gateway_service_uuid': gateway_id} - if vlan_id: - att_obj['vlan_id'] = vlan_id - return switch.plug_interface(cluster, lswitch_id, lport_id, att_obj) - - -def get_l2_gw_service(cluster, gateway_id): - return nsxlib.do_request( - HTTP_GET, nsxlib._build_uri_path(GWSERVICE_RESOURCE, - resource_id=gateway_id), - cluster=cluster) - - -def get_l2_gw_services(cluster, tenant_id=None, - fields=None, filters=None): - actual_filters = dict(filters or {}) - if tenant_id: - actual_filters['tag'] = tenant_id - actual_filters['tag_scope'] = 'os_tid' - return nsxlib.get_all_query_pages( - nsxlib._build_uri_path(GWSERVICE_RESOURCE, - filters=actual_filters), - cluster) - - -def update_l2_gw_service(cluster, gateway_id, display_name): - # TODO(salvatore-orlando): Allow updates for gateways too - gwservice_obj = get_l2_gw_service(cluster, 
gateway_id) - if not display_name: - # Nothing to update - return gwservice_obj - gwservice_obj["display_name"] = utils.check_and_truncate(display_name) - return nsxlib.do_request(HTTP_PUT, - nsxlib._build_uri_path(GWSERVICE_RESOURCE, - resource_id=gateway_id), - json.dumps(gwservice_obj), cluster=cluster) - - -def delete_l2_gw_service(cluster, gateway_id): - nsxlib.do_request(HTTP_DELETE, - nsxlib._build_uri_path(GWSERVICE_RESOURCE, - resource_id=gateway_id), - cluster=cluster) - - -def _build_gateway_device_body(tenant_id, display_name, neutron_id, - connector_type, connector_ip, - client_certificate, tz_uuid): - - connector_type_mappings = { - utils.NetworkTypes.STT: "STTConnector", - utils.NetworkTypes.GRE: "GREConnector", - utils.NetworkTypes.BRIDGE: "BridgeConnector", - 'ipsec%s' % utils.NetworkTypes.STT: "IPsecSTT", - 'ipsec%s' % utils.NetworkTypes.GRE: "IPsecGRE"} - nsx_connector_type = connector_type_mappings.get(connector_type) - body = {"display_name": utils.check_and_truncate(display_name), - "tags": utils.get_tags(os_tid=tenant_id, - q_gw_dev_id=neutron_id), - "admin_status_enabled": True} - - if connector_ip and nsx_connector_type: - body["transport_connectors"] = [ - {"transport_zone_uuid": tz_uuid, - "ip_address": connector_ip, - "type": nsx_connector_type}] - - if client_certificate: - body["credential"] = {"client_certificate": - {"pem_encoded": client_certificate}, - "type": "SecurityCertificateCredential"} - return body - - -def create_gateway_device(cluster, tenant_id, display_name, neutron_id, - tz_uuid, connector_type, connector_ip, - client_certificate): - body = _build_gateway_device_body(tenant_id, display_name, neutron_id, - connector_type, connector_ip, - client_certificate, tz_uuid) - try: - return nsxlib.do_request( - HTTP_POST, nsxlib._build_uri_path(TRANSPORTNODE_RESOURCE), - json.dumps(body), cluster=cluster) - except api_exc.InvalidSecurityCertificate: - raise nsx_exc.InvalidSecurityCertificate() - - -def 
update_gateway_device(cluster, gateway_id, tenant_id, - display_name, neutron_id, - tz_uuid, connector_type, connector_ip, - client_certificate): - body = _build_gateway_device_body(tenant_id, display_name, neutron_id, - connector_type, connector_ip, - client_certificate, tz_uuid) - try: - return nsxlib.do_request( - HTTP_PUT, - nsxlib._build_uri_path(TRANSPORTNODE_RESOURCE, - resource_id=gateway_id), - json.dumps(body), cluster=cluster) - except api_exc.InvalidSecurityCertificate: - raise nsx_exc.InvalidSecurityCertificate() - - -def delete_gateway_device(cluster, device_uuid): - return nsxlib.do_request(HTTP_DELETE, - nsxlib._build_uri_path(TRANSPORTNODE_RESOURCE, - device_uuid), - cluster=cluster) - - -def get_gateway_device_status(cluster, device_uuid): - status_res = nsxlib.do_request(HTTP_GET, - nsxlib._build_uri_path( - TRANSPORTNODE_RESOURCE, - device_uuid, - extra_action='status'), - cluster=cluster) - # Returns the connection status - return status_res['connection']['connected'] - - -def get_gateway_devices_status(cluster, tenant_id=None): - if tenant_id: - gw_device_query_path = nsxlib._build_uri_path( - TRANSPORTNODE_RESOURCE, - fields="uuid,tags", - relations="TransportNodeStatus", - filters={'tag': tenant_id, - 'tag_scope': 'os_tid'}) - else: - gw_device_query_path = nsxlib._build_uri_path( - TRANSPORTNODE_RESOURCE, - fields="uuid,tags", - relations="TransportNodeStatus") - - response = nsxlib.get_all_query_pages(gw_device_query_path, cluster) - results = {} - for item in response: - results[item['uuid']] = (item['_relations']['TransportNodeStatus'] - ['connection']['connected']) - return results diff --git a/neutron/plugins/vmware/nsxlib/lsn.py b/neutron/plugins/vmware/nsxlib/lsn.py deleted file mode 100644 index f67288bf0..000000000 --- a/neutron/plugins/vmware/nsxlib/lsn.py +++ /dev/null @@ -1,270 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2013 VMware, Inc. 
-# All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from neutron.common import exceptions as exception -from neutron.openstack.common import jsonutils as json -from neutron.openstack.common import log -from neutron.plugins.vmware.api_client import exception as api_exc -from neutron.plugins.vmware.common import exceptions as nsx_exc -from neutron.plugins.vmware.common import utils -from neutron.plugins.vmware import nsxlib - -HTTP_GET = "GET" -HTTP_POST = "POST" -HTTP_DELETE = "DELETE" -HTTP_PUT = "PUT" - -SERVICECLUSTER_RESOURCE = "edge-cluster" -LSERVICESNODE_RESOURCE = "lservices-node" -LSERVICESNODEPORT_RESOURCE = "lport/%s" % LSERVICESNODE_RESOURCE -SUPPORTED_METADATA_OPTIONS = ['metadata_proxy_shared_secret'] - -LOG = log.getLogger(__name__) - - -def service_cluster_exists(cluster, svc_cluster_id): - exists = False - try: - exists = ( - svc_cluster_id and - nsxlib.do_request(HTTP_GET, - nsxlib._build_uri_path( - SERVICECLUSTER_RESOURCE, - resource_id=svc_cluster_id), - cluster=cluster) is not None) - except exception.NotFound: - pass - return exists - - -def lsn_for_network_create(cluster, network_id): - lsn_obj = { - "edge_cluster_uuid": cluster.default_service_cluster_uuid, - "tags": utils.get_tags(n_network_id=network_id) - } - return nsxlib.do_request(HTTP_POST, - nsxlib._build_uri_path(LSERVICESNODE_RESOURCE), - json.dumps(lsn_obj), - cluster=cluster)["uuid"] - - -def lsn_for_network_get(cluster, network_id): - filters = {"tag": 
network_id, "tag_scope": "n_network_id"} - results = nsxlib.do_request(HTTP_GET, - nsxlib._build_uri_path(LSERVICESNODE_RESOURCE, - fields="uuid", - filters=filters), - cluster=cluster)['results'] - if not results: - raise exception.NotFound() - elif len(results) == 1: - return results[0]['uuid'] - - -def lsn_delete(cluster, lsn_id): - nsxlib.do_request(HTTP_DELETE, - nsxlib._build_uri_path(LSERVICESNODE_RESOURCE, - resource_id=lsn_id), - cluster=cluster) - - -def lsn_port_host_entries_update( - cluster, lsn_id, lsn_port_id, conf, hosts_data): - hosts_obj = {'hosts': hosts_data} - nsxlib.do_request(HTTP_PUT, - nsxlib._build_uri_path(LSERVICESNODEPORT_RESOURCE, - parent_resource_id=lsn_id, - resource_id=lsn_port_id, - extra_action=conf), - json.dumps(hosts_obj), - cluster=cluster) - - -def lsn_port_create(cluster, lsn_id, port_data): - port_obj = { - "ip_address": port_data["ip_address"], - "mac_address": port_data["mac_address"], - "tags": utils.get_tags(n_mac_address=port_data["mac_address"], - n_subnet_id=port_data["subnet_id"]), - "type": "LogicalServicesNodePortConfig", - } - return nsxlib.do_request(HTTP_POST, - nsxlib._build_uri_path(LSERVICESNODEPORT_RESOURCE, - parent_resource_id=lsn_id), - json.dumps(port_obj), - cluster=cluster)["uuid"] - - -def lsn_port_delete(cluster, lsn_id, lsn_port_id): - return nsxlib.do_request(HTTP_DELETE, - nsxlib._build_uri_path(LSERVICESNODEPORT_RESOURCE, - parent_resource_id=lsn_id, - resource_id=lsn_port_id), - cluster=cluster) - - -def _lsn_port_get(cluster, lsn_id, filters): - results = nsxlib.do_request(HTTP_GET, - nsxlib._build_uri_path( - LSERVICESNODEPORT_RESOURCE, - parent_resource_id=lsn_id, - fields="uuid", - filters=filters), - cluster=cluster)['results'] - if not results: - raise exception.NotFound() - elif len(results) == 1: - return results[0]['uuid'] - - -def lsn_port_by_mac_get(cluster, lsn_id, mac_address): - filters = {"tag": mac_address, "tag_scope": "n_mac_address"} - return _lsn_port_get(cluster, lsn_id, 
filters) - - -def lsn_port_by_subnet_get(cluster, lsn_id, subnet_id): - filters = {"tag": subnet_id, "tag_scope": "n_subnet_id"} - return _lsn_port_get(cluster, lsn_id, filters) - - -def lsn_port_info_get(cluster, lsn_id, lsn_port_id): - result = nsxlib.do_request(HTTP_GET, - nsxlib._build_uri_path( - LSERVICESNODEPORT_RESOURCE, - parent_resource_id=lsn_id, - resource_id=lsn_port_id), - cluster=cluster) - for tag in result['tags']: - if tag['scope'] == 'n_subnet_id': - result['subnet_id'] = tag['tag'] - break - return result - - -def lsn_port_plug_network(cluster, lsn_id, lsn_port_id, lswitch_port_id): - patch_obj = { - "type": "PatchAttachment", - "peer_port_uuid": lswitch_port_id - } - try: - nsxlib.do_request(HTTP_PUT, - nsxlib._build_uri_path(LSERVICESNODEPORT_RESOURCE, - parent_resource_id=lsn_id, - resource_id=lsn_port_id, - is_attachment=True), - json.dumps(patch_obj), - cluster=cluster) - except api_exc.Conflict: - # This restriction might be lifted at some point - msg = (_("Attempt to plug Logical Services Node %(lsn)s into " - "network with port %(port)s failed. 
PatchAttachment " - "already exists with another port") % - {'lsn': lsn_id, 'port': lswitch_port_id}) - LOG.exception(msg) - raise nsx_exc.LsnConfigurationConflict(lsn_id=lsn_id) - - -def _lsn_configure_action( - cluster, lsn_id, action, is_enabled, obj): - lsn_obj = {"enabled": is_enabled} - lsn_obj.update(obj) - nsxlib.do_request(HTTP_PUT, - nsxlib._build_uri_path(LSERVICESNODE_RESOURCE, - resource_id=lsn_id, - extra_action=action), - json.dumps(lsn_obj), - cluster=cluster) - - -def _lsn_port_configure_action( - cluster, lsn_id, lsn_port_id, action, is_enabled, obj): - nsxlib.do_request(HTTP_PUT, - nsxlib._build_uri_path(LSERVICESNODE_RESOURCE, - resource_id=lsn_id, - extra_action=action), - json.dumps({"enabled": is_enabled}), - cluster=cluster) - nsxlib.do_request(HTTP_PUT, - nsxlib._build_uri_path(LSERVICESNODEPORT_RESOURCE, - parent_resource_id=lsn_id, - resource_id=lsn_port_id, - extra_action=action), - json.dumps(obj), - cluster=cluster) - - -def _get_opts(name, value): - return {"name": name, "value": str(value)} - - -def lsn_port_dhcp_configure( - cluster, lsn_id, lsn_port_id, is_enabled=True, dhcp_options=None): - dhcp_options = dhcp_options or {} - opts = [_get_opts(key, val) for key, val in dhcp_options.iteritems()] - dhcp_obj = {'options': opts} - _lsn_port_configure_action( - cluster, lsn_id, lsn_port_id, 'dhcp', is_enabled, dhcp_obj) - - -def lsn_metadata_configure( - cluster, lsn_id, is_enabled=True, metadata_info=None): - meta_obj = { - 'metadata_server_ip': metadata_info['metadata_server_ip'], - 'metadata_server_port': metadata_info['metadata_server_port'], - } - if metadata_info: - opts = [ - _get_opts(opt, metadata_info[opt]) - for opt in SUPPORTED_METADATA_OPTIONS - if metadata_info.get(opt) - ] - if opts: - meta_obj["options"] = opts - _lsn_configure_action( - cluster, lsn_id, 'metadata-proxy', is_enabled, meta_obj) - - -def _lsn_port_host_action( - cluster, lsn_id, lsn_port_id, host_obj, extra_action, action): - nsxlib.do_request(HTTP_POST, 
- nsxlib._build_uri_path(LSERVICESNODEPORT_RESOURCE, - parent_resource_id=lsn_id, - resource_id=lsn_port_id, - extra_action=extra_action, - filters={"action": action}), - json.dumps(host_obj), - cluster=cluster) - - -def lsn_port_dhcp_host_add(cluster, lsn_id, lsn_port_id, host_data): - _lsn_port_host_action( - cluster, lsn_id, lsn_port_id, host_data, 'dhcp', 'add_host') - - -def lsn_port_dhcp_host_remove(cluster, lsn_id, lsn_port_id, host_data): - _lsn_port_host_action( - cluster, lsn_id, lsn_port_id, host_data, 'dhcp', 'remove_host') - - -def lsn_port_metadata_host_add(cluster, lsn_id, lsn_port_id, host_data): - _lsn_port_host_action( - cluster, lsn_id, lsn_port_id, host_data, 'metadata-proxy', 'add_host') - - -def lsn_port_metadata_host_remove(cluster, lsn_id, lsn_port_id, host_data): - _lsn_port_host_action(cluster, lsn_id, lsn_port_id, - host_data, 'metadata-proxy', 'remove_host') diff --git a/neutron/plugins/vmware/nsxlib/queue.py b/neutron/plugins/vmware/nsxlib/queue.py deleted file mode 100644 index 708a210b6..000000000 --- a/neutron/plugins/vmware/nsxlib/queue.py +++ /dev/null @@ -1,71 +0,0 @@ -# Copyright 2014 VMware, Inc. -# All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from neutron.api.v2 import attributes as attr -from neutron.common import exceptions as exception -from neutron.openstack.common import excutils -from neutron.openstack.common import jsonutils -from neutron.openstack.common import log -from neutron.plugins.vmware.api_client import exception as api_exc -from neutron.plugins.vmware.common import utils -from neutron.plugins.vmware import nsxlib - -HTTP_POST = "POST" -HTTP_DELETE = "DELETE" - -LQUEUE_RESOURCE = "lqueue" - -LOG = log.getLogger(__name__) - - -def create_lqueue(cluster, queue_data): - params = { - 'name': 'display_name', - 'qos_marking': 'qos_marking', - 'min': 'min_bandwidth_rate', - 'max': 'max_bandwidth_rate', - 'dscp': 'dscp' - } - queue_obj = dict( - (nsx_name, queue_data.get(api_name)) - for api_name, nsx_name in params.iteritems() - if attr.is_attr_set(queue_data.get(api_name)) - ) - if 'display_name' in queue_obj: - queue_obj['display_name'] = utils.check_and_truncate( - queue_obj['display_name']) - - queue_obj['tags'] = utils.get_tags() - try: - return nsxlib.do_request(HTTP_POST, - nsxlib._build_uri_path(LQUEUE_RESOURCE), - jsonutils.dumps(queue_obj), - cluster=cluster)['uuid'] - except api_exc.NsxApiException: - # FIXME(salv-orlando): This should not raise NeutronException - with excutils.save_and_reraise_exception(): - raise exception.NeutronException() - - -def delete_lqueue(cluster, queue_id): - try: - nsxlib.do_request(HTTP_DELETE, - nsxlib._build_uri_path(LQUEUE_RESOURCE, - resource_id=queue_id), - cluster=cluster) - except Exception: - # FIXME(salv-orlando): This should not raise NeutronException - with excutils.save_and_reraise_exception(): - raise exception.NeutronException() diff --git a/neutron/plugins/vmware/nsxlib/router.py b/neutron/plugins/vmware/nsxlib/router.py deleted file mode 100644 index 52d34299f..000000000 --- a/neutron/plugins/vmware/nsxlib/router.py +++ /dev/null @@ -1,689 +0,0 @@ -# Copyright 2014 VMware, Inc. 
-# All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo.config import cfg - -from neutron.common import exceptions as exception -from neutron.openstack.common import excutils -from neutron.openstack.common import jsonutils -from neutron.openstack.common import log -from neutron.plugins.vmware.api_client import exception as api_exc -from neutron.plugins.vmware.common import exceptions as nsx_exc -from neutron.plugins.vmware.common import utils -from neutron.plugins.vmware import nsxlib -from neutron.plugins.vmware.nsxlib import switch -from neutron.plugins.vmware.nsxlib import versioning - -HTTP_GET = "GET" -HTTP_POST = "POST" -HTTP_DELETE = "DELETE" -HTTP_PUT = "PUT" - -LROUTER_RESOURCE = "lrouter" -LROUTER_RESOURCE = "lrouter" -LROUTERPORT_RESOURCE = "lport/%s" % LROUTER_RESOURCE -LROUTERRIB_RESOURCE = "rib/%s" % LROUTER_RESOURCE -LROUTERNAT_RESOURCE = "nat/lrouter" -# Constants for NAT rules -MATCH_KEYS = ["destination_ip_addresses", "destination_port_max", - "destination_port_min", "source_ip_addresses", - "source_port_max", "source_port_min", "protocol"] - -LOG = log.getLogger(__name__) - - -def _prepare_lrouter_body(name, neutron_router_id, tenant_id, - router_type, distributed=None, **kwargs): - body = { - "display_name": utils.check_and_truncate(name), - "tags": utils.get_tags(os_tid=tenant_id, - q_router_id=neutron_router_id), - "routing_config": { - "type": router_type - }, - "type": "LogicalRouterConfig", - "replication_mode": 
cfg.CONF.NSX.replication_mode, - } - # add the distributed key only if not None (ie: True or False) - if distributed is not None: - body['distributed'] = distributed - if kwargs: - body["routing_config"].update(kwargs) - return body - - -def _create_implicit_routing_lrouter(cluster, neutron_router_id, tenant_id, - display_name, nexthop, distributed=None): - implicit_routing_config = { - "default_route_next_hop": { - "gateway_ip_address": nexthop, - "type": "RouterNextHop" - }, - } - lrouter_obj = _prepare_lrouter_body( - display_name, neutron_router_id, tenant_id, - "SingleDefaultRouteImplicitRoutingConfig", - distributed=distributed, - **implicit_routing_config) - return nsxlib.do_request(HTTP_POST, - nsxlib._build_uri_path(LROUTER_RESOURCE), - jsonutils.dumps(lrouter_obj), cluster=cluster) - - -def create_implicit_routing_lrouter(cluster, neutron_router_id, tenant_id, - display_name, nexthop): - """Create a NSX logical router on the specified cluster. - - :param cluster: The target NSX cluster - :param tenant_id: Identifier of the Openstack tenant for which - the logical router is being created - :param display_name: Descriptive name of this logical router - :param nexthop: External gateway IP address for the logical router - :raise NsxApiException: if there is a problem while communicating - with the NSX controller - """ - return _create_implicit_routing_lrouter( - cluster, neutron_router_id, tenant_id, display_name, nexthop) - - -def create_implicit_routing_lrouter_with_distribution( - cluster, neutron_router_id, tenant_id, display_name, - nexthop, distributed=None): - """Create a NSX logical router on the specified cluster. 
- - This function also allows for creating distributed lrouters - :param cluster: The target NSX cluster - :param tenant_id: Identifier of the Openstack tenant for which - the logical router is being created - :param display_name: Descriptive name of this logical router - :param nexthop: External gateway IP address for the logical router - :param distributed: True for distributed logical routers - :raise NsxApiException: if there is a problem while communicating - with the NSX controller - """ - return _create_implicit_routing_lrouter( - cluster, neutron_router_id, tenant_id, - display_name, nexthop, distributed) - - -def create_explicit_routing_lrouter(cluster, neutron_router_id, tenant_id, - display_name, nexthop, distributed=None): - lrouter_obj = _prepare_lrouter_body( - display_name, neutron_router_id, tenant_id, - "RoutingTableRoutingConfig", distributed=distributed) - router = nsxlib.do_request(HTTP_POST, - nsxlib._build_uri_path(LROUTER_RESOURCE), - jsonutils.dumps(lrouter_obj), cluster=cluster) - default_gw = {'prefix': '0.0.0.0/0', 'next_hop_ip': nexthop} - create_explicit_route_lrouter(cluster, router['uuid'], default_gw) - return router - - -def delete_lrouter(cluster, lrouter_id): - nsxlib.do_request(HTTP_DELETE, - nsxlib._build_uri_path(LROUTER_RESOURCE, - resource_id=lrouter_id), - cluster=cluster) - - -def get_lrouter(cluster, lrouter_id): - return nsxlib.do_request(HTTP_GET, - nsxlib._build_uri_path( - LROUTER_RESOURCE, - resource_id=lrouter_id, - relations='LogicalRouterStatus'), - cluster=cluster) - - -def query_lrouters(cluster, fields=None, filters=None): - return nsxlib.get_all_query_pages( - nsxlib._build_uri_path(LROUTER_RESOURCE, - fields=fields, - relations='LogicalRouterStatus', - filters=filters), - cluster) - - -def get_lrouters(cluster, tenant_id, fields=None, filters=None): - # FIXME(salv-orlando): Fields parameter is ignored in this routine - actual_filters = {} - if filters: - actual_filters.update(filters) - if tenant_id: - 
actual_filters['tag'] = tenant_id - actual_filters['tag_scope'] = 'os_tid' - lrouter_fields = "uuid,display_name,fabric_status,tags" - return query_lrouters(cluster, lrouter_fields, actual_filters) - - -def update_implicit_routing_lrouter(cluster, r_id, display_name, nexthop): - lrouter_obj = get_lrouter(cluster, r_id) - if not display_name and not nexthop: - # Nothing to update - return lrouter_obj - # It seems that this is faster than the doing an if on display_name - lrouter_obj["display_name"] = (utils.check_and_truncate(display_name) or - lrouter_obj["display_name"]) - if nexthop: - nh_element = lrouter_obj["routing_config"].get( - "default_route_next_hop") - if nh_element: - nh_element["gateway_ip_address"] = nexthop - return nsxlib.do_request(HTTP_PUT, - nsxlib._build_uri_path(LROUTER_RESOURCE, - resource_id=r_id), - jsonutils.dumps(lrouter_obj), - cluster=cluster) - - -def get_explicit_routes_lrouter(cluster, router_id, protocol_type='static'): - static_filter = {'protocol': protocol_type} - existing_routes = nsxlib.do_request( - HTTP_GET, - nsxlib._build_uri_path(LROUTERRIB_RESOURCE, - filters=static_filter, - fields="*", - parent_resource_id=router_id), - cluster=cluster)['results'] - return existing_routes - - -def delete_explicit_route_lrouter(cluster, router_id, route_id): - nsxlib.do_request(HTTP_DELETE, - nsxlib._build_uri_path(LROUTERRIB_RESOURCE, - resource_id=route_id, - parent_resource_id=router_id), - cluster=cluster) - - -def create_explicit_route_lrouter(cluster, router_id, route): - next_hop_ip = route.get("nexthop") or route.get("next_hop_ip") - prefix = route.get("destination") or route.get("prefix") - uuid = nsxlib.do_request( - HTTP_POST, - nsxlib._build_uri_path(LROUTERRIB_RESOURCE, - parent_resource_id=router_id), - jsonutils.dumps({ - "action": "accept", - "next_hop_ip": next_hop_ip, - "prefix": prefix, - "protocol": "static" - }), - cluster=cluster)['uuid'] - return uuid - - -def update_explicit_routes_lrouter(cluster, router_id, 
routes): - # Update in bulk: delete them all, and add the ones specified - # but keep track of what is been modified to allow roll-backs - # in case of failures - nsx_routes = get_explicit_routes_lrouter(cluster, router_id) - try: - deleted_routes = [] - added_routes = [] - # omit the default route (0.0.0.0/0) from the processing; - # this must be handled through the nexthop for the router - for route in nsx_routes: - prefix = route.get("destination") or route.get("prefix") - if prefix != '0.0.0.0/0': - delete_explicit_route_lrouter(cluster, - router_id, - route['uuid']) - deleted_routes.append(route) - for route in routes: - prefix = route.get("destination") or route.get("prefix") - if prefix != '0.0.0.0/0': - uuid = create_explicit_route_lrouter(cluster, - router_id, route) - added_routes.append(uuid) - except api_exc.NsxApiException: - LOG.exception(_('Cannot update NSX routes %(routes)s for ' - 'router %(router_id)s'), - {'routes': routes, 'router_id': router_id}) - # Roll back to keep NSX in consistent state - with excutils.save_and_reraise_exception(): - if nsx_routes: - if deleted_routes: - for route in deleted_routes: - create_explicit_route_lrouter(cluster, - router_id, route) - if added_routes: - for route_id in added_routes: - delete_explicit_route_lrouter(cluster, - router_id, route_id) - return nsx_routes - - -def get_default_route_explicit_routing_lrouter_v33(cluster, router_id): - static_filter = {"protocol": "static", - "prefix": "0.0.0.0/0"} - default_route = nsxlib.do_request( - HTTP_GET, - nsxlib._build_uri_path(LROUTERRIB_RESOURCE, - filters=static_filter, - fields="*", - parent_resource_id=router_id), - cluster=cluster)["results"][0] - return default_route - - -def get_default_route_explicit_routing_lrouter_v32(cluster, router_id): - # Scan all routes because 3.2 does not support query by prefix - all_routes = get_explicit_routes_lrouter(cluster, router_id) - for route in all_routes: - if route['prefix'] == '0.0.0.0/0': - return route - - -def 
update_default_gw_explicit_routing_lrouter(cluster, router_id, next_hop): - default_route = get_default_route_explicit_routing_lrouter(cluster, - router_id) - if next_hop != default_route["next_hop_ip"]: - new_default_route = {"action": "accept", - "next_hop_ip": next_hop, - "prefix": "0.0.0.0/0", - "protocol": "static"} - nsxlib.do_request(HTTP_PUT, - nsxlib._build_uri_path( - LROUTERRIB_RESOURCE, - resource_id=default_route['uuid'], - parent_resource_id=router_id), - jsonutils.dumps(new_default_route), - cluster=cluster) - - -def update_explicit_routing_lrouter(cluster, router_id, - display_name, next_hop, routes=None): - update_implicit_routing_lrouter(cluster, router_id, display_name, next_hop) - if next_hop: - update_default_gw_explicit_routing_lrouter(cluster, - router_id, next_hop) - if routes is not None: - return update_explicit_routes_lrouter(cluster, router_id, routes) - - -def query_lrouter_lports(cluster, lr_uuid, fields="*", - filters=None, relations=None): - uri = nsxlib._build_uri_path(LROUTERPORT_RESOURCE, - parent_resource_id=lr_uuid, - fields=fields, filters=filters, - relations=relations) - return nsxlib.do_request(HTTP_GET, uri, cluster=cluster)['results'] - - -def create_router_lport(cluster, lrouter_uuid, tenant_id, neutron_port_id, - display_name, admin_status_enabled, ip_addresses, - mac_address=None): - """Creates a logical port on the assigned logical router.""" - lport_obj = dict( - admin_status_enabled=admin_status_enabled, - display_name=display_name, - tags=utils.get_tags(os_tid=tenant_id, q_port_id=neutron_port_id), - ip_addresses=ip_addresses, - type="LogicalRouterPortConfig" - ) - # Only add the mac_address to lport_obj if present. This is because - # when creating the fake_ext_gw there is no mac_address present. 
- if mac_address: - lport_obj['mac_address'] = mac_address - path = nsxlib._build_uri_path(LROUTERPORT_RESOURCE, - parent_resource_id=lrouter_uuid) - result = nsxlib.do_request(HTTP_POST, path, jsonutils.dumps(lport_obj), - cluster=cluster) - - LOG.debug(_("Created logical port %(lport_uuid)s on " - "logical router %(lrouter_uuid)s"), - {'lport_uuid': result['uuid'], - 'lrouter_uuid': lrouter_uuid}) - return result - - -def update_router_lport(cluster, lrouter_uuid, lrouter_port_uuid, - tenant_id, neutron_port_id, display_name, - admin_status_enabled, ip_addresses): - """Updates a logical port on the assigned logical router.""" - lport_obj = dict( - admin_status_enabled=admin_status_enabled, - display_name=display_name, - tags=utils.get_tags(os_tid=tenant_id, q_port_id=neutron_port_id), - ip_addresses=ip_addresses, - type="LogicalRouterPortConfig" - ) - # Do not pass null items to NSX - for key in lport_obj.keys(): - if lport_obj[key] is None: - del lport_obj[key] - path = nsxlib._build_uri_path(LROUTERPORT_RESOURCE, - lrouter_port_uuid, - parent_resource_id=lrouter_uuid) - result = nsxlib.do_request(HTTP_PUT, path, - jsonutils.dumps(lport_obj), - cluster=cluster) - LOG.debug(_("Updated logical port %(lport_uuid)s on " - "logical router %(lrouter_uuid)s"), - {'lport_uuid': lrouter_port_uuid, 'lrouter_uuid': lrouter_uuid}) - return result - - -def delete_router_lport(cluster, lrouter_uuid, lport_uuid): - """Creates a logical port on the assigned logical router.""" - path = nsxlib._build_uri_path(LROUTERPORT_RESOURCE, lport_uuid, - lrouter_uuid) - nsxlib.do_request(HTTP_DELETE, path, cluster=cluster) - LOG.debug(_("Delete logical router port %(lport_uuid)s on " - "logical router %(lrouter_uuid)s"), - {'lport_uuid': lport_uuid, - 'lrouter_uuid': lrouter_uuid}) - - -def delete_peer_router_lport(cluster, lr_uuid, ls_uuid, lp_uuid): - nsx_port = switch.get_port(cluster, ls_uuid, lp_uuid, - relations="LogicalPortAttachment") - relations = nsx_port.get('_relations') - if 
relations: - att_data = relations.get('LogicalPortAttachment') - if att_data: - lrp_uuid = att_data.get('peer_port_uuid') - if lrp_uuid: - delete_router_lport(cluster, lr_uuid, lrp_uuid) - - -def find_router_gw_port(context, cluster, router_id): - """Retrieves the external gateway port for a NSX logical router.""" - - # Find the uuid of nsx ext gw logical router port - # TODO(salvatore-orlando): Consider storing it in Neutron DB - results = query_lrouter_lports( - cluster, router_id, - relations="LogicalPortAttachment") - for lport in results: - if '_relations' in lport: - attachment = lport['_relations'].get('LogicalPortAttachment') - if attachment and attachment.get('type') == 'L3GatewayAttachment': - return lport - - -def plug_router_port_attachment(cluster, router_id, port_id, - attachment_uuid, nsx_attachment_type, - attachment_vlan=None): - """Attach a router port to the given attachment. - - Current attachment types: - - PatchAttachment [-> logical switch port uuid] - - L3GatewayAttachment [-> L3GatewayService uuid] - For the latter attachment type a VLAN ID can be specified as well. 
- """ - uri = nsxlib._build_uri_path(LROUTERPORT_RESOURCE, port_id, router_id, - is_attachment=True) - attach_obj = {} - attach_obj["type"] = nsx_attachment_type - if nsx_attachment_type == "PatchAttachment": - attach_obj["peer_port_uuid"] = attachment_uuid - elif nsx_attachment_type == "L3GatewayAttachment": - attach_obj["l3_gateway_service_uuid"] = attachment_uuid - if attachment_vlan: - attach_obj['vlan_id'] = attachment_vlan - else: - raise nsx_exc.InvalidAttachmentType( - attachment_type=nsx_attachment_type) - return nsxlib.do_request( - HTTP_PUT, uri, jsonutils.dumps(attach_obj), cluster=cluster) - - -def _create_nat_match_obj(**kwargs): - nat_match_obj = {'ethertype': 'IPv4'} - delta = set(kwargs.keys()) - set(MATCH_KEYS) - if delta: - raise Exception(_("Invalid keys for NAT match: %s"), delta) - nat_match_obj.update(kwargs) - return nat_match_obj - - -def _create_lrouter_nat_rule(cluster, router_id, nat_rule_obj): - LOG.debug(_("Creating NAT rule: %s"), nat_rule_obj) - uri = nsxlib._build_uri_path(LROUTERNAT_RESOURCE, - parent_resource_id=router_id) - return nsxlib.do_request(HTTP_POST, uri, jsonutils.dumps(nat_rule_obj), - cluster=cluster) - - -def _build_snat_rule_obj(min_src_ip, max_src_ip, nat_match_obj): - return {"to_source_ip_address_min": min_src_ip, - "to_source_ip_address_max": max_src_ip, - "type": "SourceNatRule", - "match": nat_match_obj} - - -def create_lrouter_nosnat_rule_v2(cluster, _router_id, _match_criteria=None): - LOG.info(_("No SNAT rules cannot be applied as they are not available in " - "this version of the NSX platform")) - - -def create_lrouter_nodnat_rule_v2(cluster, _router_id, _match_criteria=None): - LOG.info(_("No DNAT rules cannot be applied as they are not available in " - "this version of the NSX platform")) - - -def create_lrouter_snat_rule_v2(cluster, router_id, - min_src_ip, max_src_ip, match_criteria=None): - - nat_match_obj = _create_nat_match_obj(**match_criteria) - nat_rule_obj = _build_snat_rule_obj(min_src_ip, 
max_src_ip, nat_match_obj) - return _create_lrouter_nat_rule(cluster, router_id, nat_rule_obj) - - -def create_lrouter_dnat_rule_v2(cluster, router_id, dst_ip, - to_dst_port=None, match_criteria=None): - - nat_match_obj = _create_nat_match_obj(**match_criteria) - nat_rule_obj = { - "to_destination_ip_address_min": dst_ip, - "to_destination_ip_address_max": dst_ip, - "type": "DestinationNatRule", - "match": nat_match_obj - } - if to_dst_port: - nat_rule_obj['to_destination_port'] = to_dst_port - return _create_lrouter_nat_rule(cluster, router_id, nat_rule_obj) - - -def create_lrouter_nosnat_rule_v3(cluster, router_id, order=None, - match_criteria=None): - nat_match_obj = _create_nat_match_obj(**match_criteria) - nat_rule_obj = { - "type": "NoSourceNatRule", - "match": nat_match_obj - } - if order: - nat_rule_obj['order'] = order - return _create_lrouter_nat_rule(cluster, router_id, nat_rule_obj) - - -def create_lrouter_nodnat_rule_v3(cluster, router_id, order=None, - match_criteria=None): - nat_match_obj = _create_nat_match_obj(**match_criteria) - nat_rule_obj = { - "type": "NoDestinationNatRule", - "match": nat_match_obj - } - if order: - nat_rule_obj['order'] = order - return _create_lrouter_nat_rule(cluster, router_id, nat_rule_obj) - - -def create_lrouter_snat_rule_v3(cluster, router_id, min_src_ip, max_src_ip, - order=None, match_criteria=None): - nat_match_obj = _create_nat_match_obj(**match_criteria) - nat_rule_obj = _build_snat_rule_obj(min_src_ip, max_src_ip, nat_match_obj) - if order: - nat_rule_obj['order'] = order - return _create_lrouter_nat_rule(cluster, router_id, nat_rule_obj) - - -def create_lrouter_dnat_rule_v3(cluster, router_id, dst_ip, to_dst_port=None, - order=None, match_criteria=None): - - nat_match_obj = _create_nat_match_obj(**match_criteria) - nat_rule_obj = { - "to_destination_ip_address": dst_ip, - "type": "DestinationNatRule", - "match": nat_match_obj - } - if to_dst_port: - nat_rule_obj['to_destination_port'] = to_dst_port - if order: 
- nat_rule_obj['order'] = order - return _create_lrouter_nat_rule(cluster, router_id, nat_rule_obj) - - -def delete_nat_rules_by_match(cluster, router_id, rule_type, - max_num_expected, - min_num_expected=0, - **kwargs): - # remove nat rules - nat_rules = query_nat_rules(cluster, router_id) - to_delete_ids = [] - for r in nat_rules: - if (r['type'] != rule_type): - continue - - for key, value in kwargs.iteritems(): - if not (key in r['match'] and r['match'][key] == value): - break - else: - to_delete_ids.append(r['uuid']) - if not (len(to_delete_ids) in - range(min_num_expected, max_num_expected + 1)): - raise nsx_exc.NatRuleMismatch(actual_rules=len(to_delete_ids), - min_rules=min_num_expected, - max_rules=max_num_expected) - - for rule_id in to_delete_ids: - delete_router_nat_rule(cluster, router_id, rule_id) - - -def delete_router_nat_rule(cluster, router_id, rule_id): - uri = nsxlib._build_uri_path(LROUTERNAT_RESOURCE, rule_id, router_id) - nsxlib.do_request(HTTP_DELETE, uri, cluster=cluster) - - -def query_nat_rules(cluster, router_id, fields="*", filters=None): - uri = nsxlib._build_uri_path(LROUTERNAT_RESOURCE, - parent_resource_id=router_id, - fields=fields, filters=filters) - return nsxlib.get_all_query_pages(uri, cluster) - - -# NOTE(salvatore-orlando): The following FIXME applies in general to -# each operation on list attributes. 
-# FIXME(salvatore-orlando): need a lock around the list of IPs on an iface -def update_lrouter_port_ips(cluster, lrouter_id, lport_id, - ips_to_add, ips_to_remove): - uri = nsxlib._build_uri_path(LROUTERPORT_RESOURCE, lport_id, lrouter_id) - try: - port = nsxlib.do_request(HTTP_GET, uri, cluster=cluster) - # TODO(salvatore-orlando): Enforce ips_to_add intersection with - # ips_to_remove is empty - ip_address_set = set(port['ip_addresses']) - ip_address_set = ip_address_set - set(ips_to_remove) - ip_address_set = ip_address_set | set(ips_to_add) - # Set is not JSON serializable - convert to list - port['ip_addresses'] = list(ip_address_set) - nsxlib.do_request(HTTP_PUT, uri, jsonutils.dumps(port), - cluster=cluster) - except exception.NotFound: - # FIXME(salv-orlando):avoid raising different exception - data = {'lport_id': lport_id, 'lrouter_id': lrouter_id} - msg = (_("Router Port %(lport_id)s not found on router " - "%(lrouter_id)s") % data) - LOG.exception(msg) - raise nsx_exc.NsxPluginException(err_msg=msg) - except api_exc.NsxApiException as e: - msg = _("An exception occurred while updating IP addresses on a " - "router logical port:%s") % str(e) - LOG.exception(msg) - raise nsx_exc.NsxPluginException(err_msg=msg) - - -ROUTER_FUNC_DICT = { - 'create_lrouter': { - 2: {versioning.DEFAULT_VERSION: create_implicit_routing_lrouter, }, - 3: {versioning.DEFAULT_VERSION: create_implicit_routing_lrouter, - 1: create_implicit_routing_lrouter_with_distribution, - 2: create_explicit_routing_lrouter, }, }, - 'update_lrouter': { - 2: {versioning.DEFAULT_VERSION: update_implicit_routing_lrouter, }, - 3: {versioning.DEFAULT_VERSION: update_implicit_routing_lrouter, - 2: update_explicit_routing_lrouter, }, }, - 'create_lrouter_dnat_rule': { - 2: {versioning.DEFAULT_VERSION: create_lrouter_dnat_rule_v2, }, - 3: {versioning.DEFAULT_VERSION: create_lrouter_dnat_rule_v3, }, }, - 'create_lrouter_snat_rule': { - 2: {versioning.DEFAULT_VERSION: create_lrouter_snat_rule_v2, }, - 3: 
{versioning.DEFAULT_VERSION: create_lrouter_snat_rule_v3, }, }, - 'create_lrouter_nosnat_rule': { - 2: {versioning.DEFAULT_VERSION: create_lrouter_nosnat_rule_v2, }, - 3: {versioning.DEFAULT_VERSION: create_lrouter_nosnat_rule_v3, }, }, - 'create_lrouter_nodnat_rule': { - 2: {versioning.DEFAULT_VERSION: create_lrouter_nodnat_rule_v2, }, - 3: {versioning.DEFAULT_VERSION: create_lrouter_nodnat_rule_v3, }, }, - 'get_default_route_explicit_routing_lrouter': { - 3: {versioning.DEFAULT_VERSION: - get_default_route_explicit_routing_lrouter_v32, - 2: get_default_route_explicit_routing_lrouter_v32, }, }, -} - - -@versioning.versioned(ROUTER_FUNC_DICT) -def create_lrouter(cluster, *args, **kwargs): - if kwargs.get('distributed', None): - v = cluster.api_client.get_version() - if (v.major, v.minor) < (3, 1): - raise nsx_exc.InvalidVersion(version=v) - return v - - -@versioning.versioned(ROUTER_FUNC_DICT) -def get_default_route_explicit_routing_lrouter(cluster, *args, **kwargs): - pass - - -@versioning.versioned(ROUTER_FUNC_DICT) -def update_lrouter(cluster, *args, **kwargs): - if kwargs.get('routes', None): - v = cluster.api_client.get_version() - if (v.major, v.minor) < (3, 2): - raise nsx_exc.InvalidVersion(version=v) - return v - - -@versioning.versioned(ROUTER_FUNC_DICT) -def create_lrouter_dnat_rule(cluster, *args, **kwargs): - pass - - -@versioning.versioned(ROUTER_FUNC_DICT) -def create_lrouter_snat_rule(cluster, *args, **kwargs): - pass - - -@versioning.versioned(ROUTER_FUNC_DICT) -def create_lrouter_nosnat_rule(cluster, *args, **kwargs): - pass - - -@versioning.versioned(ROUTER_FUNC_DICT) -def create_lrouter_nodnat_rule(cluster, *args, **kwargs): - pass diff --git a/neutron/plugins/vmware/nsxlib/secgroup.py b/neutron/plugins/vmware/nsxlib/secgroup.py deleted file mode 100644 index 6c9ba5e2f..000000000 --- a/neutron/plugins/vmware/nsxlib/secgroup.py +++ /dev/null @@ -1,141 +0,0 @@ -# Copyright 2014 VMware, Inc. 
-# All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from neutron.common import constants -from neutron.common import exceptions -from neutron.openstack.common import excutils -from neutron.openstack.common import jsonutils as json -from neutron.openstack.common import log -from neutron.plugins.vmware.common import utils -from neutron.plugins.vmware import nsxlib - -HTTP_GET = "GET" -HTTP_POST = "POST" -HTTP_DELETE = "DELETE" -HTTP_PUT = "PUT" - -SECPROF_RESOURCE = "security-profile" - -LOG = log.getLogger(__name__) - - -def mk_body(**kwargs): - """Convenience function creates and dumps dictionary to string. - - :param kwargs: the key/value pirs to be dumped into a json string. - :returns: a json string. - """ - return json.dumps(kwargs, ensure_ascii=False) - - -def query_security_profiles(cluster, fields=None, filters=None): - return nsxlib.get_all_query_pages( - nsxlib._build_uri_path(SECPROF_RESOURCE, - fields=fields, - filters=filters), - cluster) - - -def create_security_profile(cluster, tenant_id, neutron_id, security_profile): - """Create a security profile on the NSX backend. - - :param cluster: a NSX cluster object reference - :param tenant_id: identifier of the Neutron tenant - :param neutron_id: neutron security group identifier - :param security_profile: dictionary with data for - configuring the NSX security profile. 
- """ - path = "/ws.v1/security-profile" - # Allow all dhcp responses and all ingress traffic - hidden_rules = {'logical_port_egress_rules': - [{'ethertype': 'IPv4', - 'protocol': constants.PROTO_NUM_UDP, - 'port_range_min': constants.DHCP_RESPONSE_PORT, - 'port_range_max': constants.DHCP_RESPONSE_PORT, - 'ip_prefix': '0.0.0.0/0'}], - 'logical_port_ingress_rules': - [{'ethertype': 'IPv4'}, - {'ethertype': 'IPv6'}]} - display_name = utils.check_and_truncate(security_profile.get('name')) - # NOTE(salv-orlando): neutron-id tags are prepended with 'q' for - # historical reasons - body = mk_body( - tags=utils.get_tags(os_tid=tenant_id, q_sec_group_id=neutron_id), - display_name=display_name, - logical_port_ingress_rules=( - hidden_rules['logical_port_ingress_rules']), - logical_port_egress_rules=hidden_rules['logical_port_egress_rules'] - ) - rsp = nsxlib.do_request(HTTP_POST, path, body, cluster=cluster) - if security_profile.get('name') == 'default': - # If security group is default allow ip traffic between - # members of the same security profile is allowed and ingress traffic - # from the switch - rules = {'logical_port_egress_rules': [{'ethertype': 'IPv4', - 'profile_uuid': rsp['uuid']}, - {'ethertype': 'IPv6', - 'profile_uuid': rsp['uuid']}], - 'logical_port_ingress_rules': [{'ethertype': 'IPv4'}, - {'ethertype': 'IPv6'}]} - - update_security_group_rules(cluster, rsp['uuid'], rules) - LOG.debug(_("Created Security Profile: %s"), rsp) - return rsp - - -def update_security_group_rules(cluster, spid, rules): - path = "/ws.v1/security-profile/%s" % spid - - # Allow all dhcp responses in - rules['logical_port_egress_rules'].append( - {'ethertype': 'IPv4', 'protocol': constants.PROTO_NUM_UDP, - 'port_range_min': constants.DHCP_RESPONSE_PORT, - 'port_range_max': constants.DHCP_RESPONSE_PORT, - 'ip_prefix': '0.0.0.0/0'}) - # If there are no ingress rules add bunk rule to drop all ingress traffic - if not rules['logical_port_ingress_rules']: - 
rules['logical_port_ingress_rules'].append( - {'ethertype': 'IPv4', 'ip_prefix': '127.0.0.1/32'}) - try: - body = mk_body( - logical_port_ingress_rules=rules['logical_port_ingress_rules'], - logical_port_egress_rules=rules['logical_port_egress_rules']) - rsp = nsxlib.do_request(HTTP_PUT, path, body, cluster=cluster) - except exceptions.NotFound as e: - LOG.error(nsxlib.format_exception("Unknown", e, locals())) - #FIXME(salvatore-orlando): This should not raise NeutronException - raise exceptions.NeutronException() - LOG.debug(_("Updated Security Profile: %s"), rsp) - return rsp - - -def update_security_profile(cluster, spid, name): - return nsxlib.do_request( - HTTP_PUT, - nsxlib._build_uri_path(SECPROF_RESOURCE, resource_id=spid), - json.dumps({"display_name": utils.check_and_truncate(name)}), - cluster=cluster) - - -def delete_security_profile(cluster, spid): - path = "/ws.v1/security-profile/%s" % spid - - try: - nsxlib.do_request(HTTP_DELETE, path, cluster=cluster) - except exceptions.NotFound: - with excutils.save_and_reraise_exception(): - # This is not necessarily an error condition - LOG.warn(_("Unable to find security profile %s on NSX backend"), - spid) diff --git a/neutron/plugins/vmware/nsxlib/switch.py b/neutron/plugins/vmware/nsxlib/switch.py deleted file mode 100644 index e94791e6f..000000000 --- a/neutron/plugins/vmware/nsxlib/switch.py +++ /dev/null @@ -1,397 +0,0 @@ -# Copyright 2014 VMware, Inc. -# All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from oslo.config import cfg - -from neutron.common import constants -from neutron.common import exceptions as exception -from neutron.openstack.common import jsonutils as json -from neutron.openstack.common import log -from neutron.plugins.vmware.api_client import exception as api_exc -from neutron.plugins.vmware.common import exceptions as nsx_exc -from neutron.plugins.vmware.common import utils -from neutron.plugins.vmware import nsxlib - -HTTP_GET = "GET" -HTTP_POST = "POST" -HTTP_DELETE = "DELETE" -HTTP_PUT = "PUT" - -LSWITCH_RESOURCE = "lswitch" -LSWITCHPORT_RESOURCE = "lport/%s" % LSWITCH_RESOURCE - -LOG = log.getLogger(__name__) - - -def _configure_extensions(lport_obj, mac_address, fixed_ips, - port_security_enabled, security_profiles, - queue_id, mac_learning_enabled, - allowed_address_pairs): - lport_obj['allowed_address_pairs'] = [] - if port_security_enabled: - for fixed_ip in fixed_ips: - ip_address = fixed_ip.get('ip_address') - if ip_address: - lport_obj['allowed_address_pairs'].append( - {'mac_address': mac_address, 'ip_address': ip_address}) - # add address pair allowing src_ip 0.0.0.0 to leave - # this is required for outgoing dhcp request - lport_obj["allowed_address_pairs"].append( - {"mac_address": mac_address, - "ip_address": "0.0.0.0"}) - lport_obj['security_profiles'] = list(security_profiles or []) - lport_obj['queue_uuid'] = queue_id - if mac_learning_enabled is not None: - lport_obj["mac_learning"] = mac_learning_enabled - lport_obj["type"] = "LogicalSwitchPortConfig" - for address_pair in list(allowed_address_pairs or []): - lport_obj['allowed_address_pairs'].append( - {'mac_address': address_pair['mac_address'], - 'ip_address': address_pair['ip_address']}) - - -def get_lswitch_by_id(cluster, lswitch_id): - try: - lswitch_uri_path = nsxlib._build_uri_path( - LSWITCH_RESOURCE, lswitch_id, - relations="LogicalSwitchStatus") - 
return nsxlib.do_request(HTTP_GET, lswitch_uri_path, cluster=cluster) - except exception.NotFound: - # FIXME(salv-orlando): this should not raise a neutron exception - raise exception.NetworkNotFound(net_id=lswitch_id) - - -def get_lswitches(cluster, neutron_net_id): - - def lookup_switches_by_tag(): - # Fetch extra logical switches - lswitch_query_path = nsxlib._build_uri_path( - LSWITCH_RESOURCE, - fields="uuid,display_name,tags,lport_count", - relations="LogicalSwitchStatus", - filters={'tag': neutron_net_id, - 'tag_scope': 'quantum_net_id'}) - return nsxlib.get_all_query_pages(lswitch_query_path, cluster) - - lswitch_uri_path = nsxlib._build_uri_path(LSWITCH_RESOURCE, neutron_net_id, - relations="LogicalSwitchStatus") - results = [] - try: - ls = nsxlib.do_request(HTTP_GET, lswitch_uri_path, cluster=cluster) - results.append(ls) - for tag in ls['tags']: - if (tag['scope'] == "multi_lswitch" and - tag['tag'] == "True"): - results.extend(lookup_switches_by_tag()) - except exception.NotFound: - # This is legit if the neutron network was created using - # a post-Havana version of the plugin - results.extend(lookup_switches_by_tag()) - if results: - return results - else: - raise exception.NetworkNotFound(net_id=neutron_net_id) - - -def create_lswitch(cluster, neutron_net_id, tenant_id, display_name, - transport_zones_config, - shared=None, - **kwargs): - # The tag scope adopts a slightly different naming convention for - # historical reasons - lswitch_obj = {"display_name": utils.check_and_truncate(display_name), - "transport_zones": transport_zones_config, - "replication_mode": cfg.CONF.NSX.replication_mode, - "tags": utils.get_tags(os_tid=tenant_id, - quantum_net_id=neutron_net_id)} - # TODO(salv-orlando): Now that we have async status synchronization - # this tag is perhaps not needed anymore - if shared: - lswitch_obj["tags"].append({"tag": "true", - "scope": "shared"}) - if "tags" in kwargs: - lswitch_obj["tags"].extend(kwargs["tags"]) - uri = 
nsxlib._build_uri_path(LSWITCH_RESOURCE) - lswitch = nsxlib.do_request(HTTP_POST, uri, json.dumps(lswitch_obj), - cluster=cluster) - LOG.debug(_("Created logical switch: %s"), lswitch['uuid']) - return lswitch - - -def update_lswitch(cluster, lswitch_id, display_name, - tenant_id=None, **kwargs): - uri = nsxlib._build_uri_path(LSWITCH_RESOURCE, resource_id=lswitch_id) - lswitch_obj = {"display_name": utils.check_and_truncate(display_name)} - # NOTE: tag update will not 'merge' existing tags with new ones. - tags = [] - if tenant_id: - tags = utils.get_tags(os_tid=tenant_id) - # The 'tags' kwarg might existing and be None - tags.extend(kwargs.get('tags') or []) - if tags: - lswitch_obj['tags'] = tags - try: - return nsxlib.do_request(HTTP_PUT, uri, json.dumps(lswitch_obj), - cluster=cluster) - except exception.NotFound as e: - LOG.error(_("Network not found, Error: %s"), str(e)) - raise exception.NetworkNotFound(net_id=lswitch_id) - - -def delete_network(cluster, net_id, lswitch_id): - delete_networks(cluster, net_id, [lswitch_id]) - - -#TODO(salvatore-orlando): Simplify and harmonize -def delete_networks(cluster, net_id, lswitch_ids): - for ls_id in lswitch_ids: - path = "/ws.v1/lswitch/%s" % ls_id - try: - nsxlib.do_request(HTTP_DELETE, path, cluster=cluster) - except exception.NotFound as e: - LOG.error(_("Network not found, Error: %s"), str(e)) - raise exception.NetworkNotFound(net_id=ls_id) - - -def query_lswitch_lports(cluster, ls_uuid, fields="*", - filters=None, relations=None): - # Fix filter for attachments - if filters and "attachment" in filters: - filters['attachment_vif_uuid'] = filters["attachment"] - del filters['attachment'] - uri = nsxlib._build_uri_path(LSWITCHPORT_RESOURCE, - parent_resource_id=ls_uuid, - fields=fields, - filters=filters, - relations=relations) - return nsxlib.do_request(HTTP_GET, uri, cluster=cluster)['results'] - - -def delete_port(cluster, switch, port): - uri = "/ws.v1/lswitch/" + switch + "/lport/" + port - try: - 
nsxlib.do_request(HTTP_DELETE, uri, cluster=cluster) - except exception.NotFound: - LOG.exception(_("Port or Network not found")) - raise exception.PortNotFoundOnNetwork( - net_id=switch, port_id=port) - except api_exc.NsxApiException: - raise exception.NeutronException() - - -def get_ports(cluster, networks=None, devices=None, tenants=None): - vm_filter_obsolete = "" - vm_filter = "" - tenant_filter = "" - # This is used when calling delete_network. Neutron checks to see if - # the network has any ports. - if networks: - # FIXME (Aaron) If we get more than one network_id this won't work - lswitch = networks[0] - else: - lswitch = "*" - if devices: - for device_id in devices: - vm_filter_obsolete = '&'.join( - ["tag_scope=vm_id", - "tag=%s" % utils.device_id_to_vm_id(device_id, - obfuscate=True), - vm_filter_obsolete]) - vm_filter = '&'.join( - ["tag_scope=vm_id", - "tag=%s" % utils.device_id_to_vm_id(device_id), - vm_filter]) - if tenants: - for tenant in tenants: - tenant_filter = '&'.join( - ["tag_scope=os_tid", - "tag=%s" % tenant, - tenant_filter]) - - nsx_lports = {} - lport_fields_str = ("tags,admin_status_enabled,display_name," - "fabric_status_up") - try: - lport_query_path_obsolete = ( - "/ws.v1/lswitch/%s/lport?fields=%s&%s%stag_scope=q_port_id" - "&relations=LogicalPortStatus" % - (lswitch, lport_fields_str, vm_filter_obsolete, tenant_filter)) - lport_query_path = ( - "/ws.v1/lswitch/%s/lport?fields=%s&%s%stag_scope=q_port_id" - "&relations=LogicalPortStatus" % - (lswitch, lport_fields_str, vm_filter, tenant_filter)) - try: - # NOTE(armando-migliaccio): by querying with obsolete tag first - # current deployments won't take the performance hit of a double - # call. 
In release L-** or M-**, we might want to swap the calls - # as it's likely that ports with the new tag would outnumber the - # ones with the old tag - ports = nsxlib.get_all_query_pages(lport_query_path_obsolete, - cluster) - if not ports: - ports = nsxlib.get_all_query_pages(lport_query_path, cluster) - except exception.NotFound: - LOG.warn(_("Lswitch %s not found in NSX"), lswitch) - ports = None - - if ports: - for port in ports: - for tag in port["tags"]: - if tag["scope"] == "q_port_id": - nsx_lports[tag["tag"]] = port - except Exception: - err_msg = _("Unable to get ports") - LOG.exception(err_msg) - raise nsx_exc.NsxPluginException(err_msg=err_msg) - return nsx_lports - - -def get_port_by_neutron_tag(cluster, lswitch_uuid, neutron_port_id): - """Get port by neutron tag. - - Returns the NSX UUID of the logical port with tag q_port_id equal to - neutron_port_id or None if the port is not Found. - """ - uri = nsxlib._build_uri_path(LSWITCHPORT_RESOURCE, - parent_resource_id=lswitch_uuid, - fields='uuid', - filters={'tag': neutron_port_id, - 'tag_scope': 'q_port_id'}) - LOG.debug(_("Looking for port with q_port_id tag '%(neutron_port_id)s' " - "on: '%(lswitch_uuid)s'"), - {'neutron_port_id': neutron_port_id, - 'lswitch_uuid': lswitch_uuid}) - res = nsxlib.do_request(HTTP_GET, uri, cluster=cluster) - num_results = len(res["results"]) - if num_results >= 1: - if num_results > 1: - LOG.warn(_("Found '%(num_ports)d' ports with " - "q_port_id tag: '%(neutron_port_id)s'. " - "Only 1 was expected."), - {'num_ports': num_results, - 'neutron_port_id': neutron_port_id}) - return res["results"][0] - - -def get_port(cluster, network, port, relations=None): - LOG.info(_("get_port() %(network)s %(port)s"), - {'network': network, 'port': port}) - uri = "/ws.v1/lswitch/" + network + "/lport/" + port + "?" 
- if relations: - uri += "relations=%s" % relations - try: - return nsxlib.do_request(HTTP_GET, uri, cluster=cluster) - except exception.NotFound as e: - LOG.error(_("Port or Network not found, Error: %s"), str(e)) - raise exception.PortNotFoundOnNetwork( - port_id=port, net_id=network) - - -def update_port(cluster, lswitch_uuid, lport_uuid, neutron_port_id, tenant_id, - display_name, device_id, admin_status_enabled, - mac_address=None, fixed_ips=None, port_security_enabled=None, - security_profiles=None, queue_id=None, - mac_learning_enabled=None, allowed_address_pairs=None): - lport_obj = dict( - admin_status_enabled=admin_status_enabled, - display_name=utils.check_and_truncate(display_name), - tags=utils.get_tags(os_tid=tenant_id, - q_port_id=neutron_port_id, - vm_id=utils.device_id_to_vm_id(device_id))) - - _configure_extensions(lport_obj, mac_address, fixed_ips, - port_security_enabled, security_profiles, - queue_id, mac_learning_enabled, - allowed_address_pairs) - - path = "/ws.v1/lswitch/" + lswitch_uuid + "/lport/" + lport_uuid - try: - result = nsxlib.do_request(HTTP_PUT, path, json.dumps(lport_obj), - cluster=cluster) - LOG.debug(_("Updated logical port %(result)s " - "on logical switch %(uuid)s"), - {'result': result['uuid'], 'uuid': lswitch_uuid}) - return result - except exception.NotFound as e: - LOG.error(_("Port or Network not found, Error: %s"), str(e)) - raise exception.PortNotFoundOnNetwork( - port_id=lport_uuid, net_id=lswitch_uuid) - - -def create_lport(cluster, lswitch_uuid, tenant_id, neutron_port_id, - display_name, device_id, admin_status_enabled, - mac_address=None, fixed_ips=None, port_security_enabled=None, - security_profiles=None, queue_id=None, - mac_learning_enabled=None, allowed_address_pairs=None): - """Creates a logical port on the assigned logical switch.""" - display_name = utils.check_and_truncate(display_name) - lport_obj = dict( - admin_status_enabled=admin_status_enabled, - display_name=display_name, - 
tags=utils.get_tags(os_tid=tenant_id, - q_port_id=neutron_port_id, - vm_id=utils.device_id_to_vm_id(device_id)) - ) - - _configure_extensions(lport_obj, mac_address, fixed_ips, - port_security_enabled, security_profiles, - queue_id, mac_learning_enabled, - allowed_address_pairs) - - path = nsxlib._build_uri_path(LSWITCHPORT_RESOURCE, - parent_resource_id=lswitch_uuid) - result = nsxlib.do_request(HTTP_POST, path, json.dumps(lport_obj), - cluster=cluster) - - LOG.debug(_("Created logical port %(result)s on logical switch %(uuid)s"), - {'result': result['uuid'], 'uuid': lswitch_uuid}) - return result - - -def get_port_status(cluster, lswitch_id, port_id): - """Retrieve the operational status of the port.""" - try: - r = nsxlib.do_request(HTTP_GET, - "/ws.v1/lswitch/%s/lport/%s/status" % - (lswitch_id, port_id), cluster=cluster) - except exception.NotFound as e: - LOG.error(_("Port not found, Error: %s"), str(e)) - raise exception.PortNotFoundOnNetwork( - port_id=port_id, net_id=lswitch_id) - if r['link_status_up'] is True: - return constants.PORT_STATUS_ACTIVE - else: - return constants.PORT_STATUS_DOWN - - -def plug_interface(cluster, lswitch_id, lport_id, att_obj): - return nsxlib.do_request(HTTP_PUT, - nsxlib._build_uri_path(LSWITCHPORT_RESOURCE, - lport_id, lswitch_id, - is_attachment=True), - json.dumps(att_obj), - cluster=cluster) - - -def plug_vif_interface( - cluster, lswitch_id, port_id, port_type, attachment=None): - """Plug a VIF Attachment object in a logical port.""" - lport_obj = {} - if attachment: - lport_obj["vif_uuid"] = attachment - - lport_obj["type"] = port_type - return plug_interface(cluster, lswitch_id, port_id, lport_obj) diff --git a/neutron/plugins/vmware/nsxlib/versioning.py b/neutron/plugins/vmware/nsxlib/versioning.py deleted file mode 100644 index 0845a7d4c..000000000 --- a/neutron/plugins/vmware/nsxlib/versioning.py +++ /dev/null @@ -1,66 +0,0 @@ -# Copyright 2014 VMware, Inc. 
-# All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import inspect - -from neutron.plugins.vmware.api_client import exception - -DEFAULT_VERSION = -1 - - -def versioned(func_table): - - def versioned_function(wrapped_func): - func_name = wrapped_func.__name__ - - def dispatch_versioned_function(cluster, *args, **kwargs): - # Call the wrapper function, in case we need to - # run validation checks regarding versions. It - # should return the NSX version - v = (wrapped_func(cluster, *args, **kwargs) or - cluster.api_client.get_version()) - func = get_function_by_version(func_table, func_name, v) - func_kwargs = kwargs - arg_spec = inspect.getargspec(func) - if not arg_spec.keywords and not arg_spec.varargs: - # drop args unknown to function from func_args - arg_set = set(func_kwargs.keys()) - for arg in arg_set - set(arg_spec.args): - del func_kwargs[arg] - # NOTE(salvatore-orlando): shall we fail here if a required - # argument is not passed, or let the called function raise? 
- return func(cluster, *args, **func_kwargs) - - return dispatch_versioned_function - return versioned_function - - -def get_function_by_version(func_table, func_name, ver): - if ver: - if ver.major not in func_table[func_name]: - major = max(func_table[func_name].keys()) - minor = max(func_table[func_name][major].keys()) - if major > ver.major: - raise NotImplementedError(_("Operation may not be supported")) - else: - major = ver.major - minor = ver.minor - if ver.minor not in func_table[func_name][major]: - minor = DEFAULT_VERSION - return func_table[func_name][major][minor] - else: - msg = _('NSX version is not set. Unable to complete request ' - 'correctly. Check log for NSX communication errors.') - raise exception.ServiceUnavailable(message=msg) diff --git a/neutron/plugins/vmware/plugin.py b/neutron/plugins/vmware/plugin.py deleted file mode 100644 index f5ea3dba1..000000000 --- a/neutron/plugins/vmware/plugin.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright 2014 VMware, Inc. -# -# All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -from neutron.plugins.vmware.plugins import base -from neutron.plugins.vmware.plugins import service - -NsxPlugin = base.NsxPluginV2 -NsxServicePlugin = service.NsxAdvancedPlugin diff --git a/neutron/plugins/vmware/plugins/__init__.py b/neutron/plugins/vmware/plugins/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/neutron/plugins/vmware/plugins/base.py b/neutron/plugins/vmware/plugins/base.py deleted file mode 100644 index 384964c8f..000000000 --- a/neutron/plugins/vmware/plugins/base.py +++ /dev/null @@ -1,2528 +0,0 @@ -# Copyright 2012 VMware, Inc. -# All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import logging -import uuid - -from oslo.config import cfg -from sqlalchemy import exc as sql_exc -from sqlalchemy.orm import exc as sa_exc -import webob.exc - -from neutron.api import extensions as neutron_extensions -from neutron.api.v2 import attributes as attr -from neutron.api.v2 import base -from neutron.common import constants -from neutron.common import exceptions as n_exc -from neutron.common import utils -from neutron import context as q_context -from neutron.db import agentschedulers_db -from neutron.db import allowedaddresspairs_db as addr_pair_db -from neutron.db import db_base_plugin_v2 -from neutron.db import external_net_db -from neutron.db import extraroute_db -from neutron.db import l3_db -from neutron.db import l3_gwmode_db -from neutron.db import models_v2 -from neutron.db import portbindings_db -from neutron.db import portsecurity_db -from neutron.db import quota_db # noqa -from neutron.db import securitygroups_db -from neutron.extensions import allowedaddresspairs as addr_pair -from neutron.extensions import external_net as ext_net_extn -from neutron.extensions import extraroute -from neutron.extensions import l3 -from neutron.extensions import multiprovidernet as mpnet -from neutron.extensions import portbindings as pbin -from neutron.extensions import portsecurity as psec -from neutron.extensions import providernet as pnet -from neutron.extensions import securitygroup as ext_sg -from neutron.openstack.common.db import exception as db_exc -from neutron.openstack.common import excutils -from neutron.openstack.common import lockutils -from neutron.plugins.common import constants as plugin_const -from neutron.plugins import vmware -from neutron.plugins.vmware.api_client import exception as api_exc -from neutron.plugins.vmware.common import config # noqa -from neutron.plugins.vmware.common import exceptions as nsx_exc -from neutron.plugins.vmware.common import nsx_utils -from neutron.plugins.vmware.common import securitygroups as sg_utils 
-from neutron.plugins.vmware.common import sync -from neutron.plugins.vmware.common import utils as c_utils -from neutron.plugins.vmware.dbexts import db as nsx_db -from neutron.plugins.vmware.dbexts import distributedrouter as dist_rtr -from neutron.plugins.vmware.dbexts import maclearning as mac_db -from neutron.plugins.vmware.dbexts import networkgw_db -from neutron.plugins.vmware.dbexts import qos_db -from neutron.plugins.vmware import dhcpmeta_modes -from neutron.plugins.vmware.extensions import maclearning as mac_ext -from neutron.plugins.vmware.extensions import networkgw -from neutron.plugins.vmware.extensions import qos -from neutron.plugins.vmware.nsxlib import l2gateway as l2gwlib -from neutron.plugins.vmware.nsxlib import queue as queuelib -from neutron.plugins.vmware.nsxlib import router as routerlib -from neutron.plugins.vmware.nsxlib import secgroup as secgrouplib -from neutron.plugins.vmware.nsxlib import switch as switchlib - -LOG = logging.getLogger("NeutronPlugin") - -NSX_NOSNAT_RULES_ORDER = 10 -NSX_FLOATINGIP_NAT_RULES_ORDER = 224 -NSX_EXTGW_NAT_RULES_ORDER = 255 -NSX_DEFAULT_NEXTHOP = '1.1.1.1' - - -class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin, - agentschedulers_db.DhcpAgentSchedulerDbMixin, - db_base_plugin_v2.NeutronDbPluginV2, - dhcpmeta_modes.DhcpMetadataAccess, - dist_rtr.DistributedRouter_mixin, - external_net_db.External_net_db_mixin, - extraroute_db.ExtraRoute_db_mixin, - l3_gwmode_db.L3_NAT_db_mixin, - mac_db.MacLearningDbMixin, - networkgw_db.NetworkGatewayMixin, - portbindings_db.PortBindingMixin, - portsecurity_db.PortSecurityDbMixin, - qos_db.QoSDbMixin, - securitygroups_db.SecurityGroupDbMixin): - - supported_extension_aliases = ["allowed-address-pairs", - "binding", - "dist-router", - "ext-gw-mode", - "extraroute", - "mac-learning", - "multi-provider", - "network-gateway", - "nvp-qos", - "port-security", - "provider", - "qos-queue", - "quotas", - "external-net", - "router", - "security-group"] - - 
__native_bulk_support = True - __native_pagination_support = True - __native_sorting_support = True - - # Map nova zones to cluster for easy retrieval - novazone_cluster_map = {} - - def __init__(self): - super(NsxPluginV2, self).__init__() - config.validate_config_options() - # TODO(salv-orlando): Replace These dicts with - # collections.defaultdict for better handling of default values - # Routines for managing logical ports in NSX - self.port_special_owners = [l3_db.DEVICE_OWNER_ROUTER_GW, - l3_db.DEVICE_OWNER_ROUTER_INTF] - self._port_drivers = { - 'create': {l3_db.DEVICE_OWNER_ROUTER_GW: - self._nsx_create_ext_gw_port, - l3_db.DEVICE_OWNER_FLOATINGIP: - self._nsx_create_fip_port, - l3_db.DEVICE_OWNER_ROUTER_INTF: - self._nsx_create_router_port, - networkgw_db.DEVICE_OWNER_NET_GW_INTF: - self._nsx_create_l2_gw_port, - 'default': self._nsx_create_port}, - 'delete': {l3_db.DEVICE_OWNER_ROUTER_GW: - self._nsx_delete_ext_gw_port, - l3_db.DEVICE_OWNER_ROUTER_INTF: - self._nsx_delete_router_port, - l3_db.DEVICE_OWNER_FLOATINGIP: - self._nsx_delete_fip_port, - networkgw_db.DEVICE_OWNER_NET_GW_INTF: - self._nsx_delete_port, - 'default': self._nsx_delete_port} - } - - neutron_extensions.append_api_extensions_path([vmware.NSX_EXT_PATH]) - self.nsx_opts = cfg.CONF.NSX - self.nsx_sync_opts = cfg.CONF.NSX_SYNC - self.cluster = nsx_utils.create_nsx_cluster( - cfg.CONF, - self.nsx_opts.concurrent_connections, - self.nsx_opts.nsx_gen_timeout) - - self.base_binding_dict = { - pbin.VIF_TYPE: pbin.VIF_TYPE_OVS, - pbin.VIF_DETAILS: { - # TODO(rkukura): Replace with new VIF security details - pbin.CAP_PORT_FILTER: - 'security-group' in self.supported_extension_aliases}} - - self._extend_fault_map() - self.setup_dhcpmeta_access() - # Set this flag to false as the default gateway has not - # been yet updated from the config file - self._is_default_net_gw_in_sync = False - # Create a synchronizer instance for backend sync - self._synchronizer = sync.NsxSynchronizer( - 
self.safe_reference, self.cluster, - self.nsx_sync_opts.state_sync_interval, - self.nsx_sync_opts.min_sync_req_delay, - self.nsx_sync_opts.min_chunk_size, - self.nsx_sync_opts.max_random_sync_delay) - - def _ensure_default_network_gateway(self): - if self._is_default_net_gw_in_sync: - return - # Add the gw in the db as default, and unset any previous default - def_l2_gw_uuid = self.cluster.default_l2_gw_service_uuid - try: - ctx = q_context.get_admin_context() - self._unset_default_network_gateways(ctx) - if not def_l2_gw_uuid: - return - try: - def_network_gw = self._get_network_gateway(ctx, - def_l2_gw_uuid) - except networkgw_db.GatewayNotFound: - # Create in DB only - don't go to backend - def_gw_data = {'id': def_l2_gw_uuid, - 'name': 'default L2 gateway service', - 'devices': []} - gw_res_name = networkgw.GATEWAY_RESOURCE_NAME.replace('-', '_') - def_network_gw = super( - NsxPluginV2, self).create_network_gateway( - ctx, {gw_res_name: def_gw_data}) - # In any case set is as default - self._set_default_network_gateway(ctx, def_network_gw['id']) - # Ensure this method is executed only once - self._is_default_net_gw_in_sync = True - except Exception: - with excutils.save_and_reraise_exception(): - LOG.exception(_("Unable to process default l2 gw service:%s"), - def_l2_gw_uuid) - - def _build_ip_address_list(self, context, fixed_ips, subnet_ids=None): - """Build ip_addresses data structure for logical router port. - - No need to perform validation on IPs - this has already been - done in the l3_db mixin class. 
- """ - ip_addresses = [] - for ip in fixed_ips: - if not subnet_ids or (ip['subnet_id'] in subnet_ids): - subnet = self._get_subnet(context, ip['subnet_id']) - ip_prefix = '%s/%s' % (ip['ip_address'], - subnet['cidr'].split('/')[1]) - ip_addresses.append(ip_prefix) - return ip_addresses - - def _create_and_attach_router_port(self, cluster, context, - nsx_router_id, port_data, - attachment_type, attachment, - attachment_vlan=None, - subnet_ids=None): - # Use a fake IP address if gateway port is not 'real' - ip_addresses = (port_data.get('fake_ext_gw') and - ['0.0.0.0/31'] or - self._build_ip_address_list(context, - port_data['fixed_ips'], - subnet_ids)) - try: - lrouter_port = routerlib.create_router_lport( - cluster, nsx_router_id, port_data.get('tenant_id', 'fake'), - port_data.get('id', 'fake'), port_data.get('name', 'fake'), - port_data.get('admin_state_up', True), ip_addresses, - port_data.get('mac_address')) - LOG.debug(_("Created NSX router port:%s"), lrouter_port['uuid']) - except api_exc.NsxApiException: - LOG.exception(_("Unable to create port on NSX logical router %s"), - nsx_router_id) - raise nsx_exc.NsxPluginException( - err_msg=_("Unable to create logical router port for neutron " - "port id %(port_id)s on router %(nsx_router_id)s") % - {'port_id': port_data.get('id'), - 'nsx_router_id': nsx_router_id}) - self._update_router_port_attachment(cluster, context, nsx_router_id, - port_data, lrouter_port['uuid'], - attachment_type, attachment, - attachment_vlan) - return lrouter_port - - def _update_router_gw_info(self, context, router_id, info): - # NOTE(salvatore-orlando): We need to worry about rollback of NSX - # configuration in case of failures in the process - # Ref. 
LP bug 1102301 - router = self._get_router(context, router_id) - # Check whether SNAT rule update should be triggered - # NSX also supports multiple external networks so there is also - # the possibility that NAT rules should be replaced - current_ext_net_id = router.gw_port_id and router.gw_port.network_id - new_ext_net_id = info and info.get('network_id') - # SNAT should be enabled unless info['enable_snat'] is - # explicitly set to false - enable_snat = new_ext_net_id and info.get('enable_snat', True) - # Remove if ext net removed, changed, or if snat disabled - remove_snat_rules = (current_ext_net_id and - new_ext_net_id != current_ext_net_id or - router.enable_snat and not enable_snat) - # Add rules if snat is enabled, and if either the external network - # changed or snat was previously disabled - # NOTE: enable_snat == True implies new_ext_net_id != None - add_snat_rules = (enable_snat and - (new_ext_net_id != current_ext_net_id or - not router.enable_snat)) - router = super(NsxPluginV2, self)._update_router_gw_info( - context, router_id, info, router=router) - # Add/Remove SNAT rules as needed - # Create an elevated context for dealing with metadata access - # cidrs which are created within admin context - ctx_elevated = context.elevated() - if remove_snat_rules or add_snat_rules: - cidrs = self._find_router_subnets_cidrs(ctx_elevated, router_id) - nsx_router_id = nsx_utils.get_nsx_router_id( - context.session, self.cluster, router_id) - if remove_snat_rules: - # Be safe and concede NAT rules might not exist. 
- # Therefore use min_num_expected=0 - for cidr in cidrs: - routerlib.delete_nat_rules_by_match( - self.cluster, nsx_router_id, "SourceNatRule", - max_num_expected=1, min_num_expected=0, - source_ip_addresses=cidr) - if add_snat_rules: - ip_addresses = self._build_ip_address_list( - ctx_elevated, router.gw_port['fixed_ips']) - # Set the SNAT rule for each subnet (only first IP) - for cidr in cidrs: - cidr_prefix = int(cidr.split('/')[1]) - routerlib.create_lrouter_snat_rule( - self.cluster, nsx_router_id, - ip_addresses[0].split('/')[0], - ip_addresses[0].split('/')[0], - order=NSX_EXTGW_NAT_RULES_ORDER - cidr_prefix, - match_criteria={'source_ip_addresses': cidr}) - - def _update_router_port_attachment(self, cluster, context, - nsx_router_id, port_data, - nsx_router_port_id, - attachment_type, - attachment, - attachment_vlan=None): - if not nsx_router_port_id: - nsx_router_port_id = self._find_router_gw_port(context, port_data) - try: - routerlib.plug_router_port_attachment(cluster, nsx_router_id, - nsx_router_port_id, - attachment, - attachment_type, - attachment_vlan) - LOG.debug(_("Attached %(att)s to NSX router port %(port)s"), - {'att': attachment, 'port': nsx_router_port_id}) - except api_exc.NsxApiException: - # Must remove NSX logical port - routerlib.delete_router_lport(cluster, nsx_router_id, - nsx_router_port_id) - LOG.exception(_("Unable to plug attachment in NSX logical " - "router port %(r_port_id)s, associated with " - "Neutron %(q_port_id)s"), - {'r_port_id': nsx_router_port_id, - 'q_port_id': port_data.get('id')}) - raise nsx_exc.NsxPluginException( - err_msg=(_("Unable to plug attachment in router port " - "%(r_port_id)s for neutron port id %(q_port_id)s " - "on router %(router_id)s") % - {'r_port_id': nsx_router_port_id, - 'q_port_id': port_data.get('id'), - 'router_id': nsx_router_id})) - - def _get_port_by_device_id(self, context, device_id, device_owner): - """Retrieve ports associated with a specific device id. 
- - Used for retrieving all neutron ports attached to a given router. - """ - port_qry = context.session.query(models_v2.Port) - return port_qry.filter_by( - device_id=device_id, - device_owner=device_owner,).all() - - def _find_router_subnets_cidrs(self, context, router_id): - """Retrieve subnets attached to the specified router.""" - ports = self._get_port_by_device_id(context, router_id, - l3_db.DEVICE_OWNER_ROUTER_INTF) - # No need to check for overlapping CIDRs - cidrs = [] - for port in ports: - for ip in port.get('fixed_ips', []): - cidrs.append(self._get_subnet(context, - ip.subnet_id).cidr) - return cidrs - - def _nsx_find_lswitch_for_port(self, context, port_data): - network = self._get_network(context, port_data['network_id']) - network_bindings = nsx_db.get_network_bindings( - context.session, port_data['network_id']) - max_ports = self.nsx_opts.max_lp_per_overlay_ls - allow_extra_lswitches = False - for network_binding in network_bindings: - if network_binding.binding_type in (c_utils.NetworkTypes.FLAT, - c_utils.NetworkTypes.VLAN): - max_ports = self.nsx_opts.max_lp_per_bridged_ls - allow_extra_lswitches = True - break - try: - return self._handle_lswitch_selection( - context, self.cluster, network, network_bindings, - max_ports, allow_extra_lswitches) - except api_exc.NsxApiException: - err_desc = _("An exception occurred while selecting logical " - "switch for the port") - LOG.exception(err_desc) - raise nsx_exc.NsxPluginException(err_msg=err_desc) - - def _nsx_create_port_helper(self, session, ls_uuid, port_data, - do_port_security=True): - # Convert Neutron security groups identifiers into NSX security - # profiles identifiers - nsx_sec_profile_ids = [ - nsx_utils.get_nsx_security_group_id( - session, self.cluster, neutron_sg_id) for - neutron_sg_id in (port_data[ext_sg.SECURITYGROUPS] or [])] - return switchlib.create_lport(self.cluster, - ls_uuid, - port_data['tenant_id'], - port_data['id'], - port_data['name'], - port_data['device_id'], - 
port_data['admin_state_up'], - port_data['mac_address'], - port_data['fixed_ips'], - port_data[psec.PORTSECURITY], - nsx_sec_profile_ids, - port_data.get(qos.QUEUE), - port_data.get(mac_ext.MAC_LEARNING), - port_data.get(addr_pair.ADDRESS_PAIRS)) - - def _handle_create_port_exception(self, context, port_id, - ls_uuid, lp_uuid): - with excutils.save_and_reraise_exception(): - # rollback nsx logical port only if it was successfully - # created on NSX. Should this command fail the original - # exception will be raised. - if lp_uuid: - # Remove orphaned port from NSX - switchlib.delete_port(self.cluster, ls_uuid, lp_uuid) - # rollback the neutron-nsx port mapping - nsx_db.delete_neutron_nsx_port_mapping(context.session, - port_id) - msg = (_("An exception occurred while creating the " - "neutron port %s on the NSX plaform") % port_id) - LOG.exception(msg) - - def _nsx_create_port(self, context, port_data): - """Driver for creating a logical switch port on NSX platform.""" - # FIXME(salvatore-orlando): On the NSX platform we do not really have - # external networks. So if as user tries and create a "regular" VIF - # port on an external network we are unable to actually create. - # However, in order to not break unit tests, we need to still create - # the DB object and return success - if self._network_is_external(context, port_data['network_id']): - LOG.info(_("NSX plugin does not support regular VIF ports on " - "external networks. 
Port %s will be down."), - port_data['network_id']) - # No need to actually update the DB state - the default is down - return port_data - lport = None - selected_lswitch = None - try: - selected_lswitch = self._nsx_find_lswitch_for_port(context, - port_data) - lport = self._nsx_create_port_helper(context.session, - selected_lswitch['uuid'], - port_data, - True) - nsx_db.add_neutron_nsx_port_mapping( - context.session, port_data['id'], - selected_lswitch['uuid'], lport['uuid']) - if port_data['device_owner'] not in self.port_special_owners: - switchlib.plug_vif_interface( - self.cluster, selected_lswitch['uuid'], - lport['uuid'], "VifAttachment", port_data['id']) - LOG.debug(_("_nsx_create_port completed for port %(name)s " - "on network %(network_id)s. The new port id is " - "%(id)s."), port_data) - except (api_exc.NsxApiException, n_exc.NeutronException): - self._handle_create_port_exception( - context, port_data['id'], - selected_lswitch and selected_lswitch['uuid'], - lport and lport['uuid']) - except db_exc.DBError as e: - if (port_data['device_owner'] == constants.DEVICE_OWNER_DHCP and - isinstance(e.inner_exception, sql_exc.IntegrityError)): - msg = (_("Concurrent network deletion detected; Back-end Port " - "%(nsx_id)s creation to be rolled back for Neutron " - "port: %(neutron_id)s") - % {'nsx_id': lport['uuid'], - 'neutron_id': port_data['id']}) - LOG.warning(msg) - if selected_lswitch and lport: - try: - switchlib.delete_port(self.cluster, - selected_lswitch['uuid'], - lport['uuid']) - except n_exc.NotFound: - LOG.debug(_("NSX Port %s already gone"), lport['uuid']) - - def _nsx_delete_port(self, context, port_data): - # FIXME(salvatore-orlando): On the NSX platform we do not really have - # external networks. So deleting regular ports from external networks - # does not make sense. However we cannot raise as this would break - # unit tests. 
- if self._network_is_external(context, port_data['network_id']): - LOG.info(_("NSX plugin does not support regular VIF ports on " - "external networks. Port %s will be down."), - port_data['network_id']) - return - nsx_switch_id, nsx_port_id = nsx_utils.get_nsx_switch_and_port_id( - context.session, self.cluster, port_data['id']) - if not nsx_port_id: - LOG.debug(_("Port '%s' was already deleted on NSX platform"), id) - return - # TODO(bgh): if this is a bridged network and the lswitch we just got - # back will have zero ports after the delete we should garbage collect - # the lswitch. - try: - switchlib.delete_port(self.cluster, nsx_switch_id, nsx_port_id) - LOG.debug(_("_nsx_delete_port completed for port %(port_id)s " - "on network %(net_id)s"), - {'port_id': port_data['id'], - 'net_id': port_data['network_id']}) - except n_exc.NotFound: - LOG.warning(_("Port %s not found in NSX"), port_data['id']) - - def _nsx_delete_router_port(self, context, port_data): - # Delete logical router port - nsx_router_id = nsx_utils.get_nsx_router_id( - context.session, self.cluster, port_data['device_id']) - nsx_switch_id, nsx_port_id = nsx_utils.get_nsx_switch_and_port_id( - context.session, self.cluster, port_data['id']) - if not nsx_port_id: - LOG.warn(_("Neutron port %(port_id)s not found on NSX backend. " - "Terminating delete operation. 
A dangling router port " - "might have been left on router %(router_id)s"), - {'port_id': port_data['id'], - 'router_id': nsx_router_id}) - return - try: - routerlib.delete_peer_router_lport(self.cluster, - nsx_router_id, - nsx_switch_id, - nsx_port_id) - except api_exc.NsxApiException: - # Do not raise because the issue might as well be that the - # router has already been deleted, so there would be nothing - # to do here - LOG.exception(_("Ignoring exception as this means the peer " - "for port '%s' has already been deleted."), - nsx_port_id) - - # Delete logical switch port - self._nsx_delete_port(context, port_data) - - def _nsx_create_router_port(self, context, port_data): - """Driver for creating a switch port to be connected to a router.""" - # No router ports on external networks! - if self._network_is_external(context, port_data['network_id']): - raise nsx_exc.NsxPluginException( - err_msg=(_("It is not allowed to create router interface " - "ports on external networks as '%s'") % - port_data['network_id'])) - ls_port = None - selected_lswitch = None - try: - selected_lswitch = self._nsx_find_lswitch_for_port( - context, port_data) - # Do not apply port security here! - ls_port = self._nsx_create_port_helper( - context.session, selected_lswitch['uuid'], - port_data, False) - # Assuming subnet being attached is on first fixed ip - # element in port data - subnet_id = port_data['fixed_ips'][0]['subnet_id'] - nsx_router_id = nsx_utils.get_nsx_router_id( - context.session, self.cluster, port_data['device_id']) - # Create peer port on logical router - self._create_and_attach_router_port( - self.cluster, context, nsx_router_id, port_data, - "PatchAttachment", ls_port['uuid'], - subnet_ids=[subnet_id]) - nsx_db.add_neutron_nsx_port_mapping( - context.session, port_data['id'], - selected_lswitch['uuid'], ls_port['uuid']) - LOG.debug(_("_nsx_create_router_port completed for port " - "%(name)s on network %(network_id)s. 
The new " - "port id is %(id)s."), - port_data) - except (api_exc.NsxApiException, n_exc.NeutronException): - self._handle_create_port_exception( - context, port_data['id'], - selected_lswitch and selected_lswitch['uuid'], - ls_port and ls_port['uuid']) - - def _find_router_gw_port(self, context, port_data): - router_id = port_data['device_id'] - if not router_id: - raise n_exc.BadRequest(_("device_id field must be populated in " - "order to create an external gateway " - "port for network %s"), - port_data['network_id']) - nsx_router_id = nsx_utils.get_nsx_router_id( - context.session, self.cluster, router_id) - lr_port = routerlib.find_router_gw_port(context, self.cluster, - nsx_router_id) - if not lr_port: - raise nsx_exc.NsxPluginException( - err_msg=(_("The gateway port for the NSX router %s " - "was not found on the backend") - % nsx_router_id)) - return lr_port - - @lockutils.synchronized('vmware', 'neutron-') - def _nsx_create_ext_gw_port(self, context, port_data): - """Driver for creating an external gateway port on NSX platform.""" - # TODO(salvatore-orlando): Handle NSX resource - # rollback when something goes not quite as expected - lr_port = self._find_router_gw_port(context, port_data) - ip_addresses = self._build_ip_address_list(context, - port_data['fixed_ips']) - # This operation actually always updates a NSX logical port - # instead of creating one. This is because the gateway port - # is created at the same time as the NSX logical router, otherwise - # the fabric status of the NSX router will be down. 
- # admin_status should always be up for the gateway port - # regardless of what the user specifies in neutron - nsx_router_id = nsx_utils.get_nsx_router_id( - context.session, self.cluster, port_data['device_id']) - routerlib.update_router_lport(self.cluster, - nsx_router_id, - lr_port['uuid'], - port_data['tenant_id'], - port_data['id'], - port_data['name'], - True, - ip_addresses) - ext_network = self.get_network(context, port_data['network_id']) - if ext_network.get(pnet.NETWORK_TYPE) == c_utils.NetworkTypes.L3_EXT: - # Update attachment - physical_network = (ext_network[pnet.PHYSICAL_NETWORK] or - self.cluster.default_l3_gw_service_uuid) - self._update_router_port_attachment( - self.cluster, context, nsx_router_id, port_data, - lr_port['uuid'], - "L3GatewayAttachment", - physical_network, - ext_network[pnet.SEGMENTATION_ID]) - - LOG.debug(_("_nsx_create_ext_gw_port completed on external network " - "%(ext_net_id)s, attached to router:%(router_id)s. " - "NSX port id is %(nsx_port_id)s"), - {'ext_net_id': port_data['network_id'], - 'router_id': nsx_router_id, - 'nsx_port_id': lr_port['uuid']}) - - @lockutils.synchronized('vmware', 'neutron-') - def _nsx_delete_ext_gw_port(self, context, port_data): - lr_port = self._find_router_gw_port(context, port_data) - # TODO(salvatore-orlando): Handle NSX resource - # rollback when something goes not quite as expected - try: - # Delete is actually never a real delete, otherwise the NSX - # logical router will stop working - router_id = port_data['device_id'] - nsx_router_id = nsx_utils.get_nsx_router_id( - context.session, self.cluster, router_id) - routerlib.update_router_lport(self.cluster, - nsx_router_id, - lr_port['uuid'], - port_data['tenant_id'], - port_data['id'], - port_data['name'], - True, - ['0.0.0.0/31']) - # Reset attachment - self._update_router_port_attachment( - self.cluster, context, nsx_router_id, port_data, - lr_port['uuid'], - "L3GatewayAttachment", - self.cluster.default_l3_gw_service_uuid) - - except 
api_exc.ResourceNotFound: - raise nsx_exc.NsxPluginException( - err_msg=_("Logical router resource %s not found " - "on NSX platform") % router_id) - except api_exc.NsxApiException: - raise nsx_exc.NsxPluginException( - err_msg=_("Unable to update logical router" - "on NSX Platform")) - LOG.debug(_("_nsx_delete_ext_gw_port completed on external network " - "%(ext_net_id)s, attached to NSX router:%(router_id)s"), - {'ext_net_id': port_data['network_id'], - 'router_id': nsx_router_id}) - - def _nsx_create_l2_gw_port(self, context, port_data): - """Create a switch port, and attach it to a L2 gateway attachment.""" - # FIXME(salvatore-orlando): On the NSX platform we do not really have - # external networks. So if as user tries and create a "regular" VIF - # port on an external network we are unable to actually create. - # However, in order to not break unit tests, we need to still create - # the DB object and return success - if self._network_is_external(context, port_data['network_id']): - LOG.info(_("NSX plugin does not support regular VIF ports on " - "external networks. Port %s will be down."), - port_data['network_id']) - # No need to actually update the DB state - the default is down - return port_data - lport = None - try: - selected_lswitch = self._nsx_find_lswitch_for_port( - context, port_data) - lport = self._nsx_create_port_helper( - context.session, - selected_lswitch['uuid'], - port_data, - True) - nsx_db.add_neutron_nsx_port_mapping( - context.session, port_data['id'], - selected_lswitch['uuid'], lport['uuid']) - l2gwlib.plug_l2_gw_service( - self.cluster, - selected_lswitch['uuid'], - lport['uuid'], - port_data['device_id'], - int(port_data.get('gw:segmentation_id') or 0)) - except Exception: - with excutils.save_and_reraise_exception(): - if lport: - switchlib.delete_port(self.cluster, - selected_lswitch['uuid'], - lport['uuid']) - LOG.debug(_("_nsx_create_l2_gw_port completed for port %(name)s " - "on network %(network_id)s. 
The new port id " - "is %(id)s."), port_data) - - def _nsx_create_fip_port(self, context, port_data): - # As we do not create ports for floating IPs in NSX, - # this is a no-op driver - pass - - def _nsx_delete_fip_port(self, context, port_data): - # As we do not create ports for floating IPs in NSX, - # this is a no-op driver - pass - - def _extend_fault_map(self): - """Extends the Neutron Fault Map. - - Exceptions specific to the NSX Plugin are mapped to standard - HTTP Exceptions. - """ - base.FAULT_MAP.update({nsx_exc.InvalidNovaZone: - webob.exc.HTTPBadRequest, - nsx_exc.NoMorePortsException: - webob.exc.HTTPBadRequest, - nsx_exc.MaintenanceInProgress: - webob.exc.HTTPServiceUnavailable, - nsx_exc.InvalidSecurityCertificate: - webob.exc.HTTPBadRequest}) - - def _validate_provider_create(self, context, network): - if not attr.is_attr_set(network.get(mpnet.SEGMENTS)): - return - - for segment in network[mpnet.SEGMENTS]: - network_type = segment.get(pnet.NETWORK_TYPE) - physical_network = segment.get(pnet.PHYSICAL_NETWORK) - segmentation_id = segment.get(pnet.SEGMENTATION_ID) - network_type_set = attr.is_attr_set(network_type) - segmentation_id_set = attr.is_attr_set(segmentation_id) - - err_msg = None - if not network_type_set: - err_msg = _("%s required") % pnet.NETWORK_TYPE - elif network_type in (c_utils.NetworkTypes.GRE, - c_utils.NetworkTypes.STT, - c_utils.NetworkTypes.FLAT): - if segmentation_id_set: - err_msg = _("Segmentation ID cannot be specified with " - "flat network type") - elif network_type == c_utils.NetworkTypes.VLAN: - if not segmentation_id_set: - err_msg = _("Segmentation ID must be specified with " - "vlan network type") - elif (segmentation_id_set and - not utils.is_valid_vlan_tag(segmentation_id)): - err_msg = (_("%(segmentation_id)s out of range " - "(%(min_id)s through %(max_id)s)") % - {'segmentation_id': segmentation_id, - 'min_id': constants.MIN_VLAN_TAG, - 'max_id': constants.MAX_VLAN_TAG}) - else: - # Verify segment is not already 
allocated - bindings = nsx_db.get_network_bindings_by_vlanid( - context.session, segmentation_id) - if bindings: - raise n_exc.VlanIdInUse( - vlan_id=segmentation_id, - physical_network=physical_network) - elif network_type == c_utils.NetworkTypes.L3_EXT: - if (segmentation_id_set and - not utils.is_valid_vlan_tag(segmentation_id)): - err_msg = (_("%(segmentation_id)s out of range " - "(%(min_id)s through %(max_id)s)") % - {'segmentation_id': segmentation_id, - 'min_id': constants.MIN_VLAN_TAG, - 'max_id': constants.MAX_VLAN_TAG}) - else: - err_msg = (_("%(net_type_param)s %(net_type_value)s not " - "supported") % - {'net_type_param': pnet.NETWORK_TYPE, - 'net_type_value': network_type}) - if err_msg: - raise n_exc.InvalidInput(error_message=err_msg) - # TODO(salvatore-orlando): Validate tranport zone uuid - # which should be specified in physical_network - - def _extend_network_dict_provider(self, context, network, - multiprovider=None, bindings=None): - if not bindings: - bindings = nsx_db.get_network_bindings(context.session, - network['id']) - if not multiprovider: - multiprovider = nsx_db.is_multiprovider_network(context.session, - network['id']) - # With NSX plugin 'normal' overlay networks will have no binding - # TODO(salvatore-orlando) make sure users can specify a distinct - # phy_uuid as 'provider network' for STT net type - if bindings: - if not multiprovider: - # network came in through provider networks api - network[pnet.NETWORK_TYPE] = bindings[0].binding_type - network[pnet.PHYSICAL_NETWORK] = bindings[0].phy_uuid - network[pnet.SEGMENTATION_ID] = bindings[0].vlan_id - else: - # network come in though multiprovider networks api - network[mpnet.SEGMENTS] = [ - {pnet.NETWORK_TYPE: binding.binding_type, - pnet.PHYSICAL_NETWORK: binding.phy_uuid, - pnet.SEGMENTATION_ID: binding.vlan_id} - for binding in bindings] - - def _handle_lswitch_selection(self, context, cluster, network, - network_bindings, max_ports, - allow_extra_lswitches): - lswitches = 
nsx_utils.fetch_nsx_switches( - context.session, cluster, network.id) - try: - return [ls for ls in lswitches - if (ls['_relations']['LogicalSwitchStatus'] - ['lport_count'] < max_ports)].pop(0) - except IndexError: - # Too bad, no switch available - LOG.debug(_("No switch has available ports (%d checked)"), - len(lswitches)) - if allow_extra_lswitches: - # The 'main' logical switch is either the only one available - # or the one where the 'multi_lswitch' tag was set - while lswitches: - main_ls = lswitches.pop(0) - tag_dict = dict((x['scope'], x['tag']) - for x in main_ls['tags']) - if 'multi_lswitch' in tag_dict: - break - else: - # by construction this statement is hit if there is only one - # logical switch and the multi_lswitch tag has not been set. - # The tag must therefore be added. - tags = main_ls['tags'] - tags.append({'tag': 'True', 'scope': 'multi_lswitch'}) - switchlib.update_lswitch(cluster, - main_ls['uuid'], - main_ls['display_name'], - network['tenant_id'], - tags=tags) - transport_zone_config = self._convert_to_nsx_transport_zones( - cluster, network, bindings=network_bindings) - selected_lswitch = switchlib.create_lswitch( - cluster, network.id, network.tenant_id, - "%s-ext-%s" % (network.name, len(lswitches)), - transport_zone_config) - # add a mapping between the neutron network and the newly - # created logical switch - nsx_db.add_neutron_nsx_network_mapping( - context.session, network.id, selected_lswitch['uuid']) - return selected_lswitch - else: - LOG.error(_("Maximum number of logical ports reached for " - "logical network %s"), network.id) - raise nsx_exc.NoMorePortsException(network=network.id) - - def _convert_to_nsx_transport_zones(self, cluster, network=None, - bindings=None): - nsx_transport_zones_config = [] - - # Convert fields from provider request to nsx format - if (network and not attr.is_attr_set( - network.get(mpnet.SEGMENTS))): - return [{"zone_uuid": cluster.default_tz_uuid, - "transport_type": 
cfg.CONF.NSX.default_transport_type}] - - # Convert fields from db to nsx format - if bindings: - transport_entry = {} - for binding in bindings: - if binding.binding_type in [c_utils.NetworkTypes.FLAT, - c_utils.NetworkTypes.VLAN]: - transport_entry['transport_type'] = ( - c_utils.NetworkTypes.BRIDGE) - transport_entry['binding_config'] = {} - vlan_id = binding.vlan_id - if vlan_id: - transport_entry['binding_config'] = ( - {'vlan_translation': [{'transport': vlan_id}]}) - else: - transport_entry['transport_type'] = binding.binding_type - transport_entry['zone_uuid'] = binding.phy_uuid - nsx_transport_zones_config.append(transport_entry) - return nsx_transport_zones_config - - for transport_zone in network.get(mpnet.SEGMENTS): - for value in [pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK, - pnet.SEGMENTATION_ID]: - if transport_zone.get(value) == attr.ATTR_NOT_SPECIFIED: - transport_zone[value] = None - - transport_entry = {} - transport_type = transport_zone.get(pnet.NETWORK_TYPE) - if transport_type in [c_utils.NetworkTypes.FLAT, - c_utils.NetworkTypes.VLAN]: - transport_entry['transport_type'] = c_utils.NetworkTypes.BRIDGE - transport_entry['binding_config'] = {} - vlan_id = transport_zone.get(pnet.SEGMENTATION_ID) - if vlan_id: - transport_entry['binding_config'] = ( - {'vlan_translation': [{'transport': vlan_id}]}) - else: - transport_entry['transport_type'] = transport_type - transport_entry['zone_uuid'] = ( - transport_zone[pnet.PHYSICAL_NETWORK] or - cluster.default_tz_uuid) - nsx_transport_zones_config.append(transport_entry) - return nsx_transport_zones_config - - def _convert_to_transport_zones_dict(self, network): - """Converts the provider request body to multiprovider. - Returns: True if request is multiprovider False if provider - and None if neither. 
- """ - if any(attr.is_attr_set(network.get(f)) - for f in (pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK, - pnet.SEGMENTATION_ID)): - if attr.is_attr_set(network.get(mpnet.SEGMENTS)): - raise mpnet.SegmentsSetInConjunctionWithProviders() - # convert to transport zone list - network[mpnet.SEGMENTS] = [ - {pnet.NETWORK_TYPE: network[pnet.NETWORK_TYPE], - pnet.PHYSICAL_NETWORK: network[pnet.PHYSICAL_NETWORK], - pnet.SEGMENTATION_ID: network[pnet.SEGMENTATION_ID]}] - del network[pnet.NETWORK_TYPE] - del network[pnet.PHYSICAL_NETWORK] - del network[pnet.SEGMENTATION_ID] - return False - if attr.is_attr_set(mpnet.SEGMENTS): - return True - - def create_network(self, context, network): - net_data = network['network'] - tenant_id = self._get_tenant_id_for_create(context, net_data) - self._ensure_default_security_group(context, tenant_id) - # Process the provider network extension - provider_type = self._convert_to_transport_zones_dict(net_data) - self._validate_provider_create(context, net_data) - # Replace ATTR_NOT_SPECIFIED with None before sending to NSX - for key, value in network['network'].iteritems(): - if value is attr.ATTR_NOT_SPECIFIED: - net_data[key] = None - # FIXME(arosen) implement admin_state_up = False in NSX - if net_data['admin_state_up'] is False: - LOG.warning(_("Network with admin_state_up=False are not yet " - "supported by this plugin. Ignoring setting for " - "network %s"), net_data.get('name', '')) - transport_zone_config = self._convert_to_nsx_transport_zones( - self.cluster, net_data) - external = net_data.get(ext_net_extn.EXTERNAL) - # NOTE(salv-orlando): Pre-generating uuid for Neutron - # network. 
This will be removed once the network create operation - # becomes an asynchronous task - net_data['id'] = str(uuid.uuid4()) - if (not attr.is_attr_set(external) or - attr.is_attr_set(external) and not external): - lswitch = switchlib.create_lswitch( - self.cluster, net_data['id'], - tenant_id, net_data.get('name'), - transport_zone_config, - shared=net_data.get(attr.SHARED)) - - with context.session.begin(subtransactions=True): - new_net = super(NsxPluginV2, self).create_network(context, - network) - # Process port security extension - self._process_network_port_security_create( - context, net_data, new_net) - # DB Operations for setting the network as external - self._process_l3_create(context, new_net, net_data) - # Process QoS queue extension - net_queue_id = net_data.get(qos.QUEUE) - if net_queue_id: - # Raises if not found - self.get_qos_queue(context, net_queue_id) - self._process_network_queue_mapping( - context, new_net, net_queue_id) - # Add mapping between neutron network and NSX switch - if (not attr.is_attr_set(external) or - attr.is_attr_set(external) and not external): - nsx_db.add_neutron_nsx_network_mapping( - context.session, new_net['id'], - lswitch['uuid']) - if (net_data.get(mpnet.SEGMENTS) and - isinstance(provider_type, bool)): - net_bindings = [] - for tz in net_data[mpnet.SEGMENTS]: - segmentation_id = tz.get(pnet.SEGMENTATION_ID, 0) - segmentation_id_set = attr.is_attr_set(segmentation_id) - if not segmentation_id_set: - segmentation_id = 0 - net_bindings.append(nsx_db.add_network_binding( - context.session, new_net['id'], - tz.get(pnet.NETWORK_TYPE), - tz.get(pnet.PHYSICAL_NETWORK), - segmentation_id)) - if provider_type: - nsx_db.set_multiprovider_network(context.session, - new_net['id']) - self._extend_network_dict_provider(context, new_net, - provider_type, - net_bindings) - self.handle_network_dhcp_access(context, new_net, - action='create_network') - return new_net - - def delete_network(self, context, id): - external = 
self._network_is_external(context, id) - # Before deleting ports, ensure the peer of a NSX logical - # port with a patch attachment is removed too - port_filter = {'network_id': [id], - 'device_owner': [constants.DEVICE_OWNER_ROUTER_INTF]} - router_iface_ports = self.get_ports(context, filters=port_filter) - for port in router_iface_ports: - nsx_switch_id, nsx_port_id = nsx_utils.get_nsx_switch_and_port_id( - context.session, self.cluster, id) - # Before removing entry from Neutron DB, retrieve NSX switch - # identifiers for removing them from backend - if not external: - lswitch_ids = nsx_utils.get_nsx_switch_ids( - context.session, self.cluster, id) - with context.session.begin(subtransactions=True): - self._process_l3_delete(context, id) - super(NsxPluginV2, self).delete_network(context, id) - - # clean up network owned ports - for port in router_iface_ports: - try: - if nsx_port_id: - nsx_router_id = nsx_utils.get_nsx_router_id( - context.session, self.cluster, port['device_id']) - routerlib.delete_peer_router_lport(self.cluster, - nsx_router_id, - nsx_switch_id, - nsx_port_id) - else: - LOG.warning(_("A nsx lport identifier was not found for " - "neutron port '%s'. 
Unable to remove " - "the peer router port for this switch port"), - port['id']) - - except (TypeError, KeyError, - api_exc.NsxApiException, - api_exc.ResourceNotFound): - # Do not raise because the issue might as well be that the - # router has already been deleted, so there would be nothing - # to do here - LOG.warning(_("Ignoring exception as this means the peer for " - "port '%s' has already been deleted."), - nsx_port_id) - - # Do not go to NSX for external networks - if not external: - try: - switchlib.delete_networks(self.cluster, id, lswitch_ids) - LOG.debug(_("delete_network completed for tenant: %s"), - context.tenant_id) - except n_exc.NotFound: - LOG.warning(_("Did not found lswitch %s in NSX"), id) - self.handle_network_dhcp_access(context, id, action='delete_network') - - def get_network(self, context, id, fields=None): - with context.session.begin(subtransactions=True): - # goto to the plugin DB and fetch the network - network = self._get_network(context, id) - if (self.nsx_sync_opts.always_read_status or - fields and 'status' in fields): - # External networks are not backed by nsx lswitches - if not network.external: - # Perform explicit state synchronization - self._synchronizer.synchronize_network(context, network) - # Don't do field selection here otherwise we won't be able - # to add provider networks fields - net_result = self._make_network_dict(network) - self._extend_network_dict_provider(context, net_result) - return self._fields(net_result, fields) - - def get_networks(self, context, filters=None, fields=None, - sorts=None, limit=None, marker=None, - page_reverse=False): - filters = filters or {} - with context.session.begin(subtransactions=True): - networks = ( - super(NsxPluginV2, self).get_networks( - context, filters, fields, sorts, - limit, marker, page_reverse)) - for net in networks: - self._extend_network_dict_provider(context, net) - return [self._fields(network, fields) for network in networks] - - def update_network(self, 
context, id, network): - pnet._raise_if_updates_provider_attributes(network['network']) - if network["network"].get("admin_state_up") is False: - raise NotImplementedError(_("admin_state_up=False networks " - "are not supported.")) - with context.session.begin(subtransactions=True): - net = super(NsxPluginV2, self).update_network(context, id, network) - if psec.PORTSECURITY in network['network']: - self._process_network_port_security_update( - context, network['network'], net) - net_queue_id = network['network'].get(qos.QUEUE) - if net_queue_id: - self._delete_network_queue_mapping(context, id) - self._process_network_queue_mapping(context, net, net_queue_id) - self._process_l3_update(context, net, network['network']) - self._extend_network_dict_provider(context, net) - # If provided, update port name on backend; treat backend failures as - # not critical (log error, but do not raise) - if 'name' in network['network']: - # in case of chained switches update name only for the first one - nsx_switch_ids = nsx_utils.get_nsx_switch_ids( - context.session, self.cluster, id) - if not nsx_switch_ids or len(nsx_switch_ids) < 1: - LOG.warn(_("Unable to find NSX mappings for neutron " - "network:%s"), id) - try: - switchlib.update_lswitch(self.cluster, - nsx_switch_ids[0], - network['network']['name']) - except api_exc.NsxApiException as e: - LOG.warn(_("Logical switch update on NSX backend failed. " - "Neutron network id:%(net_id)s; " - "NSX lswitch id:%(lswitch_id)s;" - "Error:%(error)s"), - {'net_id': id, 'lswitch_id': nsx_switch_ids[0], - 'error': e}) - - return net - - def create_port(self, context, port): - # If PORTSECURITY is not the default value ATTR_NOT_SPECIFIED - # then we pass the port to the policy engine. The reason why we don't - # pass the value to the policy engine when the port is - # ATTR_NOT_SPECIFIED is for the case where a port is created on a - # shared network that is not owned by the tenant. - port_data = port['port'] - # Set port status as 'DOWN'. 
This will be updated by backend sync. - port_data['status'] = constants.PORT_STATUS_DOWN - with context.session.begin(subtransactions=True): - # First we allocate port in neutron database - neutron_db = super(NsxPluginV2, self).create_port(context, port) - neutron_port_id = neutron_db['id'] - # Update fields obtained from neutron db (eg: MAC address) - port["port"].update(neutron_db) - self.handle_port_metadata_access(context, neutron_db) - # port security extension checks - (port_security, has_ip) = self._determine_port_security_and_has_ip( - context, port_data) - port_data[psec.PORTSECURITY] = port_security - self._process_port_port_security_create( - context, port_data, neutron_db) - # allowed address pair checks - if attr.is_attr_set(port_data.get(addr_pair.ADDRESS_PAIRS)): - if not port_security: - raise addr_pair.AddressPairAndPortSecurityRequired() - else: - self._process_create_allowed_address_pairs( - context, neutron_db, - port_data[addr_pair.ADDRESS_PAIRS]) - else: - # remove ATTR_NOT_SPECIFIED - port_data[addr_pair.ADDRESS_PAIRS] = None - - # security group extension checks - if port_security and has_ip: - self._ensure_default_security_group_on_port(context, port) - elif attr.is_attr_set(port_data.get(ext_sg.SECURITYGROUPS)): - raise psec.PortSecurityAndIPRequiredForSecurityGroups() - port_data[ext_sg.SECURITYGROUPS] = ( - self._get_security_groups_on_port(context, port)) - self._process_port_create_security_group( - context, port_data, port_data[ext_sg.SECURITYGROUPS]) - # QoS extension checks - port_queue_id = self._check_for_queue_and_create( - context, port_data) - self._process_port_queue_mapping( - context, port_data, port_queue_id) - if (isinstance(port_data.get(mac_ext.MAC_LEARNING), bool)): - self._create_mac_learning_state(context, port_data) - elif mac_ext.MAC_LEARNING in port_data: - port_data.pop(mac_ext.MAC_LEARNING) - self._process_portbindings_create_and_update(context, - port['port'], - port_data) - # DB Operation is complete, perform 
NSX operation - try: - port_data = port['port'].copy() - port_create_func = self._port_drivers['create'].get( - port_data['device_owner'], - self._port_drivers['create']['default']) - port_create_func(context, port_data) - LOG.debug(_("port created on NSX backend for tenant " - "%(tenant_id)s: (%(id)s)"), port_data) - except n_exc.NotFound: - LOG.warning(_("Logical switch for network %s was not " - "found in NSX."), port_data['network_id']) - # Put port in error on neutron DB - with context.session.begin(subtransactions=True): - port = self._get_port(context, neutron_port_id) - port_data['status'] = constants.PORT_STATUS_ERROR - port['status'] = port_data['status'] - context.session.add(port) - except Exception: - # Port must be removed from neutron DB - with excutils.save_and_reraise_exception(): - LOG.error(_("Unable to create port or set port " - "attachment in NSX.")) - with context.session.begin(subtransactions=True): - self._delete_port(context, neutron_port_id) - - self.handle_port_dhcp_access(context, port_data, action='create_port') - return port_data - - def update_port(self, context, id, port): - delete_security_groups = self._check_update_deletes_security_groups( - port) - has_security_groups = self._check_update_has_security_groups(port) - delete_addr_pairs = self._check_update_deletes_allowed_address_pairs( - port) - has_addr_pairs = self._check_update_has_allowed_address_pairs(port) - - with context.session.begin(subtransactions=True): - ret_port = super(NsxPluginV2, self).update_port( - context, id, port) - # Save current mac learning state to check whether it's - # being updated or not - old_mac_learning_state = ret_port.get(mac_ext.MAC_LEARNING) - # copy values over - except fixed_ips as - # they've already been processed - port['port'].pop('fixed_ips', None) - ret_port.update(port['port']) - tenant_id = self._get_tenant_id_for_create(context, ret_port) - - # populate port_security setting - if psec.PORTSECURITY not in port['port']: - 
ret_port[psec.PORTSECURITY] = self._get_port_security_binding( - context, id) - has_ip = self._ip_on_port(ret_port) - # validate port security and allowed address pairs - if not ret_port[psec.PORTSECURITY]: - # has address pairs in request - if has_addr_pairs: - raise addr_pair.AddressPairAndPortSecurityRequired() - elif not delete_addr_pairs: - # check if address pairs are in db - ret_port[addr_pair.ADDRESS_PAIRS] = ( - self.get_allowed_address_pairs(context, id)) - if ret_port[addr_pair.ADDRESS_PAIRS]: - raise addr_pair.AddressPairAndPortSecurityRequired() - - if (delete_addr_pairs or has_addr_pairs): - # delete address pairs and read them in - self._delete_allowed_address_pairs(context, id) - self._process_create_allowed_address_pairs( - context, ret_port, ret_port[addr_pair.ADDRESS_PAIRS]) - # checks if security groups were updated adding/modifying - # security groups, port security is set and port has ip - if not (has_ip and ret_port[psec.PORTSECURITY]): - if has_security_groups: - raise psec.PortSecurityAndIPRequiredForSecurityGroups() - # Update did not have security groups passed in. Check - # that port does not have any security groups already on it. - filters = {'port_id': [id]} - security_groups = ( - super(NsxPluginV2, self)._get_port_security_group_bindings( - context, filters) - ) - if security_groups and not delete_security_groups: - raise psec.PortSecurityPortHasSecurityGroup() - - if (delete_security_groups or has_security_groups): - # delete the port binding and read it with the new rules. 
- self._delete_port_security_group_bindings(context, id) - sgids = self._get_security_groups_on_port(context, port) - self._process_port_create_security_group(context, ret_port, - sgids) - - if psec.PORTSECURITY in port['port']: - self._process_port_port_security_update( - context, port['port'], ret_port) - - port_queue_id = self._check_for_queue_and_create( - context, ret_port) - # Populate the mac learning attribute - new_mac_learning_state = port['port'].get(mac_ext.MAC_LEARNING) - if (new_mac_learning_state is not None and - old_mac_learning_state != new_mac_learning_state): - self._update_mac_learning_state(context, id, - new_mac_learning_state) - ret_port[mac_ext.MAC_LEARNING] = new_mac_learning_state - self._delete_port_queue_mapping(context, ret_port['id']) - self._process_port_queue_mapping(context, ret_port, - port_queue_id) - LOG.debug(_("Updating port: %s"), port) - nsx_switch_id, nsx_port_id = nsx_utils.get_nsx_switch_and_port_id( - context.session, self.cluster, id) - # Convert Neutron security groups identifiers into NSX security - # profiles identifiers - nsx_sec_profile_ids = [ - nsx_utils.get_nsx_security_group_id( - context.session, self.cluster, neutron_sg_id) for - neutron_sg_id in (ret_port[ext_sg.SECURITYGROUPS] or [])] - - if nsx_port_id: - try: - switchlib.update_port( - self.cluster, nsx_switch_id, nsx_port_id, - id, tenant_id, - ret_port['name'], - ret_port['device_id'], - ret_port['admin_state_up'], - ret_port['mac_address'], - ret_port['fixed_ips'], - ret_port[psec.PORTSECURITY], - nsx_sec_profile_ids, - ret_port[qos.QUEUE], - ret_port.get(mac_ext.MAC_LEARNING), - ret_port.get(addr_pair.ADDRESS_PAIRS)) - - # Update the port status from nsx. If we fail here hide it - # since the port was successfully updated but we were not - # able to retrieve the status. - ret_port['status'] = switchlib.get_port_status( - self.cluster, nsx_switch_id, - nsx_port_id) - # FIXME(arosen) improve exception handling. 
- except Exception: - ret_port['status'] = constants.PORT_STATUS_ERROR - LOG.exception(_("Unable to update port id: %s."), - nsx_port_id) - - # If nsx_port_id is not in database or in nsx put in error state. - else: - ret_port['status'] = constants.PORT_STATUS_ERROR - - self._process_portbindings_create_and_update(context, - port['port'], - ret_port) - return ret_port - - def delete_port(self, context, id, l3_port_check=True, - nw_gw_port_check=True): - """Deletes a port on a specified Virtual Network. - - If the port contains a remote interface attachment, the remote - interface is first un-plugged and then the port is deleted. - - :returns: None - :raises: exception.PortInUse - :raises: exception.PortNotFound - :raises: exception.NetworkNotFound - """ - # if needed, check to see if this is a port owned by - # a l3 router. If so, we should prevent deletion here - if l3_port_check: - self.prevent_l3_port_deletion(context, id) - neutron_db_port = self.get_port(context, id) - # Perform the same check for ports owned by layer-2 gateways - if nw_gw_port_check: - self.prevent_network_gateway_port_deletion(context, - neutron_db_port) - port_delete_func = self._port_drivers['delete'].get( - neutron_db_port['device_owner'], - self._port_drivers['delete']['default']) - - port_delete_func(context, neutron_db_port) - self.disassociate_floatingips(context, id) - with context.session.begin(subtransactions=True): - queue = self._get_port_queue_bindings(context, {'port_id': [id]}) - # metadata_dhcp_host_route - self.handle_port_metadata_access( - context, neutron_db_port, is_delete=True) - super(NsxPluginV2, self).delete_port(context, id) - # Delete qos queue if possible - if queue: - self.delete_qos_queue(context, queue[0]['queue_id'], False) - self.handle_port_dhcp_access( - context, neutron_db_port, action='delete_port') - - def get_port(self, context, id, fields=None): - with context.session.begin(subtransactions=True): - if (self.nsx_sync_opts.always_read_status or - fields 
and 'status' in fields): - # Perform explicit state synchronization - db_port = self._get_port(context, id) - self._synchronizer.synchronize_port( - context, db_port) - return self._make_port_dict(db_port, fields) - else: - return super(NsxPluginV2, self).get_port(context, id, fields) - - def get_router(self, context, id, fields=None): - if (self.nsx_sync_opts.always_read_status or - fields and 'status' in fields): - db_router = self._get_router(context, id) - # Perform explicit state synchronization - self._synchronizer.synchronize_router( - context, db_router) - return self._make_router_dict(db_router, fields) - else: - return super(NsxPluginV2, self).get_router(context, id, fields) - - def _create_lrouter(self, context, router, nexthop): - tenant_id = self._get_tenant_id_for_create(context, router) - distributed = router.get('distributed') - try: - lrouter = routerlib.create_lrouter( - self.cluster, router['id'], - tenant_id, router['name'], nexthop, - distributed=attr.is_attr_set(distributed) and distributed) - except nsx_exc.InvalidVersion: - msg = _("Cannot create a distributed router with the NSX " - "platform currently in execution. Please, try " - "without specifying the 'distributed' attribute.") - LOG.exception(msg) - raise n_exc.BadRequest(resource='router', msg=msg) - except api_exc.NsxApiException: - err_msg = _("Unable to create logical router on NSX Platform") - LOG.exception(err_msg) - raise nsx_exc.NsxPluginException(err_msg=err_msg) - - # Create the port here - and update it later if we have gw_info - try: - self._create_and_attach_router_port( - self.cluster, context, lrouter['uuid'], {'fake_ext_gw': True}, - "L3GatewayAttachment", - self.cluster.default_l3_gw_service_uuid) - except nsx_exc.NsxPluginException: - LOG.exception(_("Unable to create L3GW port on logical router " - "%(router_uuid)s. 
Verify Default Layer-3 Gateway " - "service %(def_l3_gw_svc)s id is correct"), - {'router_uuid': lrouter['uuid'], - 'def_l3_gw_svc': - self.cluster.default_l3_gw_service_uuid}) - # Try and remove logical router from NSX - routerlib.delete_lrouter(self.cluster, lrouter['uuid']) - # Return user a 500 with an apter message - raise nsx_exc.NsxPluginException( - err_msg=(_("Unable to create router %s on NSX backend") % - router['id'])) - lrouter['status'] = plugin_const.ACTIVE - return lrouter - - def create_router(self, context, router): - # NOTE(salvatore-orlando): We completely override this method in - # order to be able to use the NSX ID as Neutron ID - # TODO(salvatore-orlando): Propose upstream patch for allowing - # 3rd parties to specify IDs as we do with l2 plugin - r = router['router'] - has_gw_info = False - tenant_id = self._get_tenant_id_for_create(context, r) - # default value to set - nsx wants it (even if we don't have it) - nexthop = NSX_DEFAULT_NEXTHOP - # if external gateway info are set, then configure nexthop to - # default external gateway - if 'external_gateway_info' in r and r.get('external_gateway_info'): - has_gw_info = True - gw_info = r['external_gateway_info'] - del r['external_gateway_info'] - # The following DB read will be performed again when updating - # gateway info. This is not great, but still better than - # creating NSX router here and updating it later - network_id = (gw_info.get('network_id', None) if gw_info - else None) - if network_id: - ext_net = self._get_network(context, network_id) - if not ext_net.external: - msg = (_("Network '%s' is not a valid external " - "network") % network_id) - raise n_exc.BadRequest(resource='router', msg=msg) - if ext_net.subnets: - ext_subnet = ext_net.subnets[0] - nexthop = ext_subnet.gateway_ip - # NOTE(salv-orlando): Pre-generating uuid for Neutron - # router. 
This will be removed once the router create operation - # becomes an asynchronous task - neutron_router_id = str(uuid.uuid4()) - r['id'] = neutron_router_id - lrouter = self._create_lrouter(context, r, nexthop) - # Update 'distributed' with value returned from NSX - # This will be useful for setting the value if the API request - # did not specify any value for the 'distributed' attribute - # Platforms older than 3.x do not support the attribute - r['distributed'] = lrouter.get('distributed', False) - # TODO(salv-orlando): Deal with backend object removal in case - # of db failures - with context.session.begin(subtransactions=True): - # Transaction nesting is needed to avoid foreign key violations - # when processing the distributed router binding - with context.session.begin(subtransactions=True): - router_db = l3_db.Router(id=neutron_router_id, - tenant_id=tenant_id, - name=r['name'], - admin_state_up=r['admin_state_up'], - status=lrouter['status']) - context.session.add(router_db) - self._process_nsx_router_create(context, router_db, r) - # Ensure neutron router is moved into the transaction's buffer - context.session.flush() - # Add mapping between neutron and nsx identifiers - nsx_db.add_neutron_nsx_router_mapping( - context.session, router_db['id'], lrouter['uuid']) - - if has_gw_info: - # NOTE(salv-orlando): This operation has been moved out of the - # database transaction since it performs several NSX queries, - # ithis ncreasing the risk of deadlocks between eventlet and - # sqlalchemy operations. 
- # Set external gateway and remove router in case of failure - try: - self._update_router_gw_info(context, router_db['id'], gw_info) - except (n_exc.NeutronException, api_exc.NsxApiException): - with excutils.save_and_reraise_exception(): - # As setting gateway failed, the router must be deleted - # in order to ensure atomicity - router_id = router_db['id'] - LOG.warn(_("Failed to set gateway info for router being " - "created:%s - removing router"), router_id) - self.delete_router(context, router_id) - LOG.info(_("Create router failed while setting external " - "gateway. Router:%s has been removed from " - "DB and backend"), - router_id) - return self._make_router_dict(router_db) - - def _update_lrouter(self, context, router_id, name, nexthop, routes=None): - nsx_router_id = nsx_utils.get_nsx_router_id( - context.session, self.cluster, router_id) - return routerlib.update_lrouter( - self.cluster, nsx_router_id, name, - nexthop, routes=routes) - - def _update_lrouter_routes(self, context, router_id, routes): - nsx_router_id = nsx_utils.get_nsx_router_id( - context.session, self.cluster, router_id) - routerlib.update_explicit_routes_lrouter( - self.cluster, nsx_router_id, routes) - - def update_router(self, context, router_id, router): - # Either nexthop is updated or should be kept as it was before - r = router['router'] - nexthop = None - if 'external_gateway_info' in r and r.get('external_gateway_info'): - gw_info = r['external_gateway_info'] - # The following DB read will be performed again when updating - # gateway info. 
This is not great, but still better than - # creating NSX router here and updating it later - network_id = (gw_info.get('network_id', None) if gw_info - else None) - if network_id: - ext_net = self._get_network(context, network_id) - if not ext_net.external: - msg = (_("Network '%s' is not a valid external " - "network") % network_id) - raise n_exc.BadRequest(resource='router', msg=msg) - if ext_net.subnets: - ext_subnet = ext_net.subnets[0] - nexthop = ext_subnet.gateway_ip - try: - for route in r.get('routes', []): - if route['destination'] == '0.0.0.0/0': - msg = _("'routes' cannot contain route '0.0.0.0/0', " - "this must be updated through the default " - "gateway attribute") - raise n_exc.BadRequest(resource='router', msg=msg) - previous_routes = self._update_lrouter( - context, router_id, r.get('name'), - nexthop, routes=r.get('routes')) - # NOTE(salv-orlando): The exception handling below is not correct, but - # unfortunately nsxlib raises a neutron notfound exception when an - # object is not found in the underlying backend - except n_exc.NotFound: - # Put the router in ERROR status - with context.session.begin(subtransactions=True): - router_db = self._get_router(context, router_id) - router_db['status'] = constants.NET_STATUS_ERROR - raise nsx_exc.NsxPluginException( - err_msg=_("Logical router %s not found " - "on NSX Platform") % router_id) - except api_exc.NsxApiException: - raise nsx_exc.NsxPluginException( - err_msg=_("Unable to update logical router on NSX Platform")) - except nsx_exc.InvalidVersion: - msg = _("Request cannot contain 'routes' with the NSX " - "platform currently in execution. 
Please, try " - "without specifying the static routes.") - LOG.exception(msg) - raise n_exc.BadRequest(resource='router', msg=msg) - try: - return super(NsxPluginV2, self).update_router(context, - router_id, router) - except (extraroute.InvalidRoutes, - extraroute.RouterInterfaceInUseByRoute, - extraroute.RoutesExhausted): - with excutils.save_and_reraise_exception(): - # revert changes made to NSX - self._update_lrouter_routes( - context, router_id, previous_routes) - - def _delete_lrouter(self, context, router_id, nsx_router_id): - # The neutron router id (router_id) is ignored in this routine, - # but used in plugins deriving from this one - routerlib.delete_lrouter(self.cluster, nsx_router_id) - - def delete_router(self, context, router_id): - with context.session.begin(subtransactions=True): - # TODO(salv-orlando): This call should have no effect on delete - # router, but if it does, it should not happen within a - # transaction, and it should be restored on rollback - self.handle_router_metadata_access( - context, router_id, interface=None) - # Pre-delete checks - # NOTE(salv-orlando): These checks will be repeated anyway when - # calling the superclass. This is wasteful, but is the simplest - # way of ensuring a consistent removal of the router both in - # the neutron Database and in the NSX backend. - # TODO(salv-orlando): split pre-delete checks and actual - # deletion in superclass. 
- - # Ensure that the router is not used - fips = self.get_floatingips_count( - context.elevated(), filters={'router_id': [router_id]}) - if fips: - raise l3.RouterInUse(router_id=router_id) - - device_filter = {'device_id': [router_id], - 'device_owner': [l3_db.DEVICE_OWNER_ROUTER_INTF]} - ports = self._core_plugin.get_ports_count(context.elevated(), - filters=device_filter) - if ports: - raise l3.RouterInUse(router_id=router_id) - - nsx_router_id = nsx_utils.get_nsx_router_id( - context.session, self.cluster, router_id) - # It is safe to remove the router from the database, so remove it - # from the backend - try: - self._delete_lrouter(context, router_id, nsx_router_id) - except n_exc.NotFound: - # This is not a fatal error, but needs to be logged - LOG.warning(_("Logical router '%s' not found " - "on NSX Platform"), router_id) - except api_exc.NsxApiException: - raise nsx_exc.NsxPluginException( - err_msg=(_("Unable to delete logical router '%s' " - "on NSX Platform") % nsx_router_id)) - # Remove the NSX mapping first in order to ensure a mapping to - # a non-existent NSX router is not left in the DB in case of - # failure while removing the router from the neutron DB - try: - nsx_db.delete_neutron_nsx_router_mapping( - context.session, router_id) - except db_exc.DBError as d_exc: - # Do not make this error fatal - LOG.warn(_("Unable to remove NSX mapping for Neutron router " - "%(router_id)s because of the following exception:" - "%(d_exc)s"), {'router_id': router_id, - 'd_exc': str(d_exc)}) - # Perform the actual delete on the Neutron DB - super(NsxPluginV2, self).delete_router(context, router_id) - - def _add_subnet_snat_rule(self, context, router, subnet): - gw_port = router.gw_port - if gw_port and router.enable_snat: - # There is a change gw_port might have multiple IPs - # In that case we will consider only the first one - if gw_port.get('fixed_ips'): - snat_ip = gw_port['fixed_ips'][0]['ip_address'] - cidr_prefix = int(subnet['cidr'].split('/')[1]) - 
nsx_router_id = nsx_utils.get_nsx_router_id( - context.session, self.cluster, router['id']) - routerlib.create_lrouter_snat_rule( - self.cluster, nsx_router_id, snat_ip, snat_ip, - order=NSX_EXTGW_NAT_RULES_ORDER - cidr_prefix, - match_criteria={'source_ip_addresses': subnet['cidr']}) - - def _delete_subnet_snat_rule(self, context, router, subnet): - # Remove SNAT rule if external gateway is configured - if router.gw_port: - nsx_router_id = nsx_utils.get_nsx_router_id( - context.session, self.cluster, router['id']) - routerlib.delete_nat_rules_by_match( - self.cluster, nsx_router_id, "SourceNatRule", - max_num_expected=1, min_num_expected=1, - source_ip_addresses=subnet['cidr']) - - def add_router_interface(self, context, router_id, interface_info): - # When adding interface by port_id we need to create the - # peer port on the nsx logical router in this routine - port_id = interface_info.get('port_id') - router_iface_info = super(NsxPluginV2, self).add_router_interface( - context, router_id, interface_info) - # router_iface_info will always have a subnet_id attribute - subnet_id = router_iface_info['subnet_id'] - nsx_router_id = nsx_utils.get_nsx_router_id( - context.session, self.cluster, router_id) - if port_id: - port_data = self._get_port(context, port_id) - nsx_switch_id, nsx_port_id = nsx_utils.get_nsx_switch_and_port_id( - context.session, self.cluster, port_id) - # Unplug current attachment from lswitch port - switchlib.plug_vif_interface(self.cluster, nsx_switch_id, - nsx_port_id, "NoAttachment") - # Create logical router port and plug patch attachment - self._create_and_attach_router_port( - self.cluster, context, nsx_router_id, port_data, - "PatchAttachment", nsx_port_id, subnet_ids=[subnet_id]) - subnet = self._get_subnet(context, subnet_id) - # If there is an external gateway we need to configure the SNAT rule. 
- # Fetch router from DB - router = self._get_router(context, router_id) - self._add_subnet_snat_rule(context, router, subnet) - routerlib.create_lrouter_nosnat_rule( - self.cluster, nsx_router_id, - order=NSX_NOSNAT_RULES_ORDER, - match_criteria={'destination_ip_addresses': subnet['cidr']}) - - # Ensure the NSX logical router has a connection to a 'metadata access' - # network (with a proxy listening on its DHCP port), by creating it - # if needed. - self.handle_router_metadata_access( - context, router_id, interface=router_iface_info) - LOG.debug(_("Add_router_interface completed for subnet:%(subnet_id)s " - "and router:%(router_id)s"), - {'subnet_id': subnet_id, 'router_id': router_id}) - return router_iface_info - - def remove_router_interface(self, context, router_id, interface_info): - # The code below is duplicated from base class, but comes handy - # as we need to retrieve the router port id before removing the port - subnet = None - subnet_id = None - if 'port_id' in interface_info: - port_id = interface_info['port_id'] - # find subnet_id - it is need for removing the SNAT rule - port = self._get_port(context, port_id) - if port.get('fixed_ips'): - subnet_id = port['fixed_ips'][0]['subnet_id'] - if not (port['device_owner'] == l3_db.DEVICE_OWNER_ROUTER_INTF and - port['device_id'] == router_id): - raise l3.RouterInterfaceNotFound(router_id=router_id, - port_id=port_id) - elif 'subnet_id' in interface_info: - subnet_id = interface_info['subnet_id'] - subnet = self._get_subnet(context, subnet_id) - rport_qry = context.session.query(models_v2.Port) - ports = rport_qry.filter_by( - device_id=router_id, - device_owner=l3_db.DEVICE_OWNER_ROUTER_INTF, - network_id=subnet['network_id']) - for p in ports: - if p['fixed_ips'][0]['subnet_id'] == subnet_id: - port_id = p['id'] - break - else: - raise l3.RouterInterfaceNotFoundForSubnet(router_id=router_id, - subnet_id=subnet_id) - # Finally remove the data from the Neutron DB - # This will also destroy the port on the 
logical switch - info = super(NsxPluginV2, self).remove_router_interface( - context, router_id, interface_info) - - try: - # Ensure the connection to the 'metadata access network' - # is removed (with the network) if this the last subnet - # on the router - self.handle_router_metadata_access( - context, router_id, interface=info) - if not subnet: - subnet = self._get_subnet(context, subnet_id) - router = self._get_router(context, router_id) - # If router is enabled_snat = False there are no snat rules to - # delete. - if router.enable_snat: - self._delete_subnet_snat_rule(context, router, subnet) - # Relax the minimum expected number as the nosnat rules - # do not exist in 2.x deployments - nsx_router_id = nsx_utils.get_nsx_router_id( - context.session, self.cluster, router_id) - routerlib.delete_nat_rules_by_match( - self.cluster, nsx_router_id, "NoSourceNatRule", - max_num_expected=1, min_num_expected=0, - destination_ip_addresses=subnet['cidr']) - except n_exc.NotFound: - LOG.error(_("Logical router resource %s not found " - "on NSX platform") % router_id) - except api_exc.NsxApiException: - raise nsx_exc.NsxPluginException( - err_msg=(_("Unable to update logical router" - "on NSX Platform"))) - return info - - def _retrieve_and_delete_nat_rules(self, context, floating_ip_address, - internal_ip, nsx_router_id, - min_num_rules_expected=0): - """Finds and removes NAT rules from a NSX router.""" - # NOTE(salv-orlando): The context parameter is ignored in this method - # but used by derived classes - try: - # Remove DNAT rule for the floating IP - routerlib.delete_nat_rules_by_match( - self.cluster, nsx_router_id, "DestinationNatRule", - max_num_expected=1, - min_num_expected=min_num_rules_expected, - destination_ip_addresses=floating_ip_address) - - # Remove SNAT rules for the floating IP - routerlib.delete_nat_rules_by_match( - self.cluster, nsx_router_id, "SourceNatRule", - max_num_expected=1, - min_num_expected=min_num_rules_expected, - 
source_ip_addresses=internal_ip) - routerlib.delete_nat_rules_by_match( - self.cluster, nsx_router_id, "SourceNatRule", - max_num_expected=1, - min_num_expected=min_num_rules_expected, - destination_ip_addresses=internal_ip) - - except api_exc.NsxApiException: - with excutils.save_and_reraise_exception(): - LOG.exception(_("An error occurred while removing NAT rules " - "on the NSX platform for floating ip:%s"), - floating_ip_address) - except nsx_exc.NatRuleMismatch: - # Do not surface to the user - LOG.warning(_("An incorrect number of matching NAT rules " - "was found on the NSX platform")) - - def _remove_floatingip_address(self, context, fip_db): - # Remove floating IP address from logical router port - # Fetch logical port of router's external gateway - router_id = fip_db.router_id - nsx_router_id = nsx_utils.get_nsx_router_id( - context.session, self.cluster, router_id) - nsx_gw_port_id = routerlib.find_router_gw_port( - context, self.cluster, nsx_router_id)['uuid'] - ext_neutron_port_db = self._get_port(context.elevated(), - fip_db.floating_port_id) - nsx_floating_ips = self._build_ip_address_list( - context.elevated(), ext_neutron_port_db['fixed_ips']) - routerlib.update_lrouter_port_ips(self.cluster, - nsx_router_id, - nsx_gw_port_id, - ips_to_add=[], - ips_to_remove=nsx_floating_ips) - - def _get_fip_assoc_data(self, context, fip, floatingip_db): - if (('fixed_ip_address' in fip and fip['fixed_ip_address']) and - not ('port_id' in fip and fip['port_id'])): - msg = _("fixed_ip_address cannot be specified without a port_id") - raise n_exc.BadRequest(resource='floatingip', msg=msg) - port_id = internal_ip = router_id = None - if 'port_id' in fip and fip['port_id']: - fip_qry = context.session.query(l3_db.FloatingIP) - port_id, internal_ip, router_id = self.get_assoc_data( - context, - fip, - floatingip_db['floating_network_id']) - try: - fip_qry.filter_by( - fixed_port_id=fip['port_id'], - floating_network_id=floatingip_db['floating_network_id'], - 
fixed_ip_address=internal_ip).one() - raise l3.FloatingIPPortAlreadyAssociated( - port_id=fip['port_id'], - fip_id=floatingip_db['id'], - floating_ip_address=floatingip_db['floating_ip_address'], - fixed_ip=floatingip_db['fixed_ip_address'], - net_id=floatingip_db['floating_network_id']) - except sa_exc.NoResultFound: - pass - return (port_id, internal_ip, router_id) - - def _update_fip_assoc(self, context, fip, floatingip_db, external_port): - """Update floating IP association data. - - Overrides method from base class. - The method is augmented for creating NAT rules in the process. - """ - # Store router currently serving the floating IP - old_router_id = floatingip_db.router_id - port_id, internal_ip, router_id = self._get_fip_assoc_data( - context, fip, floatingip_db) - floating_ip = floatingip_db['floating_ip_address'] - # If there's no association router_id will be None - if router_id: - nsx_router_id = nsx_utils.get_nsx_router_id( - context.session, self.cluster, router_id) - self._retrieve_and_delete_nat_rules( - context, floating_ip, internal_ip, nsx_router_id) - # Fetch logical port of router's external gateway - # Fetch logical port of router's external gateway - nsx_floating_ips = self._build_ip_address_list( - context.elevated(), external_port['fixed_ips']) - floating_ip = floatingip_db['floating_ip_address'] - # Retrieve and delete existing NAT rules, if any - if old_router_id: - nsx_old_router_id = nsx_utils.get_nsx_router_id( - context.session, self.cluster, old_router_id) - # Retrieve the current internal ip - _p, _s, old_internal_ip = self._internal_fip_assoc_data( - context, {'id': floatingip_db.id, - 'port_id': floatingip_db.fixed_port_id, - 'fixed_ip_address': floatingip_db.fixed_ip_address, - 'tenant_id': floatingip_db.tenant_id}) - nsx_gw_port_id = routerlib.find_router_gw_port( - context, self.cluster, nsx_old_router_id)['uuid'] - self._retrieve_and_delete_nat_rules( - context, floating_ip, old_internal_ip, nsx_old_router_id) - 
routerlib.update_lrouter_port_ips( - self.cluster, nsx_old_router_id, nsx_gw_port_id, - ips_to_add=[], ips_to_remove=nsx_floating_ips) - - if router_id: - nsx_gw_port_id = routerlib.find_router_gw_port( - context, self.cluster, nsx_router_id)['uuid'] - # Re-create NAT rules only if a port id is specified - if fip.get('port_id'): - try: - # Setup DNAT rules for the floating IP - routerlib.create_lrouter_dnat_rule( - self.cluster, nsx_router_id, internal_ip, - order=NSX_FLOATINGIP_NAT_RULES_ORDER, - match_criteria={'destination_ip_addresses': - floating_ip}) - # Setup SNAT rules for the floating IP - # Create a SNAT rule for enabling connectivity to the - # floating IP from the same network as the internal port - # Find subnet id for internal_ip from fixed_ips - internal_port = self._get_port(context, port_id) - # Cchecks not needed on statements below since otherwise - # _internal_fip_assoc_data would have raised - subnet_ids = [ip['subnet_id'] for ip in - internal_port['fixed_ips'] if - ip['ip_address'] == internal_ip] - internal_subnet_cidr = self._build_ip_address_list( - context, internal_port['fixed_ips'], - subnet_ids=subnet_ids)[0] - routerlib.create_lrouter_snat_rule( - self.cluster, nsx_router_id, floating_ip, floating_ip, - order=NSX_NOSNAT_RULES_ORDER - 1, - match_criteria={'source_ip_addresses': - internal_subnet_cidr, - 'destination_ip_addresses': - internal_ip}) - # setup snat rule such that src ip of a IP packet when - # using floating is the floating ip itself. 
- routerlib.create_lrouter_snat_rule( - self.cluster, nsx_router_id, floating_ip, floating_ip, - order=NSX_FLOATINGIP_NAT_RULES_ORDER, - match_criteria={'source_ip_addresses': internal_ip}) - - # Add Floating IP address to router_port - routerlib.update_lrouter_port_ips( - self.cluster, nsx_router_id, nsx_gw_port_id, - ips_to_add=nsx_floating_ips, ips_to_remove=[]) - except api_exc.NsxApiException: - LOG.exception(_("An error occurred while creating NAT " - "rules on the NSX platform for floating " - "ip:%(floating_ip)s mapped to " - "internal ip:%(internal_ip)s"), - {'floating_ip': floating_ip, - 'internal_ip': internal_ip}) - msg = _("Failed to update NAT rules for floatingip update") - raise nsx_exc.NsxPluginException(err_msg=msg) - - floatingip_db.update({'fixed_ip_address': internal_ip, - 'fixed_port_id': port_id, - 'router_id': router_id}) - - def delete_floatingip(self, context, id): - fip_db = self._get_floatingip(context, id) - # Check whether the floating ip is associated or not - if fip_db.fixed_port_id: - nsx_router_id = nsx_utils.get_nsx_router_id( - context.session, self.cluster, fip_db.router_id) - self._retrieve_and_delete_nat_rules(context, - fip_db.floating_ip_address, - fip_db.fixed_ip_address, - nsx_router_id, - min_num_rules_expected=1) - # Remove floating IP address from logical router port - self._remove_floatingip_address(context, fip_db) - return super(NsxPluginV2, self).delete_floatingip(context, id) - - def disassociate_floatingips(self, context, port_id): - try: - fip_qry = context.session.query(l3_db.FloatingIP) - fip_dbs = fip_qry.filter_by(fixed_port_id=port_id) - - for fip_db in fip_dbs: - nsx_router_id = nsx_utils.get_nsx_router_id( - context.session, self.cluster, fip_db.router_id) - self._retrieve_and_delete_nat_rules(context, - fip_db.floating_ip_address, - fip_db.fixed_ip_address, - nsx_router_id, - min_num_rules_expected=1) - self._remove_floatingip_address(context, fip_db) - except sa_exc.NoResultFound: - LOG.debug(_("The port 
'%s' is not associated with floating IPs"), - port_id) - except n_exc.NotFound: - LOG.warning(_("Nat rules not found in nsx for port: %s"), id) - - super(NsxPluginV2, self).disassociate_floatingips(context, port_id) - - def create_network_gateway(self, context, network_gateway): - """Create a layer-2 network gateway. - - Create the gateway service on NSX platform and corresponding data - structures in Neutron datase. - """ - # Ensure the default gateway in the config file is in sync with the db - self._ensure_default_network_gateway() - # Need to re-do authZ checks here in order to avoid creation on NSX - gw_data = network_gateway[networkgw.GATEWAY_RESOURCE_NAME] - tenant_id = self._get_tenant_id_for_create(context, gw_data) - devices = gw_data['devices'] - # Populate default physical network where not specified - for device in devices: - if not device.get('interface_name'): - device['interface_name'] = self.cluster.default_interface_name - try: - # Replace Neutron device identifiers with NSX identifiers - dev_map = dict((dev['id'], dev['interface_name']) for - dev in devices) - nsx_devices = [] - for db_device in self._query_gateway_devices( - context, filters={'id': [device['id'] for device in devices]}): - nsx_devices.append( - {'id': db_device['nsx_id'], - 'interface_name': dev_map[db_device['id']]}) - nsx_res = l2gwlib.create_l2_gw_service( - self.cluster, tenant_id, gw_data['name'], nsx_devices) - nsx_uuid = nsx_res.get('uuid') - except api_exc.Conflict: - raise nsx_exc.L2GatewayAlreadyInUse(gateway=gw_data['name']) - except api_exc.NsxApiException: - err_msg = _("Unable to create l2_gw_service for: %s") % gw_data - LOG.exception(err_msg) - raise nsx_exc.NsxPluginException(err_msg=err_msg) - gw_data['id'] = nsx_uuid - return super(NsxPluginV2, self).create_network_gateway( - context, network_gateway) - - def delete_network_gateway(self, context, gateway_id): - """Remove a layer-2 network gateway. 
- - Remove the gateway service from NSX platform and corresponding data - structures in Neutron datase. - """ - # Ensure the default gateway in the config file is in sync with the db - self._ensure_default_network_gateway() - with context.session.begin(subtransactions=True): - try: - super(NsxPluginV2, self).delete_network_gateway( - context, gateway_id) - l2gwlib.delete_l2_gw_service(self.cluster, gateway_id) - except api_exc.ResourceNotFound: - # Do not cause a 500 to be returned to the user if - # the corresponding NSX resource does not exist - LOG.exception(_("Unable to remove gateway service from " - "NSX plaform - the resource was not found")) - - def get_network_gateway(self, context, id, fields=None): - # Ensure the default gateway in the config file is in sync with the db - self._ensure_default_network_gateway() - return super(NsxPluginV2, self).get_network_gateway(context, - id, fields) - - def get_network_gateways(self, context, filters=None, fields=None, - sorts=None, limit=None, marker=None, - page_reverse=False): - # Ensure the default gateway in the config file is in sync with the db - self._ensure_default_network_gateway() - # Ensure the tenant_id attribute is populated on returned gateways - return super(NsxPluginV2, self).get_network_gateways( - context, filters, fields, sorts, limit, marker, page_reverse) - - def update_network_gateway(self, context, id, network_gateway): - # Ensure the default gateway in the config file is in sync with the db - self._ensure_default_network_gateway() - # Update gateway on backend when there's a name change - name = network_gateway[networkgw.GATEWAY_RESOURCE_NAME].get('name') - if name: - try: - l2gwlib.update_l2_gw_service(self.cluster, id, name) - except api_exc.NsxApiException: - # Consider backend failures as non-fatal, but still warn - # because this might indicate something dodgy is going on - LOG.warn(_("Unable to update name on NSX backend " - "for network gateway: %s"), id) - return super(NsxPluginV2, 
self).update_network_gateway( - context, id, network_gateway) - - def connect_network(self, context, network_gateway_id, - network_mapping_info): - # Ensure the default gateway in the config file is in sync with the db - self._ensure_default_network_gateway() - try: - return super(NsxPluginV2, self).connect_network( - context, network_gateway_id, network_mapping_info) - except api_exc.Conflict: - raise nsx_exc.L2GatewayAlreadyInUse(gateway=network_gateway_id) - - def disconnect_network(self, context, network_gateway_id, - network_mapping_info): - # Ensure the default gateway in the config file is in sync with the db - self._ensure_default_network_gateway() - return super(NsxPluginV2, self).disconnect_network( - context, network_gateway_id, network_mapping_info) - - def _get_nsx_device_id(self, context, device_id): - return self._get_gateway_device(context, device_id)['nsx_id'] - - def _rollback_gw_device(self, context, device_id, - gw_data=None, new_status=None, - is_create=False, log_level=logging.ERROR): - LOG.log(log_level, - _("Rolling back database changes for gateway device %s " - "because of an error in the NSX backend"), device_id) - with context.session.begin(subtransactions=True): - query = self._model_query( - context, networkgw_db.NetworkGatewayDevice).filter( - networkgw_db.NetworkGatewayDevice.id == device_id) - if is_create: - query.delete(synchronize_session=False) - else: - super(NsxPluginV2, self).update_gateway_device( - context, device_id, - {networkgw.DEVICE_RESOURCE_NAME: gw_data}) - if new_status: - query.update({'status': new_status}, - synchronize_session=False) - - # TODO(salv-orlando): Handlers for Gateway device operations should be - # moved into the appropriate nsx_handlers package once the code for the - # blueprint nsx-async-backend-communication merges - def create_gateway_device_handler(self, context, gateway_device, - client_certificate): - neutron_id = gateway_device['id'] - try: - nsx_res = l2gwlib.create_gateway_device( - 
self.cluster, - gateway_device['tenant_id'], - gateway_device['name'], - neutron_id, - self.cluster.default_tz_uuid, - gateway_device['connector_type'], - gateway_device['connector_ip'], - client_certificate) - - # Fetch status (it needs another NSX API call) - device_status = nsx_utils.get_nsx_device_status(self.cluster, - nsx_res['uuid']) - - # set NSX GW device in neutron database and update status - with context.session.begin(subtransactions=True): - query = self._model_query( - context, networkgw_db.NetworkGatewayDevice).filter( - networkgw_db.NetworkGatewayDevice.id == neutron_id) - query.update({'status': device_status, - 'nsx_id': nsx_res['uuid']}, - synchronize_session=False) - LOG.debug(_("Neutron gateway device: %(neutron_id)s; " - "NSX transport node identifier: %(nsx_id)s; " - "Operational status: %(status)s."), - {'neutron_id': neutron_id, - 'nsx_id': nsx_res['uuid'], - 'status': device_status}) - return device_status - except (nsx_exc.InvalidSecurityCertificate, api_exc.NsxApiException): - with excutils.save_and_reraise_exception(): - self._rollback_gw_device(context, neutron_id, is_create=True) - - def update_gateway_device_handler(self, context, gateway_device, - old_gateway_device_data, - client_certificate): - nsx_id = gateway_device['nsx_id'] - neutron_id = gateway_device['id'] - try: - l2gwlib.update_gateway_device( - self.cluster, - nsx_id, - gateway_device['tenant_id'], - gateway_device['name'], - neutron_id, - self.cluster.default_tz_uuid, - gateway_device['connector_type'], - gateway_device['connector_ip'], - client_certificate) - - # Fetch status (it needs another NSX API call) - device_status = nsx_utils.get_nsx_device_status(self.cluster, - nsx_id) - # update status - with context.session.begin(subtransactions=True): - query = self._model_query( - context, networkgw_db.NetworkGatewayDevice).filter( - networkgw_db.NetworkGatewayDevice.id == neutron_id) - query.update({'status': device_status}, - synchronize_session=False) - 
LOG.debug(_("Neutron gateway device: %(neutron_id)s; " - "NSX transport node identifier: %(nsx_id)s; " - "Operational status: %(status)s."), - {'neutron_id': neutron_id, - 'nsx_id': nsx_id, - 'status': device_status}) - return device_status - except (nsx_exc.InvalidSecurityCertificate, api_exc.NsxApiException): - with excutils.save_and_reraise_exception(): - self._rollback_gw_device(context, neutron_id, - gw_data=old_gateway_device_data) - except n_exc.NotFound: - # The gateway device was probably deleted in the backend. - # The DB change should be rolled back and the status must - # be put in error - with excutils.save_and_reraise_exception(): - self._rollback_gw_device(context, neutron_id, - gw_data=old_gateway_device_data, - new_status=networkgw_db.ERROR) - - def get_gateway_device(self, context, device_id, fields=None): - # Get device from database - gw_device = super(NsxPluginV2, self).get_gateway_device( - context, device_id, fields, include_nsx_id=True) - # Fetch status from NSX - nsx_id = gw_device['nsx_id'] - device_status = nsx_utils.get_nsx_device_status(self.cluster, nsx_id) - # TODO(salv-orlando): Asynchronous sync for gateway device status - # Update status in database - with context.session.begin(subtransactions=True): - query = self._model_query( - context, networkgw_db.NetworkGatewayDevice).filter( - networkgw_db.NetworkGatewayDevice.id == device_id) - query.update({'status': device_status}, - synchronize_session=False) - gw_device['status'] = device_status - return gw_device - - def get_gateway_devices(self, context, filters=None, fields=None, - sorts=None, limit=None, marker=None, - page_reverse=False): - # Get devices from database - devices = super(NsxPluginV2, self).get_gateway_devices( - context, filters, fields, include_nsx_id=True) - # Fetch operational status from NSX, filter by tenant tag - # TODO(salv-orlando): Asynchronous sync for gateway device status - tenant_id = context.tenant_id if not context.is_admin else None - nsx_statuses = 
nsx_utils.get_nsx_device_statuses(self.cluster, - tenant_id) - # Update statuses in database - with context.session.begin(subtransactions=True): - for device in devices: - new_status = nsx_statuses.get(device['nsx_id']) - if new_status: - device['status'] = new_status - return devices - - def create_gateway_device(self, context, gateway_device): - # NOTE(salv-orlando): client-certificate will not be stored - # in the database - device_data = gateway_device[networkgw.DEVICE_RESOURCE_NAME] - client_certificate = device_data.pop('client_certificate') - gw_device = super(NsxPluginV2, self).create_gateway_device( - context, gateway_device) - # DB operation was successful, perform NSX operation - gw_device['status'] = self.create_gateway_device_handler( - context, gw_device, client_certificate) - return gw_device - - def update_gateway_device(self, context, device_id, - gateway_device): - # NOTE(salv-orlando): client-certificate will not be stored - # in the database - client_certificate = ( - gateway_device[networkgw.DEVICE_RESOURCE_NAME].pop( - 'client_certificate', None)) - # Retrive current state from DB in case a rollback should be needed - old_gw_device_data = super(NsxPluginV2, self).get_gateway_device( - context, device_id, include_nsx_id=True) - gw_device = super(NsxPluginV2, self).update_gateway_device( - context, device_id, gateway_device, include_nsx_id=True) - # DB operation was successful, perform NSX operation - gw_device['status'] = self.update_gateway_device_handler( - context, gw_device, old_gw_device_data, client_certificate) - gw_device.pop('nsx_id') - return gw_device - - def delete_gateway_device(self, context, device_id): - nsx_device_id = self._get_nsx_device_id(context, device_id) - super(NsxPluginV2, self).delete_gateway_device( - context, device_id) - # DB operation was successful, peform NSX operation - # TODO(salv-orlando): State consistency with neutron DB - # should be ensured even in case of backend failures - try: - 
l2gwlib.delete_gateway_device(self.cluster, nsx_device_id) - except n_exc.NotFound: - LOG.warn(_("Removal of gateway device: %(neutron_id)s failed on " - "NSX backend (NSX id:%(nsx_id)s) because the NSX " - "resource was not found"), - {'neutron_id': device_id, 'nsx_id': nsx_device_id}) - except api_exc.NsxApiException: - with excutils.save_and_reraise_exception(): - # In this case a 500 should be returned - LOG.exception(_("Removal of gateway device: %(neutron_id)s " - "failed on NSX backend (NSX id:%(nsx_id)s). " - "Neutron and NSX states have diverged."), - {'neutron_id': device_id, - 'nsx_id': nsx_device_id}) - - def create_security_group(self, context, security_group, default_sg=False): - """Create security group. - - If default_sg is true that means we are creating a default security - group and we don't need to check if one exists. - """ - s = security_group.get('security_group') - - tenant_id = self._get_tenant_id_for_create(context, s) - if not default_sg: - self._ensure_default_security_group(context, tenant_id) - # NOTE(salv-orlando): Pre-generating Neutron ID for security group. - neutron_id = str(uuid.uuid4()) - nsx_secgroup = secgrouplib.create_security_profile( - self.cluster, tenant_id, neutron_id, s) - with context.session.begin(subtransactions=True): - s['id'] = neutron_id - sec_group = super(NsxPluginV2, self).create_security_group( - context, security_group, default_sg) - context.session.flush() - # Add mapping between neutron and nsx identifiers - nsx_db.add_neutron_nsx_security_group_mapping( - context.session, neutron_id, nsx_secgroup['uuid']) - return sec_group - - def update_security_group(self, context, secgroup_id, security_group): - secgroup = (super(NsxPluginV2, self). 
- update_security_group(context, - secgroup_id, - security_group)) - if ('name' in security_group['security_group'] and - secgroup['name'] != 'default'): - nsx_sec_profile_id = nsx_utils.get_nsx_security_group_id( - context.session, self.cluster, secgroup_id) - try: - name = security_group['security_group']['name'] - secgrouplib.update_security_profile( - self.cluster, nsx_sec_profile_id, name) - except (n_exc.NotFound, api_exc.NsxApiException) as e: - # Reverting the DB change is not really worthwhile - # for a mismatch between names. It's the rules that - # we care about. - LOG.error(_('Error while updating security profile ' - '%(uuid)s with name %(name)s: %(error)s.') - % {'uuid': secgroup_id, 'name': name, 'error': e}) - return secgroup - - def delete_security_group(self, context, security_group_id): - """Delete a security group. - - :param security_group_id: security group rule to remove. - """ - with context.session.begin(subtransactions=True): - security_group = super(NsxPluginV2, self).get_security_group( - context, security_group_id) - if not security_group: - raise ext_sg.SecurityGroupNotFound(id=security_group_id) - - if security_group['name'] == 'default' and not context.is_admin: - raise ext_sg.SecurityGroupCannotRemoveDefault() - - filters = {'security_group_id': [security_group['id']]} - if super(NsxPluginV2, self)._get_port_security_group_bindings( - context, filters): - raise ext_sg.SecurityGroupInUse(id=security_group['id']) - nsx_sec_profile_id = nsx_utils.get_nsx_security_group_id( - context.session, self.cluster, security_group_id) - - try: - secgrouplib.delete_security_profile( - self.cluster, nsx_sec_profile_id) - except n_exc.NotFound: - # The security profile was not found on the backend - # do not fail in this case. 
- LOG.warning(_("The NSX security profile %(sec_profile_id)s, " - "associated with the Neutron security group " - "%(sec_group_id)s was not found on the backend"), - {'sec_profile_id': nsx_sec_profile_id, - 'sec_group_id': security_group_id}) - except api_exc.NsxApiException: - # Raise and fail the operation, as there is a problem which - # prevented the sec group from being removed from the backend - LOG.exception(_("An exception occurred while removing the " - "NSX security profile %(sec_profile_id)s, " - "associated with Netron security group " - "%(sec_group_id)s"), - {'sec_profile_id': nsx_sec_profile_id, - 'sec_group_id': security_group_id}) - raise nsx_exc.NsxPluginException( - _("Unable to remove security group %s from backend"), - security_group['id']) - return super(NsxPluginV2, self).delete_security_group( - context, security_group_id) - - def _validate_security_group_rules(self, context, rules): - for rule in rules['security_group_rules']: - r = rule.get('security_group_rule') - port_based_proto = (self._get_ip_proto_number(r['protocol']) - in securitygroups_db.IP_PROTOCOL_MAP.values()) - if (not port_based_proto and - (r['port_range_min'] is not None or - r['port_range_max'] is not None)): - msg = (_("Port values not valid for " - "protocol: %s") % r['protocol']) - raise n_exc.BadRequest(resource='security_group_rule', - msg=msg) - return super(NsxPluginV2, self)._validate_security_group_rules(context, - rules) - - def create_security_group_rule(self, context, security_group_rule): - """Create a single security group rule.""" - bulk_rule = {'security_group_rules': [security_group_rule]} - return self.create_security_group_rule_bulk(context, bulk_rule)[0] - - def create_security_group_rule_bulk(self, context, security_group_rule): - """Create security group rules. 
- - :param security_group_rule: list of rules to create - """ - s = security_group_rule.get('security_group_rules') - tenant_id = self._get_tenant_id_for_create(context, s) - - # TODO(arosen) is there anyway we could avoid having the update of - # the security group rules in nsx outside of this transaction? - with context.session.begin(subtransactions=True): - self._ensure_default_security_group(context, tenant_id) - security_group_id = self._validate_security_group_rules( - context, security_group_rule) - # Check to make sure security group exists - security_group = super(NsxPluginV2, self).get_security_group( - context, security_group_id) - - if not security_group: - raise ext_sg.SecurityGroupNotFound(id=security_group_id) - # Check for duplicate rules - self._check_for_duplicate_rules(context, s) - # gather all the existing security group rules since we need all - # of them to PUT to NSX. - existing_rules = self.get_security_group_rules( - context, {'security_group_id': [security_group['id']]}) - combined_rules = sg_utils.merge_security_group_rules_with_current( - context.session, self.cluster, s, existing_rules) - nsx_sec_profile_id = nsx_utils.get_nsx_security_group_id( - context.session, self.cluster, security_group_id) - secgrouplib.update_security_group_rules(self.cluster, - nsx_sec_profile_id, - combined_rules) - return super( - NsxPluginV2, self).create_security_group_rule_bulk_native( - context, security_group_rule) - - def delete_security_group_rule(self, context, sgrid): - """Delete a security group rule - :param sgrid: security group id to remove. 
- """ - with context.session.begin(subtransactions=True): - # determine security profile id - security_group_rule = ( - super(NsxPluginV2, self).get_security_group_rule( - context, sgrid)) - if not security_group_rule: - raise ext_sg.SecurityGroupRuleNotFound(id=sgrid) - - sgid = security_group_rule['security_group_id'] - current_rules = self.get_security_group_rules( - context, {'security_group_id': [sgid]}) - current_rules_nsx = sg_utils.get_security_group_rules_nsx_format( - context.session, self.cluster, current_rules, True) - - sg_utils.remove_security_group_with_id_and_id_field( - current_rules_nsx, sgrid) - nsx_sec_profile_id = nsx_utils.get_nsx_security_group_id( - context.session, self.cluster, sgid) - secgrouplib.update_security_group_rules( - self.cluster, nsx_sec_profile_id, current_rules_nsx) - return super(NsxPluginV2, self).delete_security_group_rule(context, - sgrid) - - def create_qos_queue(self, context, qos_queue, check_policy=True): - q = qos_queue.get('qos_queue') - self._validate_qos_queue(context, q) - q['id'] = queuelib.create_lqueue(self.cluster, q) - return super(NsxPluginV2, self).create_qos_queue(context, qos_queue) - - def delete_qos_queue(self, context, queue_id, raise_in_use=True): - filters = {'queue_id': [queue_id]} - queues = self._get_port_queue_bindings(context, filters) - if queues: - if raise_in_use: - raise qos.QueueInUseByPort() - else: - return - queuelib.delete_lqueue(self.cluster, queue_id) - return super(NsxPluginV2, self).delete_qos_queue(context, queue_id) diff --git a/neutron/plugins/vmware/plugins/service.py b/neutron/plugins/vmware/plugins/service.py deleted file mode 100644 index 4a5b8e957..000000000 --- a/neutron/plugins/vmware/plugins/service.py +++ /dev/null @@ -1,1812 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2013 VMware, Inc. -# All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -import netaddr -from oslo.config import cfg - -from neutron.common import exceptions as n_exc -from neutron.db.firewall import firewall_db -from neutron.db import l3_db -from neutron.db.loadbalancer import loadbalancer_db -from neutron.db import routedserviceinsertion_db as rsi_db -from neutron.db.vpn import vpn_db -from neutron.extensions import firewall as fw_ext -from neutron.extensions import l3 -from neutron.extensions import routedserviceinsertion as rsi -from neutron.extensions import vpnaas as vpn_ext -from neutron.openstack.common import excutils -from neutron.openstack.common import log as logging -from neutron.plugins.common import constants as service_constants -from neutron.plugins.vmware.api_client import exception as api_exc -from neutron.plugins.vmware.common import config # noqa -from neutron.plugins.vmware.common import exceptions as nsx_exc -from neutron.plugins.vmware.common import utils -from neutron.plugins.vmware.dbexts import servicerouter as sr_db -from neutron.plugins.vmware.dbexts import vcns_db -from neutron.plugins.vmware.dbexts import vcns_models -from neutron.plugins.vmware.extensions import servicerouter as sr -from neutron.plugins.vmware.nsxlib import router as routerlib -from neutron.plugins.vmware.nsxlib import switch as switchlib -from neutron.plugins.vmware.plugins import base -from neutron.plugins.vmware.vshield.common import constants as vcns_const -from neutron.plugins.vmware.vshield.common import exceptions -from neutron.plugins.vmware.vshield.tasks import constants as tasks_const -from 
neutron.plugins.vmware.vshield import vcns_driver -from sqlalchemy.orm import exc as sa_exc - -LOG = logging.getLogger(__name__) - -ROUTER_TYPE_BASIC = 1 -ROUTER_TYPE_ADVANCED = 2 - -ROUTER_STATUS = [ - service_constants.ACTIVE, - service_constants.DOWN, - service_constants.PENDING_CREATE, - service_constants.PENDING_DELETE, - service_constants.ERROR -] - -ROUTER_STATUS_LEVEL = { - service_constants.ACTIVE: vcns_const.RouterStatus.ROUTER_STATUS_ACTIVE, - service_constants.DOWN: vcns_const.RouterStatus.ROUTER_STATUS_DOWN, - service_constants.PENDING_CREATE: ( - vcns_const.RouterStatus.ROUTER_STATUS_PENDING_CREATE - ), - service_constants.PENDING_DELETE: ( - vcns_const.RouterStatus.ROUTER_STATUS_PENDING_DELETE - ), - service_constants.ERROR: vcns_const.RouterStatus.ROUTER_STATUS_ERROR -} - - -class NsxAdvancedPlugin(sr_db.ServiceRouter_mixin, - base.NsxPluginV2, - rsi_db.RoutedServiceInsertionDbMixin, - firewall_db.Firewall_db_mixin, - loadbalancer_db.LoadBalancerPluginDb, - vpn_db.VPNPluginDb - ): - - supported_extension_aliases = ( - base.NsxPluginV2.supported_extension_aliases + [ - "service-router", - "routed-service-insertion", - "fwaas", - "lbaas", - "vpnaas" - ]) - # The service plugin cannot currently support pagination - __native_pagination_support = False - __native_sorting_support = False - - def __init__(self): - super(NsxAdvancedPlugin, self).__init__() - - self._super_create_ext_gw_port = ( - self._port_drivers['create'][l3_db.DEVICE_OWNER_ROUTER_GW]) - self._super_delete_ext_gw_port = ( - self._port_drivers['delete'][l3_db.DEVICE_OWNER_ROUTER_GW]) - - self._port_drivers['create'][l3_db.DEVICE_OWNER_ROUTER_GW] = ( - self._vcns_create_ext_gw_port) - self._port_drivers['delete'][l3_db.DEVICE_OWNER_ROUTER_GW] = ( - self._vcns_delete_ext_gw_port) - - # cache router type based on router id - self._router_type = {} - self.callbacks = VcnsCallbacks(self.safe_reference) - - # load the vCNS driver - self._load_vcns_drivers() - - # switchlib's create_lswitch 
needs to be replaced in order to proxy - # logical switch create requests to vcns - self._set_create_lswitch_proxy() - - def _set_create_lswitch_proxy(self): - base.switchlib.create_lswitch = self._proxy_create_lswitch - - def _proxy_create_lswitch(self, *args, **kwargs): - name, tz_config, tags = ( - _process_base_create_lswitch_args(*args, **kwargs) - ) - return self.vcns_driver.create_lswitch( - name, tz_config, tags=tags, - port_isolation=None, replication_mode=None) - - def _load_vcns_drivers(self): - self.vcns_driver = vcns_driver.VcnsDriver(self.callbacks) - - def _set_router_type(self, router_id, router_type): - self._router_type[router_id] = router_type - - def _get_router_type(self, context=None, router_id=None, router=None): - if not router: - if router_id in self._router_type: - return self._router_type[router_id] - router = self._get_router(context, router_id) - - LOG.debug(_("EDGE: router = %s"), router) - if router['nsx_attributes']['service_router']: - router_type = ROUTER_TYPE_ADVANCED - else: - router_type = ROUTER_TYPE_BASIC - self._set_router_type(router['id'], router_type) - return router_type - - def _find_router_type(self, router): - is_service_router = router.get(sr.SERVICE_ROUTER, False) - if is_service_router: - return ROUTER_TYPE_ADVANCED - else: - return ROUTER_TYPE_BASIC - - def _is_advanced_service_router(self, context=None, router_id=None, - router=None): - if router: - router_type = self._get_router_type(router=router) - else: - router_type = self._get_router_type(context, router_id) - return (router_type == ROUTER_TYPE_ADVANCED) - - def _vcns_create_ext_gw_port(self, context, port_data): - router_id = port_data['device_id'] - if not self._is_advanced_service_router(context, router_id): - self._super_create_ext_gw_port(context, port_data) - return - - # NOP for Edge because currently the port will be create internally - # by VSM - LOG.debug(_("EDGE: _vcns_create_ext_gw_port")) - - def _vcns_delete_ext_gw_port(self, context, 
port_data): - router_id = port_data['device_id'] - if not self._is_advanced_service_router(context, router_id): - self._super_delete_ext_gw_port(context, port_data) - return - - # NOP for Edge - LOG.debug(_("EDGE: _vcns_delete_ext_gw_port")) - - def _get_external_attachment_info(self, context, router): - gw_port = router.gw_port - ipaddress = None - netmask = None - nexthop = None - - if gw_port: - # gw_port may have multiple IPs, only configure the first one - if gw_port.get('fixed_ips'): - ipaddress = gw_port['fixed_ips'][0]['ip_address'] - - network_id = gw_port.get('network_id') - if network_id: - ext_net = self._get_network(context, network_id) - if not ext_net.external: - msg = (_("Network '%s' is not a valid external " - "network") % network_id) - raise n_exc.BadRequest(resource='router', msg=msg) - if ext_net.subnets: - ext_subnet = ext_net.subnets[0] - netmask = str(netaddr.IPNetwork(ext_subnet.cidr).netmask) - nexthop = ext_subnet.gateway_ip - - return (ipaddress, netmask, nexthop) - - def _get_external_gateway_address(self, context, router): - ipaddress, netmask, nexthop = self._get_external_attachment_info( - context, router) - return nexthop - - def _vcns_update_static_routes(self, context, **kwargs): - router = kwargs.get('router') - if router is None: - router = self._get_router(context, kwargs['router_id']) - - edge_id = kwargs.get('edge_id') - if edge_id is None: - binding = vcns_db.get_vcns_router_binding(context.session, - router['id']) - edge_id = binding['edge_id'] - - skippable = True - if 'nexthop' in kwargs: - nexthop = kwargs['nexthop'] - # The default gateway and vnic config has dependencies, if we - # explicitly specify nexthop to change, tell the driver not to - # skip this route update - skippable = False - else: - nexthop = self._get_external_gateway_address(context, - router) - - if 'subnets' in kwargs: - subnets = kwargs['subnets'] - else: - subnets = self._find_router_subnets_cidrs(context.elevated(), - router['id']) - - routes = [] 
- for subnet in subnets: - routes.append({ - 'cidr': subnet, - 'nexthop': vcns_const.INTEGRATION_LR_IPADDRESS.split('/')[0] - }) - self.vcns_driver.update_routes(router['id'], edge_id, nexthop, routes, - skippable) - - def _get_nat_rules(self, context, router): - fip_qry = context.session.query(l3_db.FloatingIP) - fip_db = fip_qry.filter_by(router_id=router['id']).all() - - dnat = [] - snat = [] - for fip in fip_db: - if fip.fixed_port_id: - dnat.append({ - 'dst': fip.floating_ip_address, - 'translated': fip.fixed_ip_address - }) - - gw_port = router.gw_port - if gw_port and router.enable_snat: - if gw_port.get('fixed_ips'): - snat_ip = gw_port['fixed_ips'][0]['ip_address'] - subnets = self._find_router_subnets_cidrs(context.elevated(), - router['id']) - for subnet in subnets: - snat.append({ - 'src': subnet, - 'translated': snat_ip - }) - - return (snat, dnat) - - def _update_nat_rules(self, context, router): - snat, dnat = self._get_nat_rules(context, router) - binding = vcns_db.get_vcns_router_binding(context.session, - router['id']) - self.vcns_driver.update_nat_rules(router['id'], - binding['edge_id'], - snat, dnat) - - def _update_interface(self, context, router, sync=False): - addr, mask, nexthop = self._get_external_attachment_info( - context, router) - - secondary = [] - fip_qry = context.session.query(l3_db.FloatingIP) - fip_db = fip_qry.filter_by(router_id=router['id']).all() - for fip in fip_db: - if fip.fixed_port_id: - secondary.append(fip.floating_ip_address) - #Add all vip addresses bound on the router - vip_addrs = self._get_all_vip_addrs_by_router_id(context, - router['id']) - secondary.extend(vip_addrs) - - binding = vcns_db.get_vcns_router_binding(context.session, - router['id']) - task = self.vcns_driver.update_interface( - router['id'], binding['edge_id'], - vcns_const.EXTERNAL_VNIC_INDEX, - self.vcns_driver.external_network, - addr, mask, secondary=secondary) - if sync: - task.wait(tasks_const.TaskState.RESULT) - - def 
_update_router_gw_info(self, context, router_id, info): - if not self._is_advanced_service_router(context, router_id): - super(NsxAdvancedPlugin, self)._update_router_gw_info( - context, router_id, info) - return - - # get original gw_port config - router = self._get_router(context, router_id) - org_ext_net_id = router.gw_port_id and router.gw_port.network_id - org_enable_snat = router.enable_snat - orgaddr, orgmask, orgnexthop = self._get_external_attachment_info( - context, router) - - super(base.NsxPluginV2, self)._update_router_gw_info( - context, router_id, info, router=router) - - new_ext_net_id = router.gw_port_id and router.gw_port.network_id - new_enable_snat = router.enable_snat - newaddr, newmask, newnexthop = self._get_external_attachment_info( - context, router) - - binding = vcns_db.get_vcns_router_binding(context.session, router_id) - - if new_ext_net_id != org_ext_net_id and orgnexthop: - # network changed, need to remove default gateway before vnic - # can be configured - LOG.debug(_("VCNS: delete default gateway %s"), orgnexthop) - self._vcns_update_static_routes(context, - router=router, - edge_id=binding['edge_id'], - nexthop=None) - - if orgaddr != newaddr or orgmask != newmask: - self.vcns_driver.update_interface( - router_id, binding['edge_id'], - vcns_const.EXTERNAL_VNIC_INDEX, - self.vcns_driver.external_network, - newaddr, newmask) - - if orgnexthop != newnexthop: - self._vcns_update_static_routes(context, - router=router, - edge_id=binding['edge_id'], - nexthop=newnexthop) - - if (new_ext_net_id == org_ext_net_id and - org_enable_snat == new_enable_snat): - return - - self._update_nat_rules(context, router) - - def _add_subnet_snat_rule(self, context, router, subnet): - # NOP for service router - if not self._is_advanced_service_router(router=router): - super(NsxAdvancedPlugin, self)._add_subnet_snat_rule( - context, router, subnet) - - def _delete_subnet_snat_rule(self, context, router, subnet): - # NOP for service router - if not 
self._is_advanced_service_router(router=router): - super(NsxAdvancedPlugin, self)._delete_subnet_snat_rule( - context, router, subnet) - - def _remove_floatingip_address(self, context, fip_db): - # NOP for service router - router_id = fip_db.router_id - if not self._is_advanced_service_router(context, router_id): - super(NsxAdvancedPlugin, self)._remove_floatingip_address( - context, fip_db) - - def _create_advanced_service_router(self, context, neutron_router_id, - name, lrouter, lswitch): - - # store binding - binding = vcns_db.add_vcns_router_binding( - context.session, neutron_router_id, None, lswitch['uuid'], - service_constants.PENDING_CREATE) - - # deploy edge - jobdata = { - 'neutron_router_id': neutron_router_id, - 'lrouter': lrouter, - 'lswitch': lswitch, - 'context': context - } - - # deploy and wait until the deploy requeste has been requested - # so we will have edge_id ready. The wait here should be fine - # as we're not in a database transaction now - self.vcns_driver.deploy_edge( - lrouter['uuid'], name, lswitch['uuid'], jobdata=jobdata, - wait_for_exec=True) - - return binding - - def _create_integration_lswitch(self, tenant_id, name): - # use defautl transport zone - transport_zone_config = [{ - "zone_uuid": self.cluster.default_tz_uuid, - "transport_type": cfg.CONF.NSX.default_transport_type - }] - return self.vcns_driver.create_lswitch(name, transport_zone_config) - - def _add_router_integration_interface(self, tenant_id, name, - lrouter, lswitch): - # create logic switch port - try: - ls_port = switchlib.create_lport( - self.cluster, lswitch['uuid'], tenant_id, - '', '', lrouter['uuid'], True) - except api_exc.NsxApiException: - msg = (_("An exception occurred while creating a port " - "on lswitch %s") % lswitch['uuid']) - LOG.exception(msg) - raise n_exc.NeutronException(message=msg) - - # create logic router port - try: - neutron_port_id = '' - pname = name[:36] + '-lp' - admin_status_enabled = True - lr_port = routerlib.create_router_lport( 
- self.cluster, lrouter['uuid'], tenant_id, - neutron_port_id, pname, admin_status_enabled, - [vcns_const.INTEGRATION_LR_IPADDRESS]) - except api_exc.NsxApiException: - msg = (_("Unable to create port on NSX logical router %s") % name) - LOG.exception(msg) - switchlib.delete_port( - self.cluster, lswitch['uuid'], ls_port['uuid']) - raise n_exc.NeutronException(message=msg) - - # attach logic router port to switch port - try: - self._update_router_port_attachment( - self.cluster, None, lrouter['uuid'], {}, lr_port['uuid'], - 'PatchAttachment', ls_port['uuid'], None) - except api_exc.NsxApiException as e: - # lr_port should have been deleted - switchlib.delete_port( - self.cluster, lswitch['uuid'], ls_port['uuid']) - raise e - - def _create_lrouter(self, context, router, nexthop): - lrouter = super(NsxAdvancedPlugin, self)._create_lrouter( - context, router, vcns_const.INTEGRATION_EDGE_IPADDRESS) - - router_type = self._find_router_type(router) - self._set_router_type(lrouter['uuid'], router_type) - if router_type == ROUTER_TYPE_BASIC: - return lrouter - - tenant_id = self._get_tenant_id_for_create(context, router) - name = router['name'] - try: - lsname = name[:36] + '-ls' - lswitch = self._create_integration_lswitch( - tenant_id, lsname) - except Exception: - msg = _("Unable to create integration logic switch " - "for router %s") % name - LOG.exception(msg) - routerlib.delete_lrouter(self.cluster, lrouter['uuid']) - raise n_exc.NeutronException(message=msg) - - try: - self._add_router_integration_interface(tenant_id, name, - lrouter, lswitch) - except Exception: - msg = _("Unable to add router interface to integration lswitch " - "for router %s") % name - LOG.exception(msg) - routerlib.delete_lrouter(self.cluster, lrouter['uuid']) - raise n_exc.NeutronException(message=msg) - - try: - self._create_advanced_service_router( - context, router['id'], name, lrouter, lswitch) - except Exception: - msg = (_("Unable to create advance service router for %s") % name) - 
LOG.exception(msg) - self.vcns_driver.delete_lswitch(lswitch('uuid')) - routerlib.delete_lrouter(self.cluster, lrouter['uuid']) - raise n_exc.NeutronException(message=msg) - - lrouter['status'] = service_constants.PENDING_CREATE - return lrouter - - def check_router_in_use(self, context, router_id): - router_filter = {'router_id': [router_id]} - vpnservices = self.get_vpnservices( - context, filters={'router_id': [router_id]}) - if vpnservices: - raise vpn_ext.RouterInUseByVPNService( - router_id=router_id, - vpnservice_id=vpnservices[0]['id']) - vips = self.get_vips( - context, filters=router_filter) - if vips: - raise nsx_exc.RouterInUseByLBService( - router_id=router_id, - vip_id=vips[0]['id']) - firewalls = self.get_firewalls( - context, filters=router_filter) - if firewalls: - raise nsx_exc.RouterInUseByFWService( - router_id=router_id, - firewall_id=firewalls[0]['id']) - - def check_router(self, context, router_id): - if not router_id: - msg = _("router_id is not provided!") - raise n_exc.BadRequest(resource='router', msg=msg) - router = self._get_router(context, router_id) - if not self._is_advanced_service_router(context, router=router): - msg = _("router_id:%s is not an advanced router!") % router['id'] - raise n_exc.BadRequest(resource='router', msg=msg) - if router['status'] != service_constants.ACTIVE: - raise nsx_exc.AdvRouterServiceUnavailable(router_id=router['id']) - - def _delete_lrouter(self, context, router_id, nsx_router_id): - binding = vcns_db.get_vcns_router_binding(context.session, router_id) - if not binding: - super(NsxAdvancedPlugin, self)._delete_lrouter( - context, router_id, nsx_router_id) - else: - #Check whether router has an advanced service inserted. 
- self.check_router_in_use(context, router_id) - vcns_db.update_vcns_router_binding( - context.session, router_id, - status=service_constants.PENDING_DELETE) - - lswitch_id = binding['lswitch_id'] - edge_id = binding['edge_id'] - - # delete lswitch - try: - self.vcns_driver.delete_lswitch(lswitch_id) - except exceptions.ResourceNotFound: - LOG.warning(_("Did not found lswitch %s in NSX"), lswitch_id) - - # delete edge - jobdata = { - 'context': context - } - self.vcns_driver.delete_edge(router_id, edge_id, jobdata=jobdata) - - # delete NSX logical router - routerlib.delete_lrouter(self.cluster, nsx_router_id) - - if id in self._router_type: - del self._router_type[router_id] - - def _update_lrouter(self, context, router_id, name, nexthop, routes=None): - if not self._is_advanced_service_router(context, router_id): - return super(NsxAdvancedPlugin, self)._update_lrouter( - context, router_id, name, nexthop, routes=routes) - - previous_routes = super(NsxAdvancedPlugin, self)._update_lrouter( - context, router_id, name, - vcns_const.INTEGRATION_EDGE_IPADDRESS, routes=routes) - - # TODO(fank): Theoretically users can specify extra routes for - # physical network, and routes for phyiscal network needs to be - # configured on Edge. This can be done by checking if nexthop is in - # external network. But for now we only handle routes for logic - # space and leave it for future enhancement. 
- - # Let _update_router_gw_info handle nexthop change - #self._vcns_update_static_routes(context, router_id=router_id) - - return previous_routes - - def _retrieve_and_delete_nat_rules(self, context, floating_ip_address, - internal_ip, router_id, - min_num_rules_expected=0): - # NOP for advanced service router - if not self._is_advanced_service_router(context, router_id): - super(NsxAdvancedPlugin, self)._retrieve_and_delete_nat_rules( - context, floating_ip_address, internal_ip, router_id, - min_num_rules_expected=min_num_rules_expected) - - def _update_fip_assoc(self, context, fip, floatingip_db, external_port): - # Update DB model only for advanced service router - router_id = self._get_fip_assoc_data(context, fip, floatingip_db)[2] - if (router_id and - not self._is_advanced_service_router(context, router_id)): - super(NsxAdvancedPlugin, self)._update_fip_assoc( - context, fip, floatingip_db, external_port) - else: - super(base.NsxPluginV2, self)._update_fip_assoc( - context, fip, floatingip_db, external_port) - - def _get_nsx_lrouter_status(self, id): - try: - lrouter = routerlib.get_lrouter(self.cluster, id) - lr_status = lrouter["_relations"]["LogicalRouterStatus"] - if lr_status["fabric_status"]: - nsx_status = vcns_const.RouterStatus.ROUTER_STATUS_ACTIVE - else: - nsx_status = vcns_const.RouterStatus.ROUTER_STATUS_DOWN - except n_exc.NotFound: - nsx_status = vcns_const.RouterStatus.ROUTER_STATUS_ERROR - - return nsx_status - - def _get_vse_status(self, context, id): - binding = vcns_db.get_vcns_router_binding(context.session, id) - edge_status_level = self.vcns_driver.get_edge_status( - binding['edge_id']) - edge_db_status_level = ROUTER_STATUS_LEVEL[binding.status] - - if edge_status_level > edge_db_status_level: - return edge_status_level - else: - return edge_db_status_level - - def _get_all_nsx_lrouters_statuses(self, tenant_id, fields): - # get nsx lrouters status - nsx_lrouters = routerlib.get_lrouters(self.cluster, - tenant_id, - fields) - - 
nsx_status = {} - for nsx_lrouter in nsx_lrouters: - if (nsx_lrouter["_relations"]["LogicalRouterStatus"] - ["fabric_status"]): - nsx_status[nsx_lrouter['uuid']] = ( - vcns_const.RouterStatus.ROUTER_STATUS_ACTIVE - ) - else: - nsx_status[nsx_lrouter['uuid']] = ( - vcns_const.RouterStatus.ROUTER_STATUS_DOWN - ) - - return nsx_status - - def _get_all_vse_statuses(self, context): - bindings = self._model_query( - context, vcns_models.VcnsRouterBinding) - - vse_db_status_level = {} - edge_id_to_router_id = {} - router_ids = [] - for binding in bindings: - if not binding['edge_id']: - continue - router_id = binding['router_id'] - router_ids.append(router_id) - edge_id_to_router_id[binding['edge_id']] = router_id - vse_db_status_level[router_id] = ( - ROUTER_STATUS_LEVEL[binding['status']]) - - if not vse_db_status_level: - # no advanced service router, no need to query - return {} - - vse_status_level = {} - edges_status_level = self.vcns_driver.get_edges_statuses() - for edge_id, status_level in edges_status_level.iteritems(): - if edge_id in edge_id_to_router_id: - router_id = edge_id_to_router_id[edge_id] - db_status_level = vse_db_status_level[router_id] - if status_level > db_status_level: - vse_status_level[router_id] = status_level - else: - vse_status_level[router_id] = db_status_level - - return vse_status_level - - def get_router(self, context, id, fields=None): - if fields and 'status' not in fields: - return super(NsxAdvancedPlugin, self).get_router( - context, id, fields=fields) - - router = super(NsxAdvancedPlugin, self).get_router(context, id) - - router_type = self._find_router_type(router) - if router_type == ROUTER_TYPE_ADVANCED: - vse_status_level = self._get_vse_status(context, id) - if vse_status_level > ROUTER_STATUS_LEVEL[router['status']]: - router['status'] = ROUTER_STATUS[vse_status_level] - - return self._fields(router, fields) - - def get_routers(self, context, filters=None, fields=None, **kwargs): - routers = super(NsxAdvancedPlugin, 
self).get_routers( - context, filters=filters, **kwargs) - - if fields and 'status' not in fields: - # no status checking, just return regular get_routers - return [self._fields(router, fields) for router in routers] - - for router in routers: - router_type = self._find_router_type(router) - if router_type == ROUTER_TYPE_ADVANCED: - break - else: - # no advanced service router, return here - return [self._fields(router, fields) for router in routers] - - vse_status_all = self._get_all_vse_statuses(context) - for router in routers: - router_type = self._find_router_type(router) - if router_type == ROUTER_TYPE_ADVANCED: - vse_status_level = vse_status_all.get(router['id']) - if vse_status_level is None: - vse_status_level = ( - vcns_const.RouterStatus.ROUTER_STATUS_ERROR) - if vse_status_level > ROUTER_STATUS_LEVEL[router['status']]: - router['status'] = ROUTER_STATUS[vse_status_level] - - return [self._fields(router, fields) for router in routers] - - def add_router_interface(self, context, router_id, interface_info): - info = super(NsxAdvancedPlugin, self).add_router_interface( - context, router_id, interface_info) - if self._is_advanced_service_router(context, router_id): - router = self._get_router(context, router_id) - if router.enable_snat: - self._update_nat_rules(context, router) - # TODO(fank): do rollback on error, or have a dedicated thread - # do sync work (rollback, re-configure, or make router down) - self._vcns_update_static_routes(context, router=router) - return info - - def remove_router_interface(self, context, router_id, interface_info): - info = super(NsxAdvancedPlugin, self).remove_router_interface( - context, router_id, interface_info) - if self._is_advanced_service_router(context, router_id): - router = self._get_router(context, router_id) - if router.enable_snat: - self._update_nat_rules(context, router) - # TODO(fank): do rollback on error, or have a dedicated thread - # do sync work (rollback, re-configure, or make router down) - 
self._vcns_update_static_routes(context, router=router) - return info - - def create_floatingip(self, context, floatingip): - fip = super(NsxAdvancedPlugin, self).create_floatingip( - context, floatingip) - router_id = fip.get('router_id') - if router_id and self._is_advanced_service_router(context, router_id): - router = self._get_router(context, router_id) - # TODO(fank): do rollback on error, or have a dedicated thread - # do sync work (rollback, re-configure, or make router down) - self._update_nat_rules(context, router) - self._update_interface(context, router) - return fip - - def update_floatingip(self, context, id, floatingip): - fip = super(NsxAdvancedPlugin, self).update_floatingip( - context, id, floatingip) - router_id = fip.get('router_id') - if router_id and self._is_advanced_service_router(context, router_id): - router = self._get_router(context, router_id) - # TODO(fank): do rollback on error, or have a dedicated thread - # do sync work (rollback, re-configure, or make router down) - self._update_nat_rules(context, router) - self._update_interface(context, router) - return fip - - def delete_floatingip(self, context, id): - fip_db = self._get_floatingip(context, id) - router_id = None - if fip_db.fixed_port_id: - router_id = fip_db.router_id - super(NsxAdvancedPlugin, self).delete_floatingip(context, id) - if router_id and self._is_advanced_service_router(context, router_id): - router = self._get_router(context, router_id) - # TODO(fank): do rollback on error, or have a dedicated thread - # do sync work (rollback, re-configure, or make router down) - self._update_interface(context, router) - self._update_nat_rules(context, router) - - def disassociate_floatingips(self, context, port_id): - routers = set() - - try: - fip_qry = context.session.query(l3_db.FloatingIP) - fip_dbs = fip_qry.filter_by(fixed_port_id=port_id) - for fip_db in fip_dbs: - routers.add(fip_db.router_id) - except sa_exc.NoResultFound: - pass - super(NsxAdvancedPlugin, 
self).disassociate_floatingips(context, - port_id) - - for router_id in routers: - if self._is_advanced_service_router(context, router_id): - router = self._get_router(context, router_id) - # TODO(fank): do rollback on error, or have a dedicated thread - # do sync work (rollback, re-configure, or make router down) - self._update_interface(context, router) - self._update_nat_rules(context, router) - - # - # FWaaS plugin implementation - # - def _firewall_set_status( - self, context, firewall_id, status, firewall=None): - with context.session.begin(subtransactions=True): - fw_db = self._get_firewall(context, firewall_id) - if status == service_constants.PENDING_UPDATE and ( - fw_db.status == service_constants.PENDING_DELETE): - raise fw_ext.FirewallInPendingState( - firewall_id=firewall_id, pending_state=status) - else: - fw_db.status = status - if firewall: - firewall['status'] = status - - def _ensure_firewall_update_allowed(self, context, firewall_id): - fwall = self.get_firewall(context, firewall_id) - if fwall['status'] in [service_constants.PENDING_CREATE, - service_constants.PENDING_UPDATE, - service_constants.PENDING_DELETE]: - raise fw_ext.FirewallInPendingState(firewall_id=firewall_id, - pending_state=fwall['status']) - - def _ensure_firewall_policy_update_allowed( - self, context, firewall_policy_id): - firewall_policy = self.get_firewall_policy(context, firewall_policy_id) - for firewall_id in firewall_policy.get('firewall_list', []): - self._ensure_firewall_update_allowed(context, firewall_id) - - def _ensure_update_or_delete_firewall_rule( - self, context, firewall_rule_id): - fw_rule = self.get_firewall_rule(context, firewall_rule_id) - if fw_rule.get('firewall_policy_id'): - self._ensure_firewall_policy_update_allowed( - context, fw_rule['firewall_policy_id']) - - def _make_firewall_rule_list_by_policy_id(self, context, fw_policy_id): - if not fw_policy_id: - return [] - firewall_policy_db = self._get_firewall_policy(context, fw_policy_id) - return [ 
- self._make_firewall_rule_dict(fw_rule_db) - for fw_rule_db in firewall_policy_db['firewall_rules'] - ] - - def _get_edge_id_by_vcns_edge_binding(self, context, - router_id): - #Get vcns_router_binding mapping between router and edge - router_binding = vcns_db.get_vcns_router_binding( - context.session, router_id) - return router_binding.edge_id - - def _get_firewall_list_from_firewall_policy(self, context, policy_id): - firewall_policy_db = self._get_firewall_policy(context, policy_id) - return [ - self._make_firewall_dict(fw_db) - for fw_db in firewall_policy_db['firewalls'] - ] - - def _get_firewall_list_from_firewall_rule(self, context, rule_id): - rule = self._get_firewall_rule(context, rule_id) - if not rule.firewall_policy_id: - # The firewall rule is not associated with firewall policy yet - return None - - return self._get_firewall_list_from_firewall_policy( - context, rule.firewall_policy_id) - - def _vcns_update_firewall(self, context, fw, router_id=None, **kwargs): - edge_id = kwargs.get('edge_id') - if not edge_id: - edge_id = self._get_edge_id_by_vcns_edge_binding( - context, router_id) - firewall_rule_list = kwargs.get('firewall_rule_list') - if not firewall_rule_list: - firewall_rule_list = self._make_firewall_rule_list_by_policy_id( - context, fw['firewall_policy_id']) - fw_with_rules = fw - fw_with_rules['firewall_rule_list'] = firewall_rule_list - try: - self.vcns_driver.update_firewall(context, edge_id, fw_with_rules) - except exceptions.VcnsApiException as e: - self._firewall_set_status( - context, fw['id'], service_constants.ERROR) - msg = (_("Failed to create firewall on vShield Edge " - "bound on router %s") % router_id) - LOG.exception(msg) - raise e - - except exceptions.VcnsBadRequest as e: - self._firewall_set_status( - context, fw['id'], service_constants.ERROR) - LOG.exception(_("Bad Firewall request Input")) - raise e - - def _vcns_delete_firewall(self, context, router_id=None, **kwargs): - edge_id = kwargs.get('edge_id') - if not 
edge_id: - edge_id = self._get_edge_id_by_vcns_edge_binding( - context, router_id) - #TODO(linb):do rollback on error - self.vcns_driver.delete_firewall(context, edge_id) - - def create_firewall(self, context, firewall): - LOG.debug(_("create_firewall() called")) - router_id = firewall['firewall'].get(vcns_const.ROUTER_ID) - self.check_router(context, router_id) - if self._get_resource_router_id_binding( - context, firewall_db.Firewall, router_id=router_id): - msg = _("A firewall is already associated with the router") - LOG.error(msg) - raise nsx_exc.ServiceOverQuota( - overs='firewall', err_msg=msg) - - fw = super(NsxAdvancedPlugin, self).create_firewall(context, firewall) - #Add router service insertion binding with firewall object - res = { - 'id': fw['id'], - 'router_id': router_id - } - self._process_create_resource_router_id( - context, res, firewall_db.Firewall) - # Since there is only one firewall per edge, - # here would be bulk configuration operation on firewall - self._vcns_update_firewall(context, fw, router_id) - self._firewall_set_status( - context, fw['id'], service_constants.ACTIVE, fw) - fw[rsi.ROUTER_ID] = router_id - return fw - - def update_firewall(self, context, id, firewall): - LOG.debug(_("update_firewall() called")) - self._ensure_firewall_update_allowed(context, id) - service_router_binding = self._get_resource_router_id_binding( - context, firewall_db.Firewall, resource_id=id) - rule_list_pre = self._make_firewall_rule_list_by_policy_id( - context, - self.get_firewall(context, id)['firewall_policy_id']) - firewall['firewall']['status'] = service_constants.PENDING_UPDATE - fw = super(NsxAdvancedPlugin, self).update_firewall( - context, id, firewall) - fw[rsi.ROUTER_ID] = service_router_binding['router_id'] - rule_list_new = self._make_firewall_rule_list_by_policy_id( - context, fw['firewall_policy_id']) - if rule_list_pre == rule_list_new: - self._firewall_set_status( - context, fw['id'], service_constants.ACTIVE, fw) - return fw - else: 
- self._vcns_update_firewall( - context, fw, service_router_binding.router_id, - firewall_rule_list=rule_list_new) - self._firewall_set_status( - context, fw['id'], service_constants.ACTIVE, fw) - return fw - - def delete_firewall(self, context, id): - LOG.debug(_("delete_firewall() called")) - self._firewall_set_status( - context, id, service_constants.PENDING_DELETE) - service_router_binding = self._get_resource_router_id_binding( - context, firewall_db.Firewall, resource_id=id) - self._vcns_delete_firewall(context, service_router_binding.router_id) - super(NsxAdvancedPlugin, self).delete_firewall(context, id) - self._delete_resource_router_id_binding( - context, id, firewall_db.Firewall) - - def get_firewall(self, context, id, fields=None): - fw = super(NsxAdvancedPlugin, self).get_firewall( - context, id, fields) - if fields and rsi.ROUTER_ID not in fields: - return fw - - service_router_binding = self._get_resource_router_id_binding( - context, firewall_db.Firewall, resource_id=fw['id']) - fw[rsi.ROUTER_ID] = service_router_binding['router_id'] - return fw - - def get_firewalls(self, context, filters=None, fields=None): - fws = super(NsxAdvancedPlugin, self).get_firewalls( - context, filters, fields) - if fields and rsi.ROUTER_ID not in fields: - return fws - service_router_bindings = self._get_resource_router_id_bindings( - context, firewall_db.Firewall, - resource_ids=[fw['id'] for fw in fws]) - mapping = dict([(binding['resource_id'], binding['router_id']) - for binding in service_router_bindings]) - for fw in fws: - fw[rsi.ROUTER_ID] = mapping[fw['id']] - return fws - - def update_firewall_rule(self, context, id, firewall_rule): - LOG.debug(_("update_firewall_rule() called")) - self._ensure_update_or_delete_firewall_rule(context, id) - fwr_pre = self.get_firewall_rule(context, id) - fwr = super(NsxAdvancedPlugin, self).update_firewall_rule( - context, id, firewall_rule) - if fwr_pre == fwr: - return fwr - - # check if this rule is associated with firewall 
- fw_list = self._get_firewall_list_from_firewall_rule(context, id) - if not fw_list: - return fwr - - for fw in fw_list: - # get router service insertion binding with firewall id - service_router_binding = self._get_resource_router_id_binding( - context, firewall_db.Firewall, resource_id=fw['id']) - edge_id = self._get_edge_id_by_vcns_edge_binding( - context, service_router_binding.router_id) - - #TODO(linb): do rollback on error - self.vcns_driver.update_firewall_rule(context, id, edge_id, fwr) - - return fwr - - def update_firewall_policy(self, context, id, firewall_policy): - LOG.debug(_("update_firewall_policy() called")) - self._ensure_firewall_policy_update_allowed(context, id) - firewall_rules_pre = self._make_firewall_rule_list_by_policy_id( - context, id) - fwp = super(NsxAdvancedPlugin, self).update_firewall_policy( - context, id, firewall_policy) - firewall_rules = self._make_firewall_rule_list_by_policy_id( - context, id) - if firewall_rules_pre == firewall_rules: - return fwp - - # check if this policy is associated with firewall - fw_list = self._get_firewall_list_from_firewall_policy(context, id) - if not fw_list: - return fwp - - for fw in fw_list: - # Get the router_service insertion binding with firewall id - # TODO(fank): optimized by using _get_resource_router_id_bindings - service_router_binding = self._get_resource_router_id_binding( - context, firewall_db.Firewall, resource_id=fw['id']) - self._vcns_update_firewall( - context, fw, service_router_binding.router_id, - firewall_rule_list=firewall_rules) - return fwp - - def insert_rule(self, context, id, rule_info): - LOG.debug(_("insert_rule() called")) - self._ensure_firewall_policy_update_allowed(context, id) - fwp = super(NsxAdvancedPlugin, self).insert_rule( - context, id, rule_info) - fwr = super(NsxAdvancedPlugin, self).get_firewall_rule( - context, rule_info['firewall_rule_id']) - - # check if this policy is associated with firewall - fw_list = 
self._get_firewall_list_from_firewall_policy(context, id) - if not fw_list: - return fwp - for fw in fw_list: - # TODO(fank): optimized by using _get_resource_router_id_bindings - service_router_binding = self._get_resource_router_id_binding( - context, firewall_db.Firewall, resource_id=fw['id']) - edge_id = self._get_edge_id_by_vcns_edge_binding( - context, service_router_binding.router_id) - - if rule_info.get('insert_before') or rule_info.get('insert_after'): - #if insert_before or insert_after is set, we would call - #VCNS insert_rule API - #TODO(linb): do rollback on error - self.vcns_driver.insert_rule(context, rule_info, edge_id, fwr) - else: - #Else we would call bulk configuration on the firewall - self._vcns_update_firewall(context, fw, edge_id=edge_id) - return fwp - - def remove_rule(self, context, id, rule_info): - LOG.debug(_("remove_rule() called")) - self._ensure_firewall_policy_update_allowed(context, id) - fwp = super(NsxAdvancedPlugin, self).remove_rule( - context, id, rule_info) - fwr = super(NsxAdvancedPlugin, self).get_firewall_rule( - context, rule_info['firewall_rule_id']) - - # check if this policy is associated with firewall - fw_list = self._get_firewall_list_from_firewall_policy(context, id) - if not fw_list: - return fwp - for fw in fw_list: - # TODO(fank): optimized by using _get_resource_router_id_bindings - service_router_binding = self._get_resource_router_id_binding( - context, firewall_db.Firewall, resource_id=fw['id']) - edge_id = self._get_edge_id_by_vcns_edge_binding( - context, service_router_binding.router_id) - #TODO(linb): do rollback on error - self.vcns_driver.delete_firewall_rule( - context, fwr['id'], edge_id) - return fwp - - # - # LBAAS service plugin implementation - # - def _get_edge_id_by_vip_id(self, context, vip_id): - try: - service_router_binding = self._get_resource_router_id_binding( - context, loadbalancer_db.Vip, resource_id=vip_id) - except Exception: - with excutils.save_and_reraise_exception(): - 
LOG.exception(_("Failed to find the edge with " - "vip_id: %s"), vip_id) - return self._get_edge_id_by_vcns_edge_binding( - context, service_router_binding.router_id) - - def _get_all_vip_addrs_by_router_id( - self, context, router_id): - vip_bindings = self._get_resource_router_id_bindings( - context, loadbalancer_db.Vip, router_ids=[router_id]) - vip_addrs = [] - for vip_binding in vip_bindings: - vip = self.get_vip(context, vip_binding.resource_id) - vip_addrs.append(vip.get('address')) - return vip_addrs - - def _add_router_service_insertion_binding(self, context, resource_id, - router_id, - model): - res = { - 'id': resource_id, - 'router_id': router_id - } - self._process_create_resource_router_id(context, res, - model) - - def _resource_set_status(self, context, model, id, status, obj=None, - pool_id=None): - with context.session.begin(subtransactions=True): - try: - qry = context.session.query(model) - if issubclass(model, loadbalancer_db.PoolMonitorAssociation): - res = qry.filter_by(monitor_id=id, - pool_id=pool_id).one() - else: - res = qry.filter_by(id=id).one() - if status == service_constants.PENDING_UPDATE and ( - res.get('status') == service_constants.PENDING_DELETE): - msg = (_("Operation can't be performed, Since resource " - "%(model)s : %(id)s is in DELETEing status!") % - {'model': model, - 'id': id}) - LOG.error(msg) - raise nsx_exc.NsxPluginException(err_msg=msg) - else: - res.status = status - except sa_exc.NoResultFound: - msg = (_("Resource %(model)s : %(id)s not found!") % - {'model': model, - 'id': id}) - LOG.exception(msg) - raise nsx_exc.NsxPluginException(err_msg=msg) - if obj: - obj['status'] = status - - def _vcns_create_pool_and_monitors(self, context, pool_id, **kwargs): - pool = self.get_pool(context, pool_id) - edge_id = kwargs.get('edge_id') - if not edge_id: - edge_id = self._get_edge_id_by_vip_id( - context, pool['vip_id']) - #Check wheter the pool is already created on the router - #in case of future's M:N relation between 
Pool and Vip - - #Check associated HealthMonitors and then create them - for monitor_id in pool.get('health_monitors'): - hm = self.get_health_monitor(context, monitor_id) - try: - self.vcns_driver.create_health_monitor( - context, edge_id, hm) - except Exception: - with excutils.save_and_reraise_exception(): - LOG.exception(_("Failed to create healthmonitor " - "associated with pool id: %s!") % pool_id) - for monitor_ide in pool.get('health_monitors'): - if monitor_ide == monitor_id: - break - self.vcns_driver.delete_health_monitor( - context, monitor_ide, edge_id) - #Create the pool on the edge - members = [ - super(NsxAdvancedPlugin, self).get_member( - context, member_id) - for member_id in pool.get('members') - ] - try: - self.vcns_driver.create_pool(context, edge_id, pool, members) - except Exception: - with excutils.save_and_reraise_exception(): - LOG.exception(_("Failed to create pool on vshield edge")) - self.vcns_driver.delete_pool( - context, pool_id, edge_id) - for monitor_id in pool.get('health_monitors'): - self.vcns_driver.delete_health_monitor( - context, monitor_id, edge_id) - - def _vcns_update_pool(self, context, pool, **kwargs): - edge_id = self._get_edge_id_by_vip_id(context, pool['vip_id']) - members = kwargs.get('members') - if not members: - members = [ - super(NsxAdvancedPlugin, self).get_member( - context, member_id) - for member_id in pool.get('members') - ] - self.vcns_driver.update_pool(context, edge_id, pool, members) - - def create_vip(self, context, vip): - LOG.debug(_("create_vip() called")) - router_id = vip['vip'].get(vcns_const.ROUTER_ID) - self.check_router(context, router_id) - #Check whether the vip port is an external port - subnet_id = vip['vip']['subnet_id'] - network_id = self.get_subnet(context, subnet_id)['network_id'] - ext_net = self._get_network(context, network_id) - if not ext_net.external: - msg = (_("Network '%s' is not a valid external " - "network") % network_id) - raise nsx_exc.NsxPluginException(err_msg=msg) - 
- v = super(NsxAdvancedPlugin, self).create_vip(context, vip) - #Get edge_id for the resource - router_binding = vcns_db.get_vcns_router_binding( - context.session, - router_id) - edge_id = router_binding.edge_id - #Add vip_router binding - self._add_router_service_insertion_binding(context, v['id'], - router_id, - loadbalancer_db.Vip) - #Create the vip port on vShield Edge - router = self._get_router(context, router_id) - self._update_interface(context, router, sync=True) - #Create the vip and associated pool/monitor on the corresponding edge - try: - self._vcns_create_pool_and_monitors( - context, v['pool_id'], edge_id=edge_id) - self.vcns_driver.create_vip(context, edge_id, v) - except Exception: - with excutils.save_and_reraise_exception(): - LOG.exception(_("Failed to create vip!")) - self._delete_resource_router_id_binding( - context, v['id'], loadbalancer_db.Vip) - super(NsxAdvancedPlugin, self).delete_vip(context, v['id']) - self._resource_set_status(context, loadbalancer_db.Vip, - v['id'], service_constants.ACTIVE, v) - v[rsi.ROUTER_ID] = router_id - - return v - - def update_vip(self, context, id, vip): - edge_id = self._get_edge_id_by_vip_id(context, id) - old_vip = self.get_vip(context, id) - session_persistence_update = bool( - vip['vip'].get('session_persistence')) - vip['vip']['status'] = service_constants.PENDING_UPDATE - v = super(NsxAdvancedPlugin, self).update_vip(context, id, vip) - v[rsi.ROUTER_ID] = self._get_resource_router_id_binding( - context, loadbalancer_db.Vip, resource_id=id)['router_id'] - if old_vip['pool_id'] != v['pool_id']: - self.vcns_driver.delete_vip(context, id) - #Delete old pool/monitor on the edge - #TODO(linb): Factor out procedure for removing pool and health - #separate method - old_pool = self.get_pool(context, old_vip['pool_id']) - self.vcns_driver.delete_pool( - context, old_vip['pool_id'], edge_id) - for monitor_id in old_pool.get('health_monitors'): - self.vcns_driver.delete_health_monitor( - context, monitor_id, 
edge_id) - #Create new pool/monitor object on the edge - #TODO(linb): add exception handle if error - self._vcns_create_pool_and_monitors( - context, v['pool_id'], edge_id=edge_id) - self.vcns_driver.create_vip(context, edge_id, v) - return v - try: - self.vcns_driver.update_vip(context, v, session_persistence_update) - except Exception: - with excutils.save_and_reraise_exception(): - LOG.exception(_("Failed to update vip with id: %s!"), id) - self._resource_set_status(context, loadbalancer_db.Vip, - id, service_constants.ERROR, v) - - self._resource_set_status(context, loadbalancer_db.Vip, - v['id'], service_constants.ACTIVE, v) - return v - - def delete_vip(self, context, id): - v = self.get_vip(context, id) - self._resource_set_status( - context, loadbalancer_db.Vip, - id, service_constants.PENDING_DELETE) - try: - self.vcns_driver.delete_vip(context, id) - except Exception: - with excutils.save_and_reraise_exception(): - LOG.exception(_("Failed to delete vip with id: %s!"), id) - self._resource_set_status(context, loadbalancer_db.Vip, - id, service_constants.ERROR) - edge_id = self._get_edge_id_by_vip_id(context, id) - #Check associated HealthMonitors and then delete them - pool = self.get_pool(context, v['pool_id']) - self.vcns_driver.delete_pool(context, v['pool_id'], edge_id) - for monitor_id in pool.get('health_monitors'): - #TODO(linb): do exception handle if error - self.vcns_driver.delete_health_monitor( - context, monitor_id, edge_id) - - router_binding = self._get_resource_router_id_binding( - context, loadbalancer_db.Vip, resource_id=id) - router = self._get_router(context, router_binding.router_id) - self._delete_resource_router_id_binding( - context, id, loadbalancer_db.Vip) - super(NsxAdvancedPlugin, self).delete_vip(context, id) - self._update_interface(context, router, sync=True) - - def get_vip(self, context, id, fields=None): - vip = super(NsxAdvancedPlugin, self).get_vip(context, id, fields) - if fields and rsi.ROUTER_ID not in fields: - 
return vip - - service_router_binding = self._get_resource_router_id_binding( - context, loadbalancer_db.Vip, resource_id=vip['id']) - vip[rsi.ROUTER_ID] = service_router_binding['router_id'] - return vip - - def get_vips(self, context, filters=None, fields=None): - vips = super(NsxAdvancedPlugin, self).get_vips( - context, filters, fields) - if fields and rsi.ROUTER_ID not in fields: - return vips - service_router_bindings = self._get_resource_router_id_bindings( - context, loadbalancer_db.Vip, - resource_ids=[vip['id'] for vip in vips]) - mapping = dict([(binding['resource_id'], binding['router_id']) - for binding in service_router_bindings]) - for vip in vips: - vip[rsi.ROUTER_ID] = mapping[vip['id']] - return vips - - def update_pool(self, context, id, pool): - pool['pool']['status'] = service_constants.PENDING_UPDATE - p = super(NsxAdvancedPlugin, self).update_pool(context, id, pool) - #Check whether the pool is already associated with the vip - if not p.get('vip_id'): - self._resource_set_status(context, loadbalancer_db.Pool, - p['id'], service_constants.ACTIVE, p) - return p - try: - self._vcns_update_pool(context, p) - except Exception: - with excutils.save_and_reraise_exception(): - LOG.exception(_("Failed to update pool with id: %s!"), id) - self._resource_set_status(context, loadbalancer_db.Pool, - p['id'], service_constants.ERROR, p) - self._resource_set_status(context, loadbalancer_db.Pool, - p['id'], service_constants.ACTIVE, p) - return p - - def create_member(self, context, member): - m = super(NsxAdvancedPlugin, self).create_member(context, member) - pool_id = m.get('pool_id') - pool = self.get_pool(context, pool_id) - if not pool.get('vip_id'): - self._resource_set_status(context, loadbalancer_db.Member, - m['id'], service_constants.ACTIVE, m) - return m - self._resource_set_status(context, loadbalancer_db.Pool, - pool_id, - service_constants.PENDING_UPDATE) - try: - self._vcns_update_pool(context, pool) - except Exception: - with 
excutils.save_and_reraise_exception(): - LOG.exception(_("Failed to update pool with the member")) - super(NsxAdvancedPlugin, self).delete_member(context, m['id']) - - self._resource_set_status(context, loadbalancer_db.Pool, - pool_id, service_constants.ACTIVE) - self._resource_set_status(context, loadbalancer_db.Member, - m['id'], service_constants.ACTIVE, m) - return m - - def update_member(self, context, id, member): - member['member']['status'] = service_constants.PENDING_UPDATE - old_member = self.get_member(context, id) - m = super(NsxAdvancedPlugin, self).update_member( - context, id, member) - - if m['pool_id'] != old_member['pool_id']: - old_pool_id = old_member['pool_id'] - old_pool = self.get_pool(context, old_pool_id) - if old_pool.get('vip_id'): - self._resource_set_status( - context, loadbalancer_db.Pool, - old_pool_id, service_constants.PENDING_UPDATE) - try: - self._vcns_update_pool(context, old_pool) - except Exception: - with excutils.save_and_reraise_exception(): - LOG.exception(_("Failed to update old pool " - "with the member")) - super(NsxAdvancedPlugin, self).delete_member( - context, m['id']) - self._resource_set_status( - context, loadbalancer_db.Pool, - old_pool_id, service_constants.ACTIVE) - - pool_id = m['pool_id'] - pool = self.get_pool(context, pool_id) - if not pool.get('vip_id'): - self._resource_set_status(context, loadbalancer_db.Member, - m['id'], service_constants.ACTIVE, m) - return m - self._resource_set_status(context, loadbalancer_db.Pool, - pool_id, - service_constants.PENDING_UPDATE) - try: - self._vcns_update_pool(context, pool) - except Exception: - with excutils.save_and_reraise_exception(): - LOG.exception(_("Failed to update pool with the member")) - super(NsxAdvancedPlugin, self).delete_member( - context, m['id']) - - self._resource_set_status(context, loadbalancer_db.Pool, - pool_id, service_constants.ACTIVE) - self._resource_set_status(context, loadbalancer_db.Member, - m['id'], service_constants.ACTIVE, m) - 
return m - - def delete_member(self, context, id): - m = self.get_member(context, id) - super(NsxAdvancedPlugin, self).delete_member(context, id) - pool_id = m['pool_id'] - pool = self.get_pool(context, pool_id) - if not pool.get('vip_id'): - return - self._resource_set_status(context, loadbalancer_db.Pool, - pool_id, service_constants.PENDING_UPDATE) - try: - self._vcns_update_pool(context, pool) - except Exception: - with excutils.save_and_reraise_exception(): - LOG.exception(_("Failed to update pool with the member")) - self._resource_set_status(context, loadbalancer_db.Pool, - pool_id, service_constants.ACTIVE) - - def update_health_monitor(self, context, id, health_monitor): - old_hm = super(NsxAdvancedPlugin, self).get_health_monitor( - context, id) - hm = super(NsxAdvancedPlugin, self).update_health_monitor( - context, id, health_monitor) - for hm_pool in hm.get('pools'): - pool_id = hm_pool['pool_id'] - pool = self.get_pool(context, pool_id) - if pool.get('vip_id'): - edge_id = self._get_edge_id_by_vip_id( - context, pool['vip_id']) - try: - self.vcns_driver.update_health_monitor( - context, edge_id, old_hm, hm) - except Exception: - with excutils.save_and_reraise_exception(): - LOG.exception(_("Failed to update monitor " - "with id: %s!"), id) - return hm - - def create_pool_health_monitor(self, context, - health_monitor, pool_id): - monitor_id = health_monitor['health_monitor']['id'] - pool = self.get_pool(context, pool_id) - monitors = pool.get('health_monitors') - if len(monitors) > 0: - msg = _("Vcns right now can only support " - "one monitor per pool") - LOG.error(msg) - raise nsx_exc.NsxPluginException(err_msg=msg) - #Check whether the pool is already associated with the vip - if not pool.get('vip_id'): - res = super(NsxAdvancedPlugin, - self).create_pool_health_monitor(context, - health_monitor, - pool_id) - return res - #Get the edge_id - edge_id = self._get_edge_id_by_vip_id(context, pool['vip_id']) - res = super(NsxAdvancedPlugin, - 
self).create_pool_health_monitor(context, - health_monitor, - pool_id) - monitor = self.get_health_monitor(context, monitor_id) - #TODO(linb)Add Exception handle if error - self.vcns_driver.create_health_monitor(context, edge_id, monitor) - #Get updated pool - pool['health_monitors'].append(monitor['id']) - self._resource_set_status( - context, loadbalancer_db.Pool, - pool_id, service_constants.PENDING_UPDATE) - try: - self._vcns_update_pool(context, pool) - except Exception: - with excutils.save_and_reraise_exception(): - LOG.exception(_("Failed to associate monitor with pool!")) - self._resource_set_status( - context, loadbalancer_db.Pool, - pool_id, service_constants.ERROR) - super(NsxAdvancedPlugin, self).delete_pool_health_monitor( - context, monitor_id, pool_id) - self._resource_set_status( - context, loadbalancer_db.Pool, - pool_id, service_constants.ACTIVE) - self._resource_set_status( - context, loadbalancer_db.PoolMonitorAssociation, - monitor_id, service_constants.ACTIVE, res, - pool_id=pool_id) - return res - - def delete_pool_health_monitor(self, context, id, pool_id): - super(NsxAdvancedPlugin, self).delete_pool_health_monitor( - context, id, pool_id) - pool = self.get_pool(context, pool_id) - #Check whether the pool is already associated with the vip - if pool.get('vip_id'): - #Delete the monitor on vshield edge - edge_id = self._get_edge_id_by_vip_id(context, pool['vip_id']) - self._resource_set_status( - context, loadbalancer_db.Pool, - pool_id, service_constants.PENDING_UPDATE) - try: - self._vcns_update_pool(context, pool) - except Exception: - with excutils.save_and_reraise_exception(): - LOG.exception( - _("Failed to update pool with pool_monitor!")) - self._resource_set_status( - context, loadbalancer_db.Pool, - pool_id, service_constants.ERROR) - #TODO(linb): Add exception handle if error - self.vcns_driver.delete_health_monitor(context, id, edge_id) - self._resource_set_status( - context, loadbalancer_db.Pool, - pool_id, 
service_constants.ACTIVE) - - def _vcns_update_ipsec_config( - self, context, vpnservice_id, removed_ipsec_conn_id=None): - sites = [] - vpn_service = self._get_vpnservice(context, vpnservice_id) - edge_id = self._get_edge_id_by_vcns_edge_binding( - context, vpn_service.router_id) - if not vpn_service.router.gw_port: - msg = _("Failed to update ipsec vpn configuration on edge, since " - "the router: %s does not have a gateway yet!" - ) % vpn_service.router_id - LOG.error(msg) - raise exceptions.VcnsBadRequest(resource='router', msg=msg) - - external_ip = vpn_service.router.gw_port['fixed_ips'][0]['ip_address'] - subnet = self._make_subnet_dict(vpn_service.subnet) - for ipsec_site_conn in vpn_service.ipsec_site_connections: - if ipsec_site_conn.id != removed_ipsec_conn_id: - site = self._make_ipsec_site_connection_dict(ipsec_site_conn) - ikepolicy = self._make_ikepolicy_dict( - ipsec_site_conn.ikepolicy) - ipsecpolicy = self._make_ipsecpolicy_dict( - ipsec_site_conn.ipsecpolicy) - sites.append({'site': site, - 'ikepolicy': ikepolicy, - 'ipsecpolicy': ipsecpolicy, - 'subnet': subnet, - 'external_ip': external_ip}) - try: - self.vcns_driver.update_ipsec_config( - edge_id, sites, enabled=vpn_service.admin_state_up) - except exceptions.VcnsBadRequest: - with excutils.save_and_reraise_exception(): - LOG.exception(_("Bad or unsupported Input request!")) - except exceptions.VcnsApiException: - with excutils.save_and_reraise_exception(): - msg = (_("Failed to update ipsec VPN configuration " - "with vpnservice: %(vpnservice_id)s on vShield Edge: " - "%(edge_id)s") % {'vpnservice_id': vpnservice_id, - 'edge_id': edge_id}) - LOG.exception(msg) - - def create_vpnservice(self, context, vpnservice): - LOG.debug(_("create_vpnservice() called")) - router_id = vpnservice['vpnservice'].get('router_id') - self.check_router(context, router_id) - if self.get_vpnservices(context, filters={'router_id': [router_id]}): - msg = _("a vpnservice is already associated with the router: %s" - ) 
% router_id - LOG.warning(msg) - raise nsx_exc.ServiceOverQuota( - overs='vpnservice', err_msg=msg) - - service = super(NsxAdvancedPlugin, self).create_vpnservice( - context, vpnservice) - self._resource_set_status( - context, vpn_db.VPNService, - service['id'], service_constants.ACTIVE, service) - return service - - def update_vpnservice(self, context, vpnservice_id, vpnservice): - vpnservice['vpnservice']['status'] = service_constants.PENDING_UPDATE - service = super(NsxAdvancedPlugin, self).update_vpnservice( - context, vpnservice_id, vpnservice) - # Only admin_state_up attribute is configurable on Edge. - if vpnservice['vpnservice'].get('admin_state_up') is None: - self._resource_set_status( - context, vpn_db.VPNService, - service['id'], service_constants.ACTIVE, service) - return service - # Test whether there is one ipsec site connection attached to - # the vpnservice. If not, just return without updating ipsec - # config on edge side. - vpn_service_db = self._get_vpnservice(context, vpnservice_id) - if not vpn_service_db.ipsec_site_connections: - self._resource_set_status( - context, vpn_db.VPNService, - service['id'], service_constants.ACTIVE, service) - return service - try: - self._vcns_update_ipsec_config(context, service['id']) - except Exception: - with excutils.save_and_reraise_exception(): - self._resource_set_status( - context, vpn_db.VPNService, - service['id'], service_constants.ERROR, service) - self._resource_set_status( - context, vpn_db.VPNService, - service['id'], service_constants.ACTIVE, service) - return service - - def create_ipsec_site_connection(self, context, ipsec_site_connection): - ipsec_site_conn = super( - NsxAdvancedPlugin, self).create_ipsec_site_connection( - context, ipsec_site_connection) - try: - self._vcns_update_ipsec_config( - context, ipsec_site_conn['vpnservice_id']) - except Exception: - with excutils.save_and_reraise_exception(): - super(NsxAdvancedPlugin, self).delete_ipsec_site_connection( - context, 
ipsec_site_conn['id']) - self._resource_set_status( - context, vpn_db.IPsecSiteConnection, - ipsec_site_conn['id'], service_constants.ACTIVE, ipsec_site_conn) - return ipsec_site_conn - - def update_ipsec_site_connection(self, context, ipsec_site_connection_id, - ipsec_site_connection): - ipsec_site_connection['ipsec_site_connection']['status'] = ( - service_constants.PENDING_UPDATE) - ipsec_site_conn = super( - NsxAdvancedPlugin, self).update_ipsec_site_connection( - context, ipsec_site_connection_id, ipsec_site_connection) - try: - self._vcns_update_ipsec_config( - context, ipsec_site_conn['vpnservice_id']) - except Exception: - with excutils.save_and_reraise_exception(): - self._resource_set_status( - context, vpn_db.IPsecSiteConnection, ipsec_site_conn['id'], - service_constants.ERROR, ipsec_site_conn) - self._resource_set_status( - context, vpn_db.IPsecSiteConnection, - ipsec_site_conn['id'], service_constants.ACTIVE, ipsec_site_conn) - return ipsec_site_conn - - def delete_ipsec_site_connection(self, context, ipsec_site_conn_id): - self._resource_set_status( - context, vpn_db.IPsecSiteConnection, - ipsec_site_conn_id, service_constants.PENDING_DELETE) - vpnservice_id = self.get_ipsec_site_connection( - context, ipsec_site_conn_id)['vpnservice_id'] - try: - self._vcns_update_ipsec_config( - context, vpnservice_id, ipsec_site_conn_id) - except Exception: - with excutils.save_and_reraise_exception(): - self._resource_set_status( - context, vpn_db.IPsecSiteConnection, ipsec_site_conn_id, - service_constants.ERROR) - super(NsxAdvancedPlugin, self).delete_ipsec_site_connection( - context, ipsec_site_conn_id) - - -class VcnsCallbacks(object): - """Edge callback implementation Callback functions for - asynchronous tasks. 
- """ - def __init__(self, plugin): - self.plugin = plugin - - def edge_deploy_started(self, task): - """callback when deployment task started.""" - jobdata = task.userdata['jobdata'] - context = jobdata['context'] - edge_id = task.userdata.get('edge_id') - neutron_router_id = jobdata['neutron_router_id'] - name = task.userdata['router_name'] - if edge_id: - LOG.debug(_("Start deploying %(edge_id)s for router %(name)s"), { - 'edge_id': edge_id, - 'name': name}) - vcns_db.update_vcns_router_binding( - context.session, neutron_router_id, edge_id=edge_id) - else: - LOG.debug(_("Failed to deploy Edge for router %s"), name) - vcns_db.update_vcns_router_binding( - context.session, neutron_router_id, - status=service_constants.ERROR) - - def edge_deploy_result(self, task): - """callback when deployment task finished.""" - jobdata = task.userdata['jobdata'] - lrouter = jobdata['lrouter'] - context = jobdata['context'] - name = task.userdata['router_name'] - neutron_router_id = jobdata['neutron_router_id'] - router_db = None - try: - router_db = self.plugin._get_router( - context, neutron_router_id) - except l3.RouterNotFound: - # Router might have been deleted before deploy finished - LOG.exception(_("Router %s not found"), lrouter['uuid']) - - if task.status == tasks_const.TaskStatus.COMPLETED: - LOG.debug(_("Successfully deployed %(edge_id)s for " - "router %(name)s"), { - 'edge_id': task.userdata['edge_id'], - 'name': name}) - if (router_db and - router_db['status'] == service_constants.PENDING_CREATE): - router_db['status'] = service_constants.ACTIVE - - binding = vcns_db.get_vcns_router_binding( - context.session, neutron_router_id) - # only update status to active if its status is pending create - if binding['status'] == service_constants.PENDING_CREATE: - vcns_db.update_vcns_router_binding( - context.session, neutron_router_id, - status=service_constants.ACTIVE) - else: - LOG.debug(_("Failed to deploy Edge for router %s"), name) - if router_db: - router_db['status'] 
= service_constants.ERROR - vcns_db.update_vcns_router_binding( - context.session, neutron_router_id, - status=service_constants.ERROR) - - def edge_delete_result(self, task): - jobdata = task.userdata['jobdata'] - router_id = task.userdata['router_id'] - context = jobdata['context'] - if task.status == tasks_const.TaskStatus.COMPLETED: - vcns_db.delete_vcns_router_binding(context.session, - router_id) - - def interface_update_result(self, task): - LOG.debug(_("interface_update_result %d"), task.status) - - def snat_create_result(self, task): - LOG.debug(_("snat_create_result %d"), task.status) - - def snat_delete_result(self, task): - LOG.debug(_("snat_delete_result %d"), task.status) - - def dnat_create_result(self, task): - LOG.debug(_("dnat_create_result %d"), task.status) - - def dnat_delete_result(self, task): - LOG.debug(_("dnat_delete_result %d"), task.status) - - def routes_update_result(self, task): - LOG.debug(_("routes_update_result %d"), task.status) - - def nat_update_result(self, task): - LOG.debug(_("nat_update_result %d"), task.status) - - -def _process_base_create_lswitch_args(*args, **kwargs): - tags = utils.get_tags() - tags.append({"tag": args[1], - "scope": "quantum_net_id"}) - if args[2]: - tags.append({"tag": args[2], "scope": "os_tid"}) - switch_name = args[3] - tz_config = args[4] - if kwargs.get("shared", False) or len(args) >= 6: - tags.append({"tag": "true", "scope": "shared"}) - if kwargs.get("tags"): - tags.extend(kwargs["tags"]) - return switch_name, tz_config, tags diff --git a/neutron/plugins/vmware/shell/__init__.py b/neutron/plugins/vmware/shell/__init__.py deleted file mode 100644 index e0b15b8d2..000000000 --- a/neutron/plugins/vmware/shell/__init__.py +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright 2014 VMware, Inc. -# -# All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import sys - -from neutron.plugins.vmware.shell import commands as cmd -from neutronclient import shell - - -class NsxManage(shell.NeutronShell): - - def __init__(self, api_version): - super(NsxManage, self).__init__(api_version) - self.command_manager.add_command('net-migrate', cmd.NetworkMigrate) - self.command_manager.add_command('net-report', cmd.NetworkReport) - - def build_option_parser(self, description, version): - parser = super(NsxManage, self).build_option_parser( - description, version) - return parser - - def initialize_app(self, argv): - super(NsxManage, self).initialize_app(argv) - self.client = self.client_manager.neutron - - -def main(): - return NsxManage(shell.NEUTRON_API_VERSION).run(sys.argv[1:]) diff --git a/neutron/plugins/vmware/shell/commands.py b/neutron/plugins/vmware/shell/commands.py deleted file mode 100644 index bd6706ff8..000000000 --- a/neutron/plugins/vmware/shell/commands.py +++ /dev/null @@ -1,67 +0,0 @@ -# Copyright 2014 VMware, Inc. -# -# All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -from neutronclient.neutron import v2_0 as client - -LSN_PATH = '/lsns' - - -def print_report(write_func, report): - write_func(_("\nService type = %s\n") % report['report']['type']) - services = ','.join(report['report']['services']) - ports = ','.join(report['report']['ports']) - write_func(_("Service uuids = %s\n") % services) - write_func(_("Port uuids = %s\n\n") % ports) - - -class NetworkReport(client.NeutronCommand): - """Retrieve network migration report.""" - - def get_parser(self, prog_name): - parser = super(NetworkReport, self).get_parser(prog_name) - parser.add_argument('network', metavar='network', - help=_('ID or name of network to run report on')) - return parser - - def run(self, parsed_args): - net = parsed_args.network - net_id = client.find_resourceid_by_name_or_id(self.app.client, - 'network', net) - res = self.app.client.get("%s/%s" % (LSN_PATH, net_id)) - if res: - self.app.stdout.write(_('Migration report is:\n')) - print_report(self.app.stdout.write, res['lsn']) - - -class NetworkMigrate(client.NeutronCommand): - """Perform network migration.""" - - def get_parser(self, prog_name): - parser = super(NetworkMigrate, self).get_parser(prog_name) - parser.add_argument('network', metavar='network', - help=_('ID or name of network to migrate')) - return parser - - def run(self, parsed_args): - net = parsed_args.network - net_id = client.find_resourceid_by_name_or_id(self.app.client, - 'network', net) - body = {'network': net_id} - res = self.app.client.post(LSN_PATH, body={'lsn': body}) - if res: - self.app.stdout.write(_('Migration has been successful:\n')) - print_report(self.app.stdout.write, res['lsn']) diff --git a/neutron/plugins/vmware/vshield/__init__.py b/neutron/plugins/vmware/vshield/__init__.py deleted file mode 100644 index 6818a0c8f..000000000 --- a/neutron/plugins/vmware/vshield/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2013 VMware, Inc -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. diff --git a/neutron/plugins/vmware/vshield/common/VcnsApiClient.py b/neutron/plugins/vmware/vshield/common/VcnsApiClient.py deleted file mode 100644 index 7127b6780..000000000 --- a/neutron/plugins/vmware/vshield/common/VcnsApiClient.py +++ /dev/null @@ -1,80 +0,0 @@ -# Copyright 2013 VMware, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import base64 - -import eventlet - -from neutron.openstack.common import jsonutils -from neutron.plugins.vmware.vshield.common import exceptions - -httplib2 = eventlet.import_patched('httplib2') - - -def xmldumps(obj): - config = "" - if isinstance(obj, dict): - for key, value in obj.iteritems(): - cfg = "<%s>%s" % (key, xmldumps(value), key) - config += cfg - elif isinstance(obj, list): - for value in obj: - config += xmldumps(value) - else: - config = obj - - return config - - -class VcnsApiHelper(object): - errors = { - 303: exceptions.ResourceRedirect, - 400: exceptions.RequestBad, - 403: exceptions.Forbidden, - 404: exceptions.ResourceNotFound, - 415: exceptions.MediaTypeUnsupport, - 503: exceptions.ServiceUnavailable - } - - def __init__(self, address, user, password, format='json'): - self.authToken = base64.encodestring("%s:%s" % (user, password)) - self.user = user - self.passwd = password - self.address = address - self.format = format - if format == 'json': - self.encode = jsonutils.dumps - else: - self.encode = xmldumps - - def request(self, method, uri, params=None): - uri = self.address + uri - http = httplib2.Http() - http.disable_ssl_certificate_validation = True - headers = { - 'Content-Type': 'application/' + self.format, - 'Accept': 'application/' + 'json', - 'Authorization': 'Basic ' + self.authToken - } - body = self.encode(params) if params else None - header, response = http.request(uri, method, - body=body, headers=headers) - status = int(header['status']) - if 200 <= status < 300: - return header, response - if status in self.errors: - cls = self.errors[status] - else: - cls = exceptions.VcnsApiException - raise cls(uri=uri, status=status, header=header, response=response) diff --git a/neutron/plugins/vmware/vshield/common/__init__.py b/neutron/plugins/vmware/vshield/common/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/neutron/plugins/vmware/vshield/common/constants.py 
b/neutron/plugins/vmware/vshield/common/constants.py deleted file mode 100644 index 1c2aa25db..000000000 --- a/neutron/plugins/vmware/vshield/common/constants.py +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright 2013 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -EDGE_ID = 'edge_id' -ROUTER_ID = 'router_id' - -# Interface -EXTERNAL_VNIC_INDEX = 0 -INTERNAL_VNIC_INDEX = 1 -EXTERNAL_VNIC_NAME = "external" -INTERNAL_VNIC_NAME = "internal" - -INTEGRATION_LR_IPADDRESS = "169.254.2.1/28" -INTEGRATION_EDGE_IPADDRESS = "169.254.2.3" -INTEGRATION_SUBNET_NETMASK = "255.255.255.240" - -# SNAT rule location -PREPEND = 0 -APPEND = -1 - -# error code -VCNS_ERROR_CODE_EDGE_NOT_RUNNING = 10013 - -SUFFIX_LENGTH = 8 - - -# router status by number -class RouterStatus(object): - ROUTER_STATUS_ACTIVE = 0 - ROUTER_STATUS_DOWN = 1 - ROUTER_STATUS_PENDING_CREATE = 2 - ROUTER_STATUS_PENDING_DELETE = 3 - ROUTER_STATUS_ERROR = 4 diff --git a/neutron/plugins/vmware/vshield/common/exceptions.py b/neutron/plugins/vmware/vshield/common/exceptions.py deleted file mode 100644 index 4764db034..000000000 --- a/neutron/plugins/vmware/vshield/common/exceptions.py +++ /dev/null @@ -1,70 +0,0 @@ -# Copyright 2013 VMware, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: linb, VMware - -from neutron.common import exceptions - - -class VcnsException(exceptions.NeutronException): - pass - - -class VcnsGeneralException(VcnsException): - def __init__(self, message): - self.message = message - super(VcnsGeneralException, self).__init__() - - -class VcnsBadRequest(exceptions.BadRequest): - pass - - -class VcnsNotFound(exceptions.NotFound): - message = _('%(resource)s not found: %(msg)s') - - -class VcnsApiException(VcnsException): - message = _("An unknown exception %(status)s occurred: %(response)s.") - - def __init__(self, **kwargs): - super(VcnsApiException, self).__init__(**kwargs) - - self.status = kwargs.get('status') - self.header = kwargs.get('header') - self.response = kwargs.get('response') - - -class ResourceRedirect(VcnsApiException): - message = _("Resource %(uri)s has been redirected") - - -class RequestBad(VcnsApiException): - message = _("Request %(uri)s is Bad, response %(response)s") - - -class Forbidden(VcnsApiException): - message = _("Forbidden: %(uri)s") - - -class ResourceNotFound(VcnsApiException): - message = _("Resource %(uri)s not found") - - -class MediaTypeUnsupport(VcnsApiException): - message = _("Media Type %(uri)s is not supported") - - -class ServiceUnavailable(VcnsApiException): - message = _("Service Unavailable: %(uri)s") diff --git a/neutron/plugins/vmware/vshield/edge_appliance_driver.py b/neutron/plugins/vmware/vshield/edge_appliance_driver.py deleted file mode 100644 index aadc1cb4b..000000000 --- a/neutron/plugins/vmware/vshield/edge_appliance_driver.py +++ /dev/null @@ 
-1,667 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2013 VMware, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Kaiwei Fan, VMware, Inc. -# @author: Bo Link, VMware, Inc. - -from neutron.openstack.common import excutils -from neutron.openstack.common import jsonutils -from neutron.openstack.common import log as logging -from neutron.plugins.vmware.common import utils -from neutron.plugins.vmware.vshield.common import ( - constants as vcns_const) -from neutron.plugins.vmware.vshield.common import constants as common_constants -from neutron.plugins.vmware.vshield.common import exceptions -from neutron.plugins.vmware.vshield.tasks import constants -from neutron.plugins.vmware.vshield.tasks import tasks - -LOG = logging.getLogger(__name__) - - -class EdgeApplianceDriver(object): - def __init__(self): - # store the last task per edge that has the latest config - self.updated_task = { - 'nat': {}, - 'route': {}, - } - - def _assemble_edge(self, name, appliance_size="compact", - deployment_container_id=None, datacenter_moid=None, - enable_aesni=True, hypervisor_assist=False, - enable_fips=False, remote_access=False): - edge = { - 'name': name, - 'fqdn': name, - 'hypervisorAssist': hypervisor_assist, - 'type': 'gatewayServices', - 'enableAesni': enable_aesni, - 'enableFips': enable_fips, - 'cliSettings': { - 'remoteAccess': remote_access - }, - 'appliances': { - 'applianceSize': appliance_size - }, - 'vnics': { - 'vnics': [] - } - } 
- if deployment_container_id: - edge['appliances']['deploymentContainerId'] = ( - deployment_container_id) - if datacenter_moid: - edge['datacenterMoid'] = datacenter_moid, - - return edge - - def _assemble_edge_appliance(self, resource_pool_id, datastore_id): - appliance = {} - if resource_pool_id: - appliance['resourcePoolId'] = resource_pool_id - if datastore_id: - appliance['datastoreId'] = datastore_id - return appliance - - def _assemble_edge_vnic(self, name, index, portgroup_id, - primary_address=None, subnet_mask=None, - secondary=None, - type="internal", - enable_proxy_arp=False, - enable_send_redirects=True, - is_connected=True, - mtu=1500): - vnic = { - 'index': index, - 'name': name, - 'type': type, - 'portgroupId': portgroup_id, - 'mtu': mtu, - 'enableProxyArp': enable_proxy_arp, - 'enableSendRedirects': enable_send_redirects, - 'isConnected': is_connected - } - if primary_address and subnet_mask: - address_group = { - 'primaryAddress': primary_address, - 'subnetMask': subnet_mask - } - if secondary: - address_group['secondaryAddresses'] = { - 'ipAddress': secondary, - 'type': 'IpAddressesDto' - } - - vnic['addressGroups'] = { - 'addressGroups': [address_group] - } - - return vnic - - def _edge_status_to_level(self, status): - if status == 'GREEN': - status_level = common_constants.RouterStatus.ROUTER_STATUS_ACTIVE - elif status in ('GREY', 'YELLOW'): - status_level = common_constants.RouterStatus.ROUTER_STATUS_DOWN - else: - status_level = common_constants.RouterStatus.ROUTER_STATUS_ERROR - return status_level - - def _enable_loadbalancer(self, edge): - if not edge.get('featureConfigs') or ( - not edge['featureConfigs'].get('features')): - edge['featureConfigs'] = {'features': []} - edge['featureConfigs']['features'].append( - {'featureType': 'loadbalancer_4.0', - 'enabled': True}) - - def get_edge_status(self, edge_id): - try: - response = self.vcns.get_edge_status(edge_id)[1] - status_level = self._edge_status_to_level( - response['edgeStatus']) - 
except exceptions.VcnsApiException as e: - LOG.exception(_("VCNS: Failed to get edge status:\n%s"), - e.response) - status_level = common_constants.RouterStatus.ROUTER_STATUS_ERROR - try: - desc = jsonutils.loads(e.response) - if desc.get('errorCode') == ( - vcns_const.VCNS_ERROR_CODE_EDGE_NOT_RUNNING): - status_level = ( - common_constants.RouterStatus.ROUTER_STATUS_DOWN) - except ValueError: - LOG.exception(e.response) - - return status_level - - def get_edges_statuses(self): - edges_status_level = {} - edges = self._get_edges() - for edge in edges['edgePage'].get('data', []): - edge_id = edge['id'] - status = edge['edgeStatus'] - edges_status_level[edge_id] = self._edge_status_to_level(status) - - return edges_status_level - - def _update_interface(self, task): - edge_id = task.userdata['edge_id'] - config = task.userdata['config'] - LOG.debug(_("VCNS: start updating vnic %s"), config) - try: - self.vcns.update_interface(edge_id, config) - except exceptions.VcnsApiException as e: - with excutils.save_and_reraise_exception(): - LOG.exception(_("VCNS: Failed to update vnic %(config)s:\n" - "%(response)s"), { - 'config': config, - 'response': e.response}) - except Exception: - with excutils.save_and_reraise_exception(): - LOG.exception(_("VCNS: Failed to update vnic %d"), - config['index']) - - return constants.TaskStatus.COMPLETED - - def update_interface(self, router_id, edge_id, index, network, - address=None, netmask=None, secondary=None, - jobdata=None): - LOG.debug(_("VCNS: update vnic %(index)d: %(addr)s %(netmask)s"), { - 'index': index, 'addr': address, 'netmask': netmask}) - if index == vcns_const.EXTERNAL_VNIC_INDEX: - name = vcns_const.EXTERNAL_VNIC_NAME - intf_type = 'uplink' - elif index == vcns_const.INTERNAL_VNIC_INDEX: - name = vcns_const.INTERNAL_VNIC_NAME - intf_type = 'internal' - else: - msg = _("Vnic %d currently not supported") % index - raise exceptions.VcnsGeneralException(msg) - - config = self._assemble_edge_vnic( - name, index, network, 
address, netmask, secondary, type=intf_type) - - userdata = { - 'edge_id': edge_id, - 'config': config, - 'jobdata': jobdata - } - task_name = "update-interface-%s-%d" % (edge_id, index) - task = tasks.Task(task_name, router_id, - self._update_interface, userdata=userdata) - task.add_result_monitor(self.callbacks.interface_update_result) - self.task_manager.add(task) - return task - - def _deploy_edge(self, task): - userdata = task.userdata - name = userdata['router_name'] - LOG.debug(_("VCNS: start deploying edge %s"), name) - request = userdata['request'] - try: - header = self.vcns.deploy_edge(request)[0] - objuri = header['location'] - job_id = objuri[objuri.rfind("/") + 1:] - response = self.vcns.get_edge_id(job_id)[1] - edge_id = response['edgeId'] - LOG.debug(_("VCNS: deploying edge %s"), edge_id) - userdata['edge_id'] = edge_id - status = constants.TaskStatus.PENDING - except exceptions.VcnsApiException: - with excutils.save_and_reraise_exception(): - LOG.exception(_("VCNS: deploy edge failed for router %s."), - name) - - return status - - def _status_edge(self, task): - edge_id = task.userdata['edge_id'] - try: - response = self.vcns.get_edge_deploy_status(edge_id)[1] - task.userdata['retries'] = 0 - system_status = response.get('systemStatus', None) - if system_status is None: - status = constants.TaskStatus.PENDING - elif system_status == 'good': - status = constants.TaskStatus.COMPLETED - else: - status = constants.TaskStatus.ERROR - except exceptions.VcnsApiException: - with excutils.save_and_reraise_exception(): - LOG.exception(_("VCNS: Edge %s status query failed."), edge_id) - except Exception: - retries = task.userdata.get('retries', 0) + 1 - if retries < 3: - task.userdata['retries'] = retries - msg = _("VCNS: Unable to retrieve edge %(edge_id)s status. " - "Retry %(retries)d.") % { - 'edge_id': edge_id, - 'retries': retries} - LOG.exception(msg) - status = constants.TaskStatus.PENDING - else: - msg = _("VCNS: Unable to retrieve edge %s status. 
" - "Abort.") % edge_id - LOG.exception(msg) - status = constants.TaskStatus.ERROR - LOG.debug(_("VCNS: Edge %s status"), edge_id) - return status - - def _result_edge(self, task): - router_name = task.userdata['router_name'] - edge_id = task.userdata.get('edge_id') - if task.status != constants.TaskStatus.COMPLETED: - LOG.error(_("VCNS: Failed to deploy edge %(edge_id)s " - "for %(name)s, status %(status)d"), { - 'edge_id': edge_id, - 'name': router_name, - 'status': task.status - }) - else: - LOG.debug(_("VCNS: Edge %(edge_id)s deployed for " - "router %(name)s"), { - 'edge_id': edge_id, 'name': router_name - }) - - def _delete_edge(self, task): - edge_id = task.userdata['edge_id'] - LOG.debug(_("VCNS: start destroying edge %s"), edge_id) - status = constants.TaskStatus.COMPLETED - if edge_id: - try: - self.vcns.delete_edge(edge_id) - except exceptions.ResourceNotFound: - pass - except exceptions.VcnsApiException as e: - msg = _("VCNS: Failed to delete %(edge_id)s:\n" - "%(response)s") % { - 'edge_id': edge_id, 'response': e.response} - LOG.exception(msg) - status = constants.TaskStatus.ERROR - except Exception: - LOG.exception(_("VCNS: Failed to delete %s"), edge_id) - status = constants.TaskStatus.ERROR - - return status - - def _get_edges(self): - try: - return self.vcns.get_edges()[1] - except exceptions.VcnsApiException as e: - with excutils.save_and_reraise_exception(): - LOG.exception(_("VCNS: Failed to get edges:\n%s"), e.response) - - def deploy_edge(self, router_id, name, internal_network, jobdata=None, - wait_for_exec=False, loadbalancer_enable=True): - task_name = 'deploying-%s' % name - edge_name = name - edge = self._assemble_edge( - edge_name, datacenter_moid=self.datacenter_moid, - deployment_container_id=self.deployment_container_id, - appliance_size='large', remote_access=True) - appliance = self._assemble_edge_appliance(self.resource_pool_id, - self.datastore_id) - if appliance: - edge['appliances']['appliances'] = [appliance] - - vnic_external 
= self._assemble_edge_vnic( - vcns_const.EXTERNAL_VNIC_NAME, vcns_const.EXTERNAL_VNIC_INDEX, - self.external_network, type="uplink") - edge['vnics']['vnics'].append(vnic_external) - vnic_inside = self._assemble_edge_vnic( - vcns_const.INTERNAL_VNIC_NAME, vcns_const.INTERNAL_VNIC_INDEX, - internal_network, - vcns_const.INTEGRATION_EDGE_IPADDRESS, - vcns_const.INTEGRATION_SUBNET_NETMASK, - type="internal") - edge['vnics']['vnics'].append(vnic_inside) - if loadbalancer_enable: - self._enable_loadbalancer(edge) - userdata = { - 'request': edge, - 'router_name': name, - 'jobdata': jobdata - } - task = tasks.Task(task_name, router_id, - self._deploy_edge, - status_callback=self._status_edge, - result_callback=self._result_edge, - userdata=userdata) - task.add_executed_monitor(self.callbacks.edge_deploy_started) - task.add_result_monitor(self.callbacks.edge_deploy_result) - self.task_manager.add(task) - - if wait_for_exec: - # wait until the deploy task is executed so edge_id is available - task.wait(constants.TaskState.EXECUTED) - - return task - - def delete_edge(self, router_id, edge_id, jobdata=None): - task_name = 'delete-%s' % edge_id - userdata = { - 'router_id': router_id, - 'edge_id': edge_id, - 'jobdata': jobdata - } - task = tasks.Task(task_name, router_id, self._delete_edge, - userdata=userdata) - task.add_result_monitor(self.callbacks.edge_delete_result) - self.task_manager.add(task) - return task - - def _assemble_nat_rule(self, action, original_address, - translated_address, - vnic_index=vcns_const.EXTERNAL_VNIC_INDEX, - enabled=True): - nat_rule = {} - nat_rule['action'] = action - nat_rule['vnic'] = vnic_index - nat_rule['originalAddress'] = original_address - nat_rule['translatedAddress'] = translated_address - nat_rule['enabled'] = enabled - return nat_rule - - def get_nat_config(self, edge_id): - try: - return self.vcns.get_nat_config(edge_id)[1] - except exceptions.VcnsApiException as e: - with excutils.save_and_reraise_exception(): - 
LOG.exception(_("VCNS: Failed to get nat config:\n%s"), - e.response) - - def _create_nat_rule(self, task): - # TODO(fank): use POST for optimization - # return rule_id for future reference - rule = task.userdata['rule'] - LOG.debug(_("VCNS: start creating nat rules: %s"), rule) - edge_id = task.userdata['edge_id'] - nat = self.get_nat_config(edge_id) - location = task.userdata['location'] - - del nat['version'] - - if location is None or location == vcns_const.APPEND: - nat['rules']['natRulesDtos'].append(rule) - else: - nat['rules']['natRulesDtos'].insert(location, rule) - - try: - self.vcns.update_nat_config(edge_id, nat) - status = constants.TaskStatus.COMPLETED - except exceptions.VcnsApiException as e: - LOG.exception(_("VCNS: Failed to create snat rule:\n%s"), - e.response) - status = constants.TaskStatus.ERROR - - return status - - def create_snat_rule(self, router_id, edge_id, src, translated, - jobdata=None, location=None): - LOG.debug(_("VCNS: create snat rule %(src)s/%(translated)s"), { - 'src': src, 'translated': translated}) - snat_rule = self._assemble_nat_rule("snat", src, translated) - userdata = { - 'router_id': router_id, - 'edge_id': edge_id, - 'rule': snat_rule, - 'location': location, - 'jobdata': jobdata - } - task_name = "create-snat-%s-%s-%s" % (edge_id, src, translated) - task = tasks.Task(task_name, router_id, self._create_nat_rule, - userdata=userdata) - task.add_result_monitor(self.callbacks.snat_create_result) - self.task_manager.add(task) - return task - - def _delete_nat_rule(self, task): - # TODO(fank): pass in rule_id for optimization - # handle routes update for optimization - edge_id = task.userdata['edge_id'] - address = task.userdata['address'] - addrtype = task.userdata['addrtype'] - LOG.debug(_("VCNS: start deleting %(type)s rules: %(addr)s"), { - 'type': addrtype, 'addr': address}) - nat = self.get_nat_config(edge_id) - del nat['version'] - status = constants.TaskStatus.COMPLETED - for nat_rule in 
nat['rules']['natRulesDtos']: - if nat_rule[addrtype] == address: - rule_id = nat_rule['ruleId'] - try: - self.vcns.delete_nat_rule(edge_id, rule_id) - except exceptions.VcnsApiException as e: - LOG.exception(_("VCNS: Failed to delete snat rule:\n" - "%s"), e.response) - status = constants.TaskStatus.ERROR - - return status - - def delete_snat_rule(self, router_id, edge_id, src, jobdata=None): - LOG.debug(_("VCNS: delete snat rule %s"), src) - userdata = { - 'edge_id': edge_id, - 'address': src, - 'addrtype': 'originalAddress', - 'jobdata': jobdata - } - task_name = "delete-snat-%s-%s" % (edge_id, src) - task = tasks.Task(task_name, router_id, self._delete_nat_rule, - userdata=userdata) - task.add_result_monitor(self.callbacks.snat_delete_result) - self.task_manager.add(task) - return task - - def create_dnat_rule(self, router_id, edge_id, dst, translated, - jobdata=None, location=None): - # TODO(fank): use POST for optimization - # return rule_id for future reference - LOG.debug(_("VCNS: create dnat rule %(dst)s/%(translated)s"), { - 'dst': dst, 'translated': translated}) - dnat_rule = self._assemble_nat_rule( - "dnat", dst, translated) - userdata = { - 'router_id': router_id, - 'edge_id': edge_id, - 'rule': dnat_rule, - 'location': location, - 'jobdata': jobdata - } - task_name = "create-dnat-%s-%s-%s" % (edge_id, dst, translated) - task = tasks.Task(task_name, router_id, self._create_nat_rule, - userdata=userdata) - task.add_result_monitor(self.callbacks.dnat_create_result) - self.task_manager.add(task) - return task - - def delete_dnat_rule(self, router_id, edge_id, translated, - jobdata=None): - # TODO(fank): pass in rule_id for optimization - LOG.debug(_("VCNS: delete dnat rule %s"), translated) - userdata = { - 'edge_id': edge_id, - 'address': translated, - 'addrtype': 'translatedAddress', - 'jobdata': jobdata - } - task_name = "delete-dnat-%s-%s" % (edge_id, translated) - task = tasks.Task(task_name, router_id, self._delete_nat_rule, - userdata=userdata) - 
task.add_result_monitor(self.callbacks.dnat_delete_result) - self.task_manager.add(task) - return task - - def _update_nat_rule(self, task): - # TODO(fank): use POST for optimization - # return rule_id for future reference - edge_id = task.userdata['edge_id'] - if task != self.updated_task['nat'][edge_id]: - # this task does not have the latest config, abort now - # for speedup - return constants.TaskStatus.ABORT - - rules = task.userdata['rules'] - LOG.debug(_("VCNS: start updating nat rules: %s"), rules) - - nat = { - 'featureType': 'nat', - 'rules': { - 'natRulesDtos': rules - } - } - - try: - self.vcns.update_nat_config(edge_id, nat) - status = constants.TaskStatus.COMPLETED - except exceptions.VcnsApiException as e: - LOG.exception(_("VCNS: Failed to create snat rule:\n%s"), - e.response) - status = constants.TaskStatus.ERROR - - return status - - def update_nat_rules(self, router_id, edge_id, snats, dnats, - jobdata=None): - LOG.debug(_("VCNS: update nat rule\n" - "SNAT:%(snat)s\n" - "DNAT:%(dnat)s\n"), { - 'snat': snats, 'dnat': dnats}) - nat_rules = [] - - for dnat in dnats: - nat_rules.append(self._assemble_nat_rule( - 'dnat', dnat['dst'], dnat['translated'])) - nat_rules.append(self._assemble_nat_rule( - 'snat', dnat['translated'], dnat['dst'])) - - for snat in snats: - nat_rules.append(self._assemble_nat_rule( - 'snat', snat['src'], snat['translated'])) - - userdata = { - 'edge_id': edge_id, - 'rules': nat_rules, - 'jobdata': jobdata, - } - task_name = "update-nat-%s" % edge_id - task = tasks.Task(task_name, router_id, self._update_nat_rule, - userdata=userdata) - task.add_result_monitor(self.callbacks.nat_update_result) - self.updated_task['nat'][edge_id] = task - self.task_manager.add(task) - return task - - def _update_routes(self, task): - edge_id = task.userdata['edge_id'] - if (task != self.updated_task['route'][edge_id] and - task.userdata.get('skippable', True)): - # this task does not have the latest config, abort now - # for speedup - return 
constants.TaskStatus.ABORT - gateway = task.userdata['gateway'] - routes = task.userdata['routes'] - LOG.debug(_("VCNS: start updating routes for %s"), edge_id) - static_routes = [] - for route in routes: - static_routes.append({ - "description": "", - "vnic": vcns_const.INTERNAL_VNIC_INDEX, - "network": route['cidr'], - "nextHop": route['nexthop'] - }) - request = { - "staticRoutes": { - "staticRoutes": static_routes - } - } - if gateway: - request["defaultRoute"] = { - "description": "default-gateway", - "gatewayAddress": gateway, - "vnic": vcns_const.EXTERNAL_VNIC_INDEX - } - try: - self.vcns.update_routes(edge_id, request) - status = constants.TaskStatus.COMPLETED - except exceptions.VcnsApiException as e: - LOG.exception(_("VCNS: Failed to update routes:\n%s"), - e.response) - status = constants.TaskStatus.ERROR - - return status - - def update_routes(self, router_id, edge_id, gateway, routes, - skippable=True, jobdata=None): - if gateway: - gateway = gateway.split('/')[0] - - userdata = { - 'edge_id': edge_id, - 'gateway': gateway, - 'routes': routes, - 'skippable': skippable, - 'jobdata': jobdata - } - task_name = "update-routes-%s" % (edge_id) - task = tasks.Task(task_name, router_id, self._update_routes, - userdata=userdata) - task.add_result_monitor(self.callbacks.routes_update_result) - self.updated_task['route'][edge_id] = task - self.task_manager.add(task) - return task - - def create_lswitch(self, name, tz_config, tags=None, - port_isolation=False, replication_mode="service"): - lsconfig = { - 'display_name': utils.check_and_truncate(name), - "tags": tags or [], - "type": "LogicalSwitchConfig", - "_schema": "/ws.v1/schema/LogicalSwitchConfig", - "transport_zones": tz_config - } - if port_isolation is bool: - lsconfig["port_isolation_enabled"] = port_isolation - if replication_mode: - lsconfig["replication_mode"] = replication_mode - - response = self.vcns.create_lswitch(lsconfig)[1] - return response - - def delete_lswitch(self, lswitch_id): - 
self.vcns.delete_lswitch(lswitch_id) - - def get_loadbalancer_config(self, edge_id): - try: - header, response = self.vcns.get_loadbalancer_config( - edge_id) - except exceptions.VcnsApiException: - with excutils.save_and_reraise_exception(): - LOG.exception(_("Failed to get service config")) - return response - - def enable_service_loadbalancer(self, edge_id): - config = self.get_loadbalancer_config( - edge_id) - if not config['enabled']: - config['enabled'] = True - try: - self.vcns.enable_service_loadbalancer(edge_id, config) - except exceptions.VcnsApiException: - with excutils.save_and_reraise_exception(): - LOG.exception(_("Failed to enable loadbalancer " - "service config")) diff --git a/neutron/plugins/vmware/vshield/edge_firewall_driver.py b/neutron/plugins/vmware/vshield/edge_firewall_driver.py deleted file mode 100644 index f2e899645..000000000 --- a/neutron/plugins/vmware/vshield/edge_firewall_driver.py +++ /dev/null @@ -1,354 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2013 VMware, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# -# @author: Leon Cui, VMware - -from neutron.db import db_base_plugin_v2 -from neutron.openstack.common import excutils -from neutron.openstack.common import log as logging -from neutron.plugins.common import constants -from neutron.plugins.vmware.dbexts import vcns_db -from neutron.plugins.vmware.vshield.common import ( - exceptions as vcns_exc) - -LOG = logging.getLogger(__name__) - -VSE_FWAAS_ALLOW = "accept" -VSE_FWAAS_DENY = "deny" - - -class EdgeFirewallDriver(db_base_plugin_v2.NeutronDbPluginV2): - """Implementation of driver APIs for - Edge Firewall feature configuration - """ - def _convert_firewall_action(self, action): - if action == constants.FWAAS_ALLOW: - return VSE_FWAAS_ALLOW - elif action == constants.FWAAS_DENY: - return VSE_FWAAS_DENY - else: - msg = _("Invalid action value %s in a firewall rule") % action - raise vcns_exc.VcnsBadRequest(resource='firewall_rule', msg=msg) - - def _restore_firewall_action(self, action): - if action == VSE_FWAAS_ALLOW: - return constants.FWAAS_ALLOW - elif action == VSE_FWAAS_DENY: - return constants.FWAAS_DENY - else: - msg = (_("Invalid action value %s in " - "a vshield firewall rule") % action) - raise vcns_exc.VcnsBadRequest(resource='firewall_rule', msg=msg) - - def _get_port_range_from_min_max_ports(self, min_port, max_port): - if not min_port: - return None - if min_port == max_port: - return str(min_port) - else: - return '%d:%d' % (min_port, max_port) - - def _get_min_max_ports_from_range(self, port_range): - if not port_range: - return [None, None] - min_port, sep, max_port = port_range.partition(":") - if not max_port: - max_port = min_port - return [int(min_port), int(max_port)] - - def _convert_firewall_rule(self, context, rule, index=None): - vcns_rule = { - "name": rule['name'], - "description": rule['description'], - "action": self._convert_firewall_action(rule['action']), - "enabled": rule['enabled']} - if rule.get('source_ip_address'): - vcns_rule['source'] = { - "ipAddress": 
[rule['source_ip_address']] - } - if rule.get('destination_ip_address'): - vcns_rule['destination'] = { - "ipAddress": [rule['destination_ip_address']] - } - service = {} - if rule.get('source_port'): - min_port, max_port = self._get_min_max_ports_from_range( - rule['source_port']) - service['sourcePort'] = [i for i in range(min_port, max_port + 1)] - if rule.get('destination_port'): - min_port, max_port = self._get_min_max_ports_from_range( - rule['destination_port']) - service['port'] = [i for i in range(min_port, max_port + 1)] - if rule.get('protocol'): - service['protocol'] = rule['protocol'] - if service: - vcns_rule['application'] = { - 'service': [service] - } - if index: - vcns_rule['ruleTag'] = index - return vcns_rule - - def _restore_firewall_rule(self, context, edge_id, response): - rule = response - rule_binding = vcns_db.get_vcns_edge_firewallrule_binding_by_vseid( - context.session, edge_id, rule['ruleId']) - service = rule['application']['service'][0] - src_port_range = self._get_port_range_from_min_max_ports( - service['sourcePort'][0], service['sourcePort'][-1]) - dst_port_range = self._get_port_range_from_min_max_ports( - service['port'][0], service['port'][-1]) - return { - 'firewall_rule': { - 'name': rule['name'], - 'id': rule_binding['rule_id'], - 'description': rule['description'], - 'source_ip_address': rule['source']['ipAddress'][0], - 'destination_ip_address': rule['destination']['ipAddress'][0], - 'protocol': service['protocol'], - 'destination_port': dst_port_range, - 'source_port': src_port_range, - 'action': self._restore_firewall_action(rule['action']), - 'enabled': rule['enabled']}} - - def _convert_firewall(self, context, firewall): - #bulk configuration on firewall and rescheduling the rule binding - ruleTag = 1 - vcns_rules = [] - for rule in firewall['firewall_rule_list']: - vcns_rule = self._convert_firewall_rule(context, rule, ruleTag) - vcns_rules.append(vcns_rule) - ruleTag += 1 - return { - 'featureType': "firewall_4.0", - 
'firewallRules': { - 'firewallRules': vcns_rules}} - - def _restore_firewall(self, context, edge_id, response): - res = {} - res['firewall_rule_list'] = [] - for rule in response['firewallRules']['firewallRules']: - rule_binding = ( - vcns_db.get_vcns_edge_firewallrule_binding_by_vseid( - context.session, edge_id, rule['ruleId'])) - if rule_binding is None: - continue - service = rule['application']['service'][0] - src_port_range = self._get_port_range_from_min_max_ports( - service['sourcePort'][0], service['sourcePort'][-1]) - dst_port_range = self._get_port_range_from_min_max_ports( - service['port'][0], service['port'][-1]) - item = { - 'firewall_rule': { - 'name': rule['name'], - 'id': rule_binding['rule_id'], - 'description': rule['description'], - 'source_ip_address': rule['source']['ipAddress'][0], - 'destination_ip_address': rule[ - 'destination']['ipAddress'][0], - 'protocol': service['protocol'], - 'destination_port': dst_port_range, - 'source_port': src_port_range, - 'action': self._restore_firewall_action(rule['action']), - 'enabled': rule['enabled']}} - res['firewall_rule_list'].append(item) - return res - - def _create_rule_id_mapping( - self, context, edge_id, firewall, vcns_fw): - for rule in vcns_fw['firewallRules']['firewallRules']: - index = rule['ruleTag'] - 1 - #TODO(linb):a simple filter of the retrived rules which may be - #created by other operations unintentionally - if index < len(firewall['firewall_rule_list']): - rule_vseid = rule['ruleId'] - rule_id = firewall['firewall_rule_list'][index]['id'] - map_info = { - 'rule_id': rule_id, - 'rule_vseid': rule_vseid, - 'edge_id': edge_id - } - vcns_db.add_vcns_edge_firewallrule_binding( - context.session, map_info) - - def _get_firewall(self, context, edge_id): - try: - return self.vcns.get_firewall(edge_id)[1] - except vcns_exc.VcnsApiException: - with excutils.save_and_reraise_exception(): - LOG.exception(_("Failed to get firewall with edge " - "id: %s"), edge_id) - - def 
_get_firewall_rule_next(self, context, edge_id, rule_vseid): - # Return the firewall rule below 'rule_vseid' - fw_cfg = self._get_firewall(context, edge_id) - for i in range(len(fw_cfg['firewallRules']['firewallRules'])): - rule_cur = fw_cfg['firewallRules']['firewallRules'][i] - if str(rule_cur['ruleId']) == rule_vseid: - if (i + 1) == len(fw_cfg['firewallRules']['firewallRules']): - return None - else: - return fw_cfg['firewallRules']['firewallRules'][i + 1] - - def get_firewall_rule(self, context, id, edge_id): - rule_map = vcns_db.get_vcns_edge_firewallrule_binding( - context.session, id, edge_id) - if rule_map is None: - msg = _("No rule id:%s found in the edge_firewall_binding") % id - LOG.error(msg) - raise vcns_exc.VcnsNotFound( - resource='vcns_firewall_rule_bindings', msg=msg) - vcns_rule_id = rule_map.rule_vseid - try: - response = self.vcns.get_firewall_rule( - edge_id, vcns_rule_id)[1] - except vcns_exc.VcnsApiException: - with excutils.save_and_reraise_exception(): - LOG.exception(_("Failed to get firewall rule: %(rule_id)s " - "with edge_id: %(edge_id)s"), { - 'rule_id': id, - 'edge_id': edge_id}) - return self._restore_firewall_rule(context, edge_id, response) - - def get_firewall(self, context, edge_id): - response = self._get_firewall(context, edge_id) - return self._restore_firewall(context, edge_id, response) - - def update_firewall(self, context, edge_id, firewall): - fw_req = self._convert_firewall(context, firewall) - try: - self.vcns.update_firewall(edge_id, fw_req) - except vcns_exc.VcnsApiException: - with excutils.save_and_reraise_exception(): - LOG.exception(_("Failed to update firewall " - "with edge_id: %s"), edge_id) - fw_res = self._get_firewall(context, edge_id) - vcns_db.cleanup_vcns_edge_firewallrule_binding( - context.session, edge_id) - self._create_rule_id_mapping(context, edge_id, firewall, fw_res) - - def delete_firewall(self, context, edge_id): - try: - self.vcns.delete_firewall(edge_id) - except vcns_exc.VcnsApiException: - 
with excutils.save_and_reraise_exception(): - LOG.exception(_("Failed to delete firewall " - "with edge_id:%s"), edge_id) - vcns_db.cleanup_vcns_edge_firewallrule_binding( - context.session, edge_id) - - def update_firewall_rule(self, context, id, edge_id, firewall_rule): - rule_map = vcns_db.get_vcns_edge_firewallrule_binding( - context.session, id, edge_id) - vcns_rule_id = rule_map.rule_vseid - fwr_req = self._convert_firewall_rule(context, firewall_rule) - try: - self.vcns.update_firewall_rule(edge_id, vcns_rule_id, fwr_req) - except vcns_exc.VcnsApiException: - with excutils.save_and_reraise_exception(): - LOG.exception(_("Failed to update firewall rule: %(rule_id)s " - "with edge_id: %(edge_id)s"), - {'rule_id': id, - 'edge_id': edge_id}) - - def delete_firewall_rule(self, context, id, edge_id): - rule_map = vcns_db.get_vcns_edge_firewallrule_binding( - context.session, id, edge_id) - vcns_rule_id = rule_map.rule_vseid - try: - self.vcns.delete_firewall_rule(edge_id, vcns_rule_id) - except vcns_exc.VcnsApiException: - with excutils.save_and_reraise_exception(): - LOG.exception(_("Failed to delete firewall rule: %(rule_id)s " - "with edge_id: %(edge_id)s"), - {'rule_id': id, - 'edge_id': edge_id}) - vcns_db.delete_vcns_edge_firewallrule_binding( - context.session, id, edge_id) - - def _add_rule_above(self, context, ref_rule_id, edge_id, firewall_rule): - rule_map = vcns_db.get_vcns_edge_firewallrule_binding( - context.session, ref_rule_id, edge_id) - ref_vcns_rule_id = rule_map.rule_vseid - fwr_req = self._convert_firewall_rule(context, firewall_rule) - try: - header = self.vcns.add_firewall_rule_above( - edge_id, ref_vcns_rule_id, fwr_req)[0] - except vcns_exc.VcnsApiException: - with excutils.save_and_reraise_exception(): - LOG.exception(_("Failed to add firewall rule above: " - "%(rule_id)s with edge_id: %(edge_id)s"), - {'rule_id': ref_vcns_rule_id, - 'edge_id': edge_id}) - - objuri = header['location'] - fwr_vseid = objuri[objuri.rfind("/") + 1:] - 
map_info = { - 'rule_id': firewall_rule['id'], - 'rule_vseid': fwr_vseid, - 'edge_id': edge_id} - vcns_db.add_vcns_edge_firewallrule_binding( - context.session, map_info) - - def _add_rule_below(self, context, ref_rule_id, edge_id, firewall_rule): - rule_map = vcns_db.get_vcns_edge_firewallrule_binding( - context.session, ref_rule_id, edge_id) - ref_vcns_rule_id = rule_map.rule_vseid - fwr_vse_next = self._get_firewall_rule_next( - context, edge_id, ref_vcns_rule_id) - fwr_req = self._convert_firewall_rule(context, firewall_rule) - if fwr_vse_next: - ref_vcns_rule_id = fwr_vse_next['ruleId'] - try: - header = self.vcns.add_firewall_rule_above( - edge_id, int(ref_vcns_rule_id), fwr_req)[0] - except vcns_exc.VcnsApiException: - with excutils.save_and_reraise_exception(): - LOG.exception(_("Failed to add firewall rule above: " - "%(rule_id)s with edge_id: %(edge_id)s"), - {'rule_id': ref_vcns_rule_id, - 'edge_id': edge_id}) - else: - # append the rule at the bottom - try: - header = self.vcns.add_firewall_rule( - edge_id, fwr_req)[0] - except vcns_exc.VcnsApiException: - with excutils.save_and_reraise_exception(): - LOG.exception(_("Failed to append a firewall rule" - "with edge_id: %s"), edge_id) - - objuri = header['location'] - fwr_vseid = objuri[objuri.rfind("/") + 1:] - map_info = { - 'rule_id': firewall_rule['id'], - 'rule_vseid': fwr_vseid, - 'edge_id': edge_id - } - vcns_db.add_vcns_edge_firewallrule_binding( - context.session, map_info) - - def insert_rule(self, context, rule_info, edge_id, fwr): - if rule_info.get('insert_before'): - self._add_rule_above( - context, rule_info['insert_before'], edge_id, fwr) - elif rule_info.get('insert_after'): - self._add_rule_below( - context, rule_info['insert_after'], edge_id, fwr) - else: - msg = _("Can't execute insert rule operation " - "without reference rule_id") - raise vcns_exc.VcnsBadRequest(resource='firewall_rule', msg=msg) diff --git a/neutron/plugins/vmware/vshield/edge_ipsecvpn_driver.py 
b/neutron/plugins/vmware/vshield/edge_ipsecvpn_driver.py deleted file mode 100644 index 7e74fe1f8..000000000 --- a/neutron/plugins/vmware/vshield/edge_ipsecvpn_driver.py +++ /dev/null @@ -1,150 +0,0 @@ -# Copyright 2014 VMware, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from neutron.openstack.common import excutils -from neutron.openstack.common import log as logging -from neutron.plugins.vmware.vshield.common import ( - exceptions as vcns_exc) - -LOG = logging.getLogger(__name__) - -ENCRYPTION_ALGORITHM_MAP = { - '3des': '3des', - 'aes-128': 'aes', - 'aes-256': 'aes256' -} - -PFS_MAP = { - 'group2': 'dh2', - 'group5': 'dh5'} - -TRANSFORM_PROTOCOL_ALLOWED = ('esp',) - -ENCAPSULATION_MODE_ALLOWED = ('tunnel',) - - -class EdgeIPsecVpnDriver(): - - """Driver APIs for Edge IPsec VPN bulk configuration.""" - - def _check_ikepolicy_ipsecpolicy_allowed(self, ikepolicy, ipsecpolicy): - """Check whether ikepolicy and ipsecpolicy are allowed on vshield edge. - - Some IPsec VPN configurations and features are configured by default or - not supported on vshield edge. - - """ - # Check validation of IKEPolicy. - if ikepolicy['ike_version'] != 'v1': - msg = _("Unsupported ike_version: %s! Only 'v1' ike version is " - "supported on vshield Edge!" - ) % ikepolicy['ike_version'] - LOG.warning(msg) - raise vcns_exc.VcnsBadRequest(resource='ikepolicy', - msg=msg) - - # In VSE, Phase 1 and Phase 2 share the same encryption_algorithm - # and authentication algorithms setting. 
At present, just record the - # discrepancy error in log and take ipsecpolicy to do configuration. - if (ikepolicy['auth_algorithm'] != ipsecpolicy['auth_algorithm'] or - ikepolicy['encryption_algorithm'] != ipsecpolicy[ - 'encryption_algorithm'] or - ikepolicy['pfs'] != ipsecpolicy['pfs']): - msg = _("IKEPolicy and IPsecPolicy should have consistent " - "auth_algorithm, encryption_algorithm and pfs for VSE!") - LOG.warning(msg) - - # Check whether encryption_algorithm is allowed. - encryption_algorithm = ENCRYPTION_ALGORITHM_MAP.get( - ipsecpolicy.get('encryption_algorithm'), None) - if not encryption_algorithm: - msg = _("Unsupported encryption_algorithm: %s! '3des', " - "'aes-128' and 'aes-256' are supported on VSE right now." - ) % ipsecpolicy['encryption_algorithm'] - LOG.warning(msg) - raise vcns_exc.VcnsBadRequest(resource='ipsecpolicy', - msg=msg) - - # Check whether pfs is allowed. - if not PFS_MAP.get(ipsecpolicy['pfs']): - msg = _("Unsupported pfs: %s! 'group2' and 'group5' " - "are supported on VSE right now.") % ipsecpolicy['pfs'] - LOG.warning(msg) - raise vcns_exc.VcnsBadRequest(resource='ipsecpolicy', - msg=msg) - - # Check whether transform protocol is allowed. - if ipsecpolicy['transform_protocol'] not in TRANSFORM_PROTOCOL_ALLOWED: - msg = _("Unsupported transform protocol: %s! 'esp' is supported " - "by default on VSE right now." - ) % ipsecpolicy['transform_protocol'] - LOG.warning(msg) - raise vcns_exc.VcnsBadRequest(resource='ipsecpolicy', - msg=msg) - - # Check whether encapsulation mode is allowed. - if ipsecpolicy['encapsulation_mode'] not in ENCAPSULATION_MODE_ALLOWED: - msg = _("Unsupported encapsulation mode: %s! 'tunnel' is " - "supported by default on VSE right now." 
- ) % ipsecpolicy['encapsulation_mode'] - LOG.warning(msg) - raise vcns_exc.VcnsBadRequest(resource='ipsecpolicy', - msg=msg) - - def _convert_ipsec_site(self, site, enablePfs=True): - self._check_ikepolicy_ipsecpolicy_allowed( - site['ikepolicy'], site['ipsecpolicy']) - return { - 'enabled': site['site'].get('admin_state_up'), - 'enablePfs': enablePfs, - 'dhGroup': PFS_MAP.get(site['ipsecpolicy']['pfs']), - 'name': site['site'].get('name'), - 'description': site['site'].get('description'), - 'localId': site['external_ip'], - 'localIp': site['external_ip'], - 'peerId': site['site'].get('peer_id'), - 'peerIp': site['site'].get('peer_address'), - 'localSubnets': { - 'subnets': [site['subnet'].get('cidr')]}, - 'peerSubnets': { - 'subnets': site['site'].get('peer_cidrs')}, - 'authenticationMode': site['site'].get('auth_mode'), - 'psk': site['site'].get('psk'), - 'encryptionAlgorithm': ENCRYPTION_ALGORITHM_MAP.get( - site['ipsecpolicy'].get('encryption_algorithm'))} - - def update_ipsec_config(self, edge_id, sites, enabled=True): - ipsec_config = {'featureType': "ipsec_4.0", - 'enabled': enabled} - vse_sites = [self._convert_ipsec_site(site) for site in sites] - ipsec_config['sites'] = {'sites': vse_sites} - try: - self.vcns.update_ipsec_config(edge_id, ipsec_config) - except vcns_exc.VcnsApiException: - with excutils.save_and_reraise_exception(): - LOG.exception(_("Failed to update ipsec vpn configuration " - "with edge_id: %s"), edge_id) - - def delete_ipsec_config(self, edge_id): - try: - self.vcns.delete_ipsec_config(edge_id) - except vcns_exc.ResourceNotFound: - LOG.warning(_("IPsec config not found on edge: %s"), edge_id) - except vcns_exc.VcnsApiException: - with excutils.save_and_reraise_exception(): - LOG.exception(_("Failed to delete ipsec vpn configuration " - "with edge_id: %s"), edge_id) - - def get_ipsec_config(self, edge_id): - return self.vcns.get_ipsec_config(edge_id) diff --git a/neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py 
b/neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py deleted file mode 100644 index cb9e24eaa..000000000 --- a/neutron/plugins/vmware/vshield/edge_loadbalancer_driver.py +++ /dev/null @@ -1,403 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2013 VMware, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Leon Cui, VMware - -from neutron.openstack.common import excutils -from neutron.openstack.common import log as logging -from neutron.plugins.vmware.dbexts import vcns_db -from neutron.plugins.vmware.vshield.common import ( - constants as vcns_const) -from neutron.plugins.vmware.vshield.common import ( - exceptions as vcns_exc) -from neutron.services.loadbalancer import constants as lb_constants - -LOG = logging.getLogger(__name__) - -BALANCE_MAP = { - lb_constants.LB_METHOD_ROUND_ROBIN: 'round-robin', - lb_constants.LB_METHOD_LEAST_CONNECTIONS: 'leastconn', - lb_constants.LB_METHOD_SOURCE_IP: 'source' -} -PROTOCOL_MAP = { - lb_constants.PROTOCOL_TCP: 'tcp', - lb_constants.PROTOCOL_HTTP: 'http', - lb_constants.PROTOCOL_HTTPS: 'tcp' -} -SESSION_PERSISTENCE_METHOD_MAP = { - lb_constants.SESSION_PERSISTENCE_SOURCE_IP: 'sourceip', - lb_constants.SESSION_PERSISTENCE_APP_COOKIE: 'cookie', - lb_constants.SESSION_PERSISTENCE_HTTP_COOKIE: 'cookie'} -SESSION_PERSISTENCE_COOKIE_MAP = { - lb_constants.SESSION_PERSISTENCE_APP_COOKIE: 'app', - lb_constants.SESSION_PERSISTENCE_HTTP_COOKIE: 'insert'} - - -class EdgeLbDriver(): - 
"""Implementation of driver APIs for - Edge Loadbalancer feature configuration - """ - - def _convert_lb_vip(self, context, edge_id, vip, app_profileid): - pool_id = vip.get('pool_id') - poolid_map = vcns_db.get_vcns_edge_pool_binding( - context.session, pool_id, edge_id) - pool_vseid = poolid_map['pool_vseid'] - return { - 'name': vip.get( - 'name', '') + vip['id'][-vcns_const.SUFFIX_LENGTH:], - 'description': vip.get('description'), - 'ipAddress': vip.get('address'), - 'protocol': vip.get('protocol'), - 'port': vip.get('protocol_port'), - 'connectionLimit': max(0, vip.get('connection_limit')), - 'defaultPoolId': pool_vseid, - 'applicationProfileId': app_profileid - } - - def _restore_lb_vip(self, context, edge_id, vip_vse): - pool_binding = vcns_db.get_vcns_edge_pool_binding_by_vseid( - context.session, - edge_id, - vip_vse['defaultPoolId']) - - return { - 'name': vip_vse['name'][:-vcns_const.SUFFIX_LENGTH], - 'address': vip_vse['ipAddress'], - 'protocol': vip_vse['protocol'], - 'protocol_port': vip_vse['port'], - 'pool_id': pool_binding['pool_id'] - } - - def _convert_lb_pool(self, context, edge_id, pool, members): - vsepool = { - 'name': pool.get( - 'name', '') + pool['id'][-vcns_const.SUFFIX_LENGTH:], - 'description': pool.get('description'), - 'algorithm': BALANCE_MAP.get( - pool.get('lb_method'), - 'round-robin'), - 'transparent': True, - 'member': [], - 'monitorId': [] - } - for member in members: - vsepool['member'].append({ - 'ipAddress': member['address'], - 'weight': member['weight'], - 'port': member['protocol_port'] - }) - ##TODO(linb) right now, vse only accept at most one monitor per pool - monitors = pool.get('health_monitors') - if not monitors: - return vsepool - monitorid_map = vcns_db.get_vcns_edge_monitor_binding( - context.session, - monitors[0], - edge_id) - vsepool['monitorId'].append(monitorid_map['monitor_vseid']) - return vsepool - - def _restore_lb_pool(self, context, edge_id, pool_vse): - #TODO(linb): Get more usefule info - return { - 
'name': pool_vse['name'][:-vcns_const.SUFFIX_LENGTH], - } - - def _convert_lb_monitor(self, context, monitor): - return { - 'type': PROTOCOL_MAP.get( - monitor.get('type'), 'http'), - 'interval': monitor.get('delay'), - 'timeout': monitor.get('timeout'), - 'maxRetries': monitor.get('max_retries'), - 'name': monitor.get('id') - } - - def _restore_lb_monitor(self, context, edge_id, monitor_vse): - return { - 'delay': monitor_vse['interval'], - 'timeout': monitor_vse['timeout'], - 'max_retries': monitor_vse['maxRetries'], - 'id': monitor_vse['name'] - } - - def _convert_app_profile(self, name, sess_persist, protocol): - vcns_app_profile = { - 'insertXForwardedFor': False, - 'name': name, - 'serverSslEnabled': False, - 'sslPassthrough': False, - 'template': protocol, - } - # Since SSL Termination is not supported right now, so just use - # sslPassthrough mehtod if the protocol is HTTPS. - if protocol == lb_constants.PROTOCOL_HTTPS: - vcns_app_profile['sslPassthrough'] = True - - if sess_persist.get('type'): - # If protocol is not HTTP, only sourceip is supported - if (protocol != lb_constants.PROTOCOL_HTTP and - sess_persist['type'] != ( - lb_constants.SESSION_PERSISTENCE_SOURCE_IP)): - msg = (_("Invalid %(protocol)s persistence method: %(type)s") % - {'protocol': protocol, - 'type': sess_persist['type']}) - raise vcns_exc.VcnsBadRequest(resource='sess_persist', msg=msg) - persistence = { - 'method': SESSION_PERSISTENCE_METHOD_MAP.get( - sess_persist['type'])} - if sess_persist['type'] in SESSION_PERSISTENCE_COOKIE_MAP: - if sess_persist.get('cookie_name'): - persistence['cookieName'] = sess_persist['cookie_name'] - else: - persistence['cookieName'] = 'default_cookie_name' - persistence['cookieMode'] = SESSION_PERSISTENCE_COOKIE_MAP.get( - sess_persist['type']) - vcns_app_profile['persistence'] = persistence - return vcns_app_profile - - def create_vip(self, context, edge_id, vip): - app_profile = self._convert_app_profile( - vip['name'], 
(vip.get('session_persistence') or {}), - vip.get('protocol')) - try: - header, response = self.vcns.create_app_profile( - edge_id, app_profile) - except vcns_exc.VcnsApiException: - with excutils.save_and_reraise_exception(): - LOG.exception(_("Failed to create app profile on edge: %s"), - edge_id) - objuri = header['location'] - app_profileid = objuri[objuri.rfind("/") + 1:] - - vip_new = self._convert_lb_vip(context, edge_id, vip, app_profileid) - try: - header, response = self.vcns.create_vip( - edge_id, vip_new) - except vcns_exc.VcnsApiException: - with excutils.save_and_reraise_exception(): - LOG.exception(_("Failed to create vip on vshield edge: %s"), - edge_id) - self.vcns.delete_app_profile(edge_id, app_profileid) - objuri = header['location'] - vip_vseid = objuri[objuri.rfind("/") + 1:] - - # Add the vip mapping - map_info = { - "vip_id": vip['id'], - "vip_vseid": vip_vseid, - "edge_id": edge_id, - "app_profileid": app_profileid - } - vcns_db.add_vcns_edge_vip_binding(context.session, map_info) - - def _get_vip_binding(self, session, id): - vip_binding = vcns_db.get_vcns_edge_vip_binding(session, id) - if not vip_binding: - msg = (_("vip_binding not found with id: %(id)s " - "edge_id: %(edge_id)s") % { - 'id': id, - 'edge_id': vip_binding[vcns_const.EDGE_ID]}) - LOG.error(msg) - raise vcns_exc.VcnsNotFound( - resource='router_service_binding', msg=msg) - return vip_binding - - def get_vip(self, context, id): - vip_binding = vcns_db.get_vcns_edge_vip_binding(context.session, id) - edge_id = vip_binding[vcns_const.EDGE_ID] - vip_vseid = vip_binding['vip_vseid'] - try: - response = self.vcns.get_vip(edge_id, vip_vseid)[1] - except vcns_exc.VcnsApiException: - with excutils.save_and_reraise_exception(): - LOG.exception(_("Failed to get vip on edge")) - return self._restore_lb_vip(context, edge_id, response) - - def update_vip(self, context, vip, session_persistence_update=True): - vip_binding = self._get_vip_binding(context.session, vip['id']) - edge_id = 
vip_binding[vcns_const.EDGE_ID] - vip_vseid = vip_binding.get('vip_vseid') - if session_persistence_update: - app_profileid = vip_binding.get('app_profileid') - app_profile = self._convert_app_profile( - vip['name'], vip.get('session_persistence', {}), - vip.get('protocol')) - try: - self.vcns.update_app_profile( - edge_id, app_profileid, app_profile) - except vcns_exc.VcnsApiException: - with excutils.save_and_reraise_exception(): - LOG.exception(_("Failed to update app profile on " - "edge: %s") % edge_id) - - vip_new = self._convert_lb_vip(context, edge_id, vip, app_profileid) - try: - self.vcns.update_vip(edge_id, vip_vseid, vip_new) - except vcns_exc.VcnsApiException: - with excutils.save_and_reraise_exception(): - LOG.exception(_("Failed to update vip on edge: %s") % edge_id) - - def delete_vip(self, context, id): - vip_binding = self._get_vip_binding(context.session, id) - edge_id = vip_binding[vcns_const.EDGE_ID] - vip_vseid = vip_binding['vip_vseid'] - app_profileid = vip_binding['app_profileid'] - - try: - self.vcns.delete_vip(edge_id, vip_vseid) - except vcns_exc.ResourceNotFound: - LOG.exception(_("vip not found on edge: %s") % edge_id) - except vcns_exc.VcnsApiException: - with excutils.save_and_reraise_exception(): - LOG.exception(_("Failed to delete vip on edge: %s") % edge_id) - - try: - self.vcns.delete_app_profile(edge_id, app_profileid) - except vcns_exc.ResourceNotFound: - LOG.exception(_("app profile not found on edge: %s") % edge_id) - except vcns_exc.VcnsApiException: - with excutils.save_and_reraise_exception(): - LOG.exception(_("Failed to delete app profile on edge: %s") % - edge_id) - - vcns_db.delete_vcns_edge_vip_binding(context.session, id) - - def create_pool(self, context, edge_id, pool, members): - pool_new = self._convert_lb_pool(context, edge_id, pool, members) - try: - header = self.vcns.create_pool(edge_id, pool_new)[0] - except vcns_exc.VcnsApiException: - with excutils.save_and_reraise_exception(): - LOG.exception(_("Failed to 
create pool")) - - objuri = header['location'] - pool_vseid = objuri[objuri.rfind("/") + 1:] - - # update the pool mapping table - map_info = { - "pool_id": pool['id'], - "pool_vseid": pool_vseid, - "edge_id": edge_id - } - vcns_db.add_vcns_edge_pool_binding(context.session, map_info) - - def get_pool(self, context, id, edge_id): - pool_binding = vcns_db.get_vcns_edge_pool_binding( - context.session, id, edge_id) - if not pool_binding: - msg = (_("pool_binding not found with id: %(id)s " - "edge_id: %(edge_id)s") % {'id': id, 'edge_id': edge_id}) - LOG.error(msg) - raise vcns_exc.VcnsNotFound( - resource='router_service_binding', msg=msg) - pool_vseid = pool_binding['pool_vseid'] - try: - response = self.vcns.get_pool(edge_id, pool_vseid)[1] - except vcns_exc.VcnsApiException: - with excutils.save_and_reraise_exception(): - LOG.exception(_("Failed to get pool on edge")) - return self._restore_lb_pool(context, edge_id, response) - - def update_pool(self, context, edge_id, pool, members): - pool_binding = vcns_db.get_vcns_edge_pool_binding( - context.session, pool['id'], edge_id) - pool_vseid = pool_binding['pool_vseid'] - pool_new = self._convert_lb_pool(context, edge_id, pool, members) - try: - self.vcns.update_pool(edge_id, pool_vseid, pool_new) - except vcns_exc.VcnsApiException: - with excutils.save_and_reraise_exception(): - LOG.exception(_("Failed to update pool")) - - def delete_pool(self, context, id, edge_id): - pool_binding = vcns_db.get_vcns_edge_pool_binding( - context.session, id, edge_id) - pool_vseid = pool_binding['pool_vseid'] - try: - self.vcns.delete_pool(edge_id, pool_vseid) - except vcns_exc.VcnsApiException: - with excutils.save_and_reraise_exception(): - LOG.exception(_("Failed to delete pool")) - vcns_db.delete_vcns_edge_pool_binding( - context.session, id, edge_id) - - def create_health_monitor(self, context, edge_id, health_monitor): - monitor_new = self._convert_lb_monitor(context, health_monitor) - try: - header = 
self.vcns.create_health_monitor(edge_id, monitor_new)[0] - except vcns_exc.VcnsApiException: - with excutils.save_and_reraise_exception(): - LOG.exception(_("Failed to create monitor on edge: %s"), - edge_id) - - objuri = header['location'] - monitor_vseid = objuri[objuri.rfind("/") + 1:] - - # update the health_monitor mapping table - map_info = { - "monitor_id": health_monitor['id'], - "monitor_vseid": monitor_vseid, - "edge_id": edge_id - } - vcns_db.add_vcns_edge_monitor_binding(context.session, map_info) - - def get_health_monitor(self, context, id, edge_id): - monitor_binding = vcns_db.get_vcns_edge_monitor_binding( - context.session, id, edge_id) - if not monitor_binding: - msg = (_("monitor_binding not found with id: %(id)s " - "edge_id: %(edge_id)s") % {'id': id, 'edge_id': edge_id}) - LOG.error(msg) - raise vcns_exc.VcnsNotFound( - resource='router_service_binding', msg=msg) - monitor_vseid = monitor_binding['monitor_vseid'] - try: - response = self.vcns.get_health_monitor(edge_id, monitor_vseid)[1] - except vcns_exc.VcnsApiException as e: - with excutils.save_and_reraise_exception(): - LOG.exception(_("Failed to get monitor on edge: %s"), - e.response) - return self._restore_lb_monitor(context, edge_id, response) - - def update_health_monitor(self, context, edge_id, - old_health_monitor, health_monitor): - monitor_binding = vcns_db.get_vcns_edge_monitor_binding( - context.session, - old_health_monitor['id'], edge_id) - monitor_vseid = monitor_binding['monitor_vseid'] - monitor_new = self._convert_lb_monitor( - context, health_monitor) - try: - self.vcns.update_health_monitor( - edge_id, monitor_vseid, monitor_new) - except vcns_exc.VcnsApiException: - with excutils.save_and_reraise_exception(): - LOG.exception(_("Failed to update monitor on edge: %s"), - edge_id) - - def delete_health_monitor(self, context, id, edge_id): - monitor_binding = vcns_db.get_vcns_edge_monitor_binding( - context.session, id, edge_id) - monitor_vseid = 
monitor_binding['monitor_vseid'] - try: - self.vcns.delete_health_monitor(edge_id, monitor_vseid) - except vcns_exc.VcnsApiException: - with excutils.save_and_reraise_exception(): - LOG.exception(_("Failed to delete monitor")) - vcns_db.delete_vcns_edge_monitor_binding( - context.session, id, edge_id) diff --git a/neutron/plugins/vmware/vshield/tasks/__init__.py b/neutron/plugins/vmware/vshield/tasks/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/neutron/plugins/vmware/vshield/tasks/constants.py b/neutron/plugins/vmware/vshield/tasks/constants.py deleted file mode 100644 index f5322e0b9..000000000 --- a/neutron/plugins/vmware/vshield/tasks/constants.py +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright 2013 VMware, Inc. -# All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -class TaskStatus(object): - """Task running status. - - This is used by execution/status callback function to notify the - task manager what's the status of current task, and also used for - indication the final task execution result. - """ - PENDING = 1 - COMPLETED = 2 - ERROR = 3 - ABORT = 4 - - -class TaskState(object): - """Current state of a task. - - This is to keep track of the current state of a task. 
- NONE: the task is still in the queue - START: the task is pull out from the queue and is about to be executed - EXECUTED: the task has been executed - STATUS: we're running periodic status check for this task - RESULT: the task has finished and result is ready - """ - NONE = -1 - START = 0 - EXECUTED = 1 - STATUS = 2 - RESULT = 3 diff --git a/neutron/plugins/vmware/vshield/tasks/tasks.py b/neutron/plugins/vmware/vshield/tasks/tasks.py deleted file mode 100644 index 7037c430d..000000000 --- a/neutron/plugins/vmware/vshield/tasks/tasks.py +++ /dev/null @@ -1,397 +0,0 @@ -# Copyright 2013 VMware, Inc. -# All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import collections -import uuid - -from eventlet import event -from eventlet import greenthread - -from neutron.common import exceptions -from neutron.openstack.common import log as logging -from neutron.openstack.common import loopingcall -from neutron.plugins.vmware.vshield.tasks import constants - -DEFAULT_INTERVAL = 1000 - -LOG = logging.getLogger(__name__) - - -def nop(task): - return constants.TaskStatus.COMPLETED - - -class TaskException(exceptions.NeutronException): - - def __init__(self, message=None, **kwargs): - if message is not None: - self.message = message - - super(TaskException, self).__init__(**kwargs) - - -class InvalidState(TaskException): - message = _("Invalid state %(state)d") - - -class TaskStateSkipped(TaskException): - message = _("State %(state)d skipped. 
Current state %(current)d") - - -class Task(): - def __init__(self, name, resource_id, execute_callback, - status_callback=nop, result_callback=nop, userdata=None): - self.name = name - self.resource_id = resource_id - self._execute_callback = execute_callback - self._status_callback = status_callback - self._result_callback = result_callback - self.userdata = userdata - self.id = None - self.status = None - - self._monitors = { - constants.TaskState.START: [], - constants.TaskState.EXECUTED: [], - constants.TaskState.RESULT: [] - } - self._states = [None, None, None, None] - self._state = constants.TaskState.NONE - - def _add_monitor(self, action, func): - self._monitors[action].append(func) - return self - - def _move_state(self, state): - self._state = state - if self._states[state] is not None: - e = self._states[state] - self._states[state] = None - e.send() - - for s in range(state): - if self._states[s] is not None: - e = self._states[s] - self._states[s] = None - e.send_exception( - TaskStateSkipped(state=s, current=self._state)) - - def _invoke_monitor(self, state): - for func in self._monitors[state]: - try: - func(self) - except Exception: - msg = _("Task %(task)s encountered exception in %(func)s " - "at state %(state)s") % { - 'task': str(self), - 'func': str(func), - 'state': state} - LOG.exception(msg) - - self._move_state(state) - - return self - - def _start(self): - return self._invoke_monitor(constants.TaskState.START) - - def _executed(self): - return self._invoke_monitor(constants.TaskState.EXECUTED) - - def _update_status(self, status): - if self.status == status: - return self - - self.status = status - - def _finished(self): - return self._invoke_monitor(constants.TaskState.RESULT) - - def add_start_monitor(self, func): - return self._add_monitor(constants.TaskState.START, func) - - def add_executed_monitor(self, func): - return self._add_monitor(constants.TaskState.EXECUTED, func) - - def add_result_monitor(self, func): - return 
self._add_monitor(constants.TaskState.RESULT, func) - - def wait(self, state): - if (state < constants.TaskState.START or - state > constants.TaskState.RESULT or - state == constants.TaskState.STATUS): - raise InvalidState(state=state) - - if state <= self._state: - # we already passed this current state, so no wait - return - - e = event.Event() - self._states[state] = e - e.wait() - - def __repr__(self): - return "Task-%s-%s-%s" % ( - self.name, self.resource_id, self.id) - - -class TaskManager(): - - _instance = None - _default_interval = DEFAULT_INTERVAL - - def __init__(self, interval=None): - self._interval = interval or TaskManager._default_interval - - # A queue to pass tasks from other threads - self._tasks_queue = collections.deque() - - # A dict to store resource -> resource's tasks - self._tasks = {} - - # Current task being executed in main thread - self._main_thread_exec_task = None - - # New request event - self._req = event.Event() - - # TaskHandler stopped event - self._stopped = False - - # Periodic function trigger - self._monitor = None - self._monitor_busy = False - - # Thread handling the task request - self._thread = None - - def _execute(self, task): - """Execute task.""" - msg = _("Start task %s") % str(task) - LOG.debug(msg) - task._start() - try: - status = task._execute_callback(task) - except Exception: - msg = _("Task %(task)s encountered exception in %(cb)s") % { - 'task': str(task), - 'cb': str(task._execute_callback)} - LOG.exception(msg) - status = constants.TaskStatus.ERROR - - LOG.debug(_("Task %(task)s return %(status)s"), { - 'task': str(task), - 'status': status}) - - task._update_status(status) - task._executed() - - return status - - def _result(self, task): - """Notify task execution result.""" - try: - task._result_callback(task) - except Exception: - msg = _("Task %(task)s encountered exception in %(cb)s") % { - 'task': str(task), - 'cb': str(task._result_callback)} - LOG.exception(msg) - - LOG.debug(_("Task %(task)s 
return %(status)s"), - {'task': str(task), 'status': task.status}) - - task._finished() - - def _check_pending_tasks(self): - """Check all pending tasks status.""" - for resource_id in self._tasks.keys(): - if self._stopped: - # Task manager is stopped, return now - return - - tasks = self._tasks[resource_id] - # only the first task is executed and pending - task = tasks[0] - try: - status = task._status_callback(task) - except Exception: - msg = _("Task %(task)s encountered exception in %(cb)s") % { - 'task': str(task), - 'cb': str(task._status_callback)} - LOG.exception(msg) - status = constants.TaskStatus.ERROR - task._update_status(status) - if status != constants.TaskStatus.PENDING: - self._dequeue(task, True) - - def _enqueue(self, task): - if task.resource_id in self._tasks: - # append to existing resource queue for ordered processing - self._tasks[task.resource_id].append(task) - else: - # put the task to a new resource queue - tasks = collections.deque() - tasks.append(task) - self._tasks[task.resource_id] = tasks - - def _dequeue(self, task, run_next): - self._result(task) - tasks = self._tasks[task.resource_id] - tasks.remove(task) - if not tasks: - # no more tasks for this resource - del self._tasks[task.resource_id] - return - - if run_next: - # process next task for this resource - while tasks: - task = tasks[0] - status = self._execute(task) - if status == constants.TaskStatus.PENDING: - break - self._dequeue(task, False) - - def _abort(self): - """Abort all tasks.""" - # put all tasks haven't been received by main thread to queue - # so the following abort handling can cover them - for t in self._tasks_queue: - self._enqueue(t) - self._tasks_queue.clear() - - for resource_id in self._tasks.keys(): - tasks = list(self._tasks[resource_id]) - for task in tasks: - task._update_status(constants.TaskStatus.ABORT) - self._dequeue(task, False) - - def _get_task(self): - """Get task request.""" - while True: - for t in self._tasks_queue: - return 
self._tasks_queue.popleft() - self._req.wait() - self._req.reset() - - def run(self): - while True: - try: - if self._stopped: - # Gracefully terminate this thread if the _stopped - # attribute was set to true - LOG.info(_("Stopping TaskManager")) - break - - # get a task from queue, or timeout for periodic status check - task = self._get_task() - if task.resource_id in self._tasks: - # this resource already has some tasks under processing, - # append the task to same queue for ordered processing - self._enqueue(task) - continue - - try: - self._main_thread_exec_task = task - self._execute(task) - finally: - self._main_thread_exec_task = None - if task.status is None: - # The thread is killed during _execute(). To guarantee - # the task been aborted correctly, put it to the queue. - self._enqueue(task) - elif task.status != constants.TaskStatus.PENDING: - self._result(task) - else: - self._enqueue(task) - except Exception: - LOG.exception(_("TaskManager terminating because " - "of an exception")) - break - - def add(self, task): - task.id = uuid.uuid1() - self._tasks_queue.append(task) - if not self._req.ready(): - self._req.send() - return task.id - - def stop(self): - if self._thread is None: - return - self._stopped = True - self._thread.kill() - self._thread = None - # Stop looping call and abort running tasks - self._monitor.stop() - if self._monitor_busy: - self._monitor.wait() - self._abort() - LOG.info(_("TaskManager terminated")) - - def has_pending_task(self): - if self._tasks_queue or self._tasks or self._main_thread_exec_task: - return True - else: - return False - - def show_pending_tasks(self): - for task in self._tasks_queue: - LOG.info(str(task)) - for resource, tasks in self._tasks.iteritems(): - for task in tasks: - LOG.info(str(task)) - if self._main_thread_exec_task: - LOG.info(str(self._main_thread_exec_task)) - - def count(self): - count = 0 - for resource_id, tasks in self._tasks.iteritems(): - count += len(tasks) - return count - - def 
start(self, interval=None): - def _inner(): - self.run() - - def _loopingcall_callback(): - self._monitor_busy = True - try: - self._check_pending_tasks() - except Exception: - LOG.exception(_("Exception in _check_pending_tasks")) - self._monitor_busy = False - - if self._thread is not None: - return self - - if interval is None or interval == 0: - interval = self._interval - - self._stopped = False - self._thread = greenthread.spawn(_inner) - self._monitor = loopingcall.FixedIntervalLoopingCall( - _loopingcall_callback) - self._monitor.start(interval / 1000.0, - interval / 1000.0) - # To allow the created thread start running - greenthread.sleep(0) - - return self - - @classmethod - def set_default_interval(cls, interval): - cls._default_interval = interval diff --git a/neutron/plugins/vmware/vshield/vcns.py b/neutron/plugins/vmware/vshield/vcns.py deleted file mode 100644 index 11f0c1e2f..000000000 --- a/neutron/plugins/vmware/vshield/vcns.py +++ /dev/null @@ -1,304 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2013 VMware, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# -# @author: linb, VMware - -from neutron.openstack.common import jsonutils -from neutron.openstack.common import log as logging -from neutron.plugins.vmware.vshield.common import VcnsApiClient - -LOG = logging.getLogger(__name__) - -HTTP_GET = "GET" -HTTP_POST = "POST" -HTTP_DELETE = "DELETE" -HTTP_PUT = "PUT" -URI_PREFIX = "/api/4.0/edges" - -#FwaaS constants -FIREWALL_SERVICE = "firewall/config" -FIREWALL_RULE_RESOURCE = "rules" - -#LbaaS Constants -LOADBALANCER_SERVICE = "loadbalancer/config" -VIP_RESOURCE = "virtualservers" -POOL_RESOURCE = "pools" -MONITOR_RESOURCE = "monitors" -APP_PROFILE_RESOURCE = "applicationprofiles" - -# IPsec VPNaaS Constants -IPSEC_VPN_SERVICE = 'ipsec/config' - - -class Vcns(object): - - def __init__(self, address, user, password): - self.address = address - self.user = user - self.password = password - self.jsonapi_client = VcnsApiClient.VcnsApiHelper(address, user, - password, 'json') - - def do_request(self, method, uri, params=None, format='json', **kwargs): - LOG.debug(_("VcnsApiHelper('%(method)s', '%(uri)s', '%(body)s')"), { - 'method': method, - 'uri': uri, - 'body': jsonutils.dumps(params)}) - if format == 'json': - header, content = self.jsonapi_client.request(method, uri, params) - else: - header, content = self.xmlapi_client.request(method, uri, params) - LOG.debug(_("Header: '%s'"), header) - LOG.debug(_("Content: '%s'"), content) - if content == '': - return header, {} - if kwargs.get('decode', True): - content = jsonutils.loads(content) - return header, content - - def deploy_edge(self, request): - uri = URI_PREFIX + "?async=true" - return self.do_request(HTTP_POST, uri, request, decode=False) - - def get_edge_id(self, job_id): - uri = URI_PREFIX + "/jobs/%s" % job_id - return self.do_request(HTTP_GET, uri, decode=True) - - def get_edge_deploy_status(self, edge_id): - uri = URI_PREFIX + "/%s/status?getlatest=false" % edge_id - return self.do_request(HTTP_GET, uri, decode="True") - - def delete_edge(self, edge_id): - 
uri = "%s/%s" % (URI_PREFIX, edge_id) - return self.do_request(HTTP_DELETE, uri) - - def update_interface(self, edge_id, vnic): - uri = "%s/%s/vnics/%d" % (URI_PREFIX, edge_id, vnic['index']) - return self.do_request(HTTP_PUT, uri, vnic, decode=True) - - def get_nat_config(self, edge_id): - uri = "%s/%s/nat/config" % (URI_PREFIX, edge_id) - return self.do_request(HTTP_GET, uri, decode=True) - - def update_nat_config(self, edge_id, nat): - uri = "%s/%s/nat/config" % (URI_PREFIX, edge_id) - return self.do_request(HTTP_PUT, uri, nat, decode=True) - - def delete_nat_rule(self, edge_id, rule_id): - uri = "%s/%s/nat/config/rules/%s" % (URI_PREFIX, edge_id, rule_id) - return self.do_request(HTTP_DELETE, uri, decode=True) - - def get_edge_status(self, edge_id): - uri = "%s/%s/status?getlatest=false" % (URI_PREFIX, edge_id) - return self.do_request(HTTP_GET, uri, decode=True) - - def get_edges(self): - uri = URI_PREFIX - return self.do_request(HTTP_GET, uri, decode=True) - - def update_routes(self, edge_id, routes): - uri = "%s/%s/routing/config/static" % (URI_PREFIX, edge_id) - return self.do_request(HTTP_PUT, uri, routes) - - def create_lswitch(self, lsconfig): - uri = "/api/ws.v1/lswitch" - return self.do_request(HTTP_POST, uri, lsconfig, decode=True) - - def delete_lswitch(self, lswitch_id): - uri = "/api/ws.v1/lswitch/%s" % lswitch_id - return self.do_request(HTTP_DELETE, uri) - - def get_loadbalancer_config(self, edge_id): - uri = self._build_uri_path(edge_id, LOADBALANCER_SERVICE) - return self.do_request(HTTP_GET, uri, decode=True) - - def enable_service_loadbalancer(self, edge_id, config): - uri = self._build_uri_path(edge_id, LOADBALANCER_SERVICE) - return self.do_request(HTTP_PUT, uri, config) - - def update_firewall(self, edge_id, fw_req): - uri = self._build_uri_path( - edge_id, FIREWALL_SERVICE) - return self.do_request(HTTP_PUT, uri, fw_req) - - def delete_firewall(self, edge_id): - uri = self._build_uri_path( - edge_id, FIREWALL_SERVICE, None) - return 
self.do_request(HTTP_DELETE, uri) - - def update_firewall_rule(self, edge_id, vcns_rule_id, fwr_req): - uri = self._build_uri_path( - edge_id, FIREWALL_SERVICE, - FIREWALL_RULE_RESOURCE, - vcns_rule_id) - return self.do_request(HTTP_PUT, uri, fwr_req) - - def delete_firewall_rule(self, edge_id, vcns_rule_id): - uri = self._build_uri_path( - edge_id, FIREWALL_SERVICE, - FIREWALL_RULE_RESOURCE, - vcns_rule_id) - return self.do_request(HTTP_DELETE, uri) - - def add_firewall_rule_above(self, edge_id, ref_vcns_rule_id, fwr_req): - uri = self._build_uri_path( - edge_id, FIREWALL_SERVICE, - FIREWALL_RULE_RESOURCE) - uri += "?aboveRuleId=" + ref_vcns_rule_id - return self.do_request(HTTP_POST, uri, fwr_req) - - def add_firewall_rule(self, edge_id, fwr_req): - uri = self._build_uri_path( - edge_id, FIREWALL_SERVICE, - FIREWALL_RULE_RESOURCE) - return self.do_request(HTTP_POST, uri, fwr_req) - - def get_firewall(self, edge_id): - uri = self._build_uri_path(edge_id, FIREWALL_SERVICE) - return self.do_request(HTTP_GET, uri, decode=True) - - def get_firewall_rule(self, edge_id, vcns_rule_id): - uri = self._build_uri_path( - edge_id, FIREWALL_SERVICE, - FIREWALL_RULE_RESOURCE, - vcns_rule_id) - return self.do_request(HTTP_GET, uri, decode=True) - - # - #Edge LBAAS call helper - # - def create_vip(self, edge_id, vip_new): - uri = self._build_uri_path( - edge_id, LOADBALANCER_SERVICE, - VIP_RESOURCE) - return self.do_request(HTTP_POST, uri, vip_new) - - def get_vip(self, edge_id, vip_vseid): - uri = self._build_uri_path( - edge_id, LOADBALANCER_SERVICE, - VIP_RESOURCE, vip_vseid) - return self.do_request(HTTP_GET, uri, decode=True) - - def update_vip(self, edge_id, vip_vseid, vip_new): - uri = self._build_uri_path( - edge_id, LOADBALANCER_SERVICE, - VIP_RESOURCE, vip_vseid) - return self.do_request(HTTP_PUT, uri, vip_new) - - def delete_vip(self, edge_id, vip_vseid): - uri = self._build_uri_path( - edge_id, LOADBALANCER_SERVICE, - VIP_RESOURCE, vip_vseid) - return 
self.do_request(HTTP_DELETE, uri) - - def create_pool(self, edge_id, pool_new): - uri = self._build_uri_path( - edge_id, LOADBALANCER_SERVICE, - POOL_RESOURCE) - return self.do_request(HTTP_POST, uri, pool_new) - - def get_pool(self, edge_id, pool_vseid): - uri = self._build_uri_path( - edge_id, LOADBALANCER_SERVICE, - POOL_RESOURCE, pool_vseid) - return self.do_request(HTTP_GET, uri, decode=True) - - def update_pool(self, edge_id, pool_vseid, pool_new): - uri = self._build_uri_path( - edge_id, LOADBALANCER_SERVICE, - POOL_RESOURCE, pool_vseid) - return self.do_request(HTTP_PUT, uri, pool_new) - - def delete_pool(self, edge_id, pool_vseid): - uri = self._build_uri_path( - edge_id, LOADBALANCER_SERVICE, - POOL_RESOURCE, pool_vseid) - return self.do_request(HTTP_DELETE, uri) - - def create_health_monitor(self, edge_id, monitor_new): - uri = self._build_uri_path( - edge_id, LOADBALANCER_SERVICE, - MONITOR_RESOURCE) - return self.do_request(HTTP_POST, uri, monitor_new) - - def get_health_monitor(self, edge_id, monitor_vseid): - uri = self._build_uri_path( - edge_id, LOADBALANCER_SERVICE, - MONITOR_RESOURCE, monitor_vseid) - return self.do_request(HTTP_GET, uri, decode=True) - - def update_health_monitor(self, edge_id, monitor_vseid, monitor_new): - uri = self._build_uri_path( - edge_id, LOADBALANCER_SERVICE, - MONITOR_RESOURCE, - monitor_vseid) - return self.do_request(HTTP_PUT, uri, monitor_new) - - def delete_health_monitor(self, edge_id, monitor_vseid): - uri = self._build_uri_path( - edge_id, LOADBALANCER_SERVICE, - MONITOR_RESOURCE, - monitor_vseid) - return self.do_request(HTTP_DELETE, uri) - - def create_app_profile(self, edge_id, app_profile): - uri = self._build_uri_path( - edge_id, LOADBALANCER_SERVICE, - APP_PROFILE_RESOURCE) - return self.do_request(HTTP_POST, uri, app_profile) - - def update_app_profile(self, edge_id, app_profileid, app_profile): - uri = self._build_uri_path( - edge_id, LOADBALANCER_SERVICE, - APP_PROFILE_RESOURCE, app_profileid) - return 
self.do_request(HTTP_PUT, uri, app_profile) - - def delete_app_profile(self, edge_id, app_profileid): - uri = self._build_uri_path( - edge_id, LOADBALANCER_SERVICE, - APP_PROFILE_RESOURCE, - app_profileid) - return self.do_request(HTTP_DELETE, uri) - - def update_ipsec_config(self, edge_id, ipsec_config): - uri = self._build_uri_path(edge_id, IPSEC_VPN_SERVICE) - return self.do_request(HTTP_PUT, uri, ipsec_config) - - def delete_ipsec_config(self, edge_id): - uri = self._build_uri_path(edge_id, IPSEC_VPN_SERVICE) - return self.do_request(HTTP_DELETE, uri) - - def get_ipsec_config(self, edge_id): - uri = self._build_uri_path(edge_id, IPSEC_VPN_SERVICE) - return self.do_request(HTTP_GET, uri) - - def _build_uri_path(self, edge_id, - service, - resource=None, - resource_id=None, - parent_resource_id=None, - fields=None, - relations=None, - filters=None, - types=None, - is_attachment=False): - uri_prefix = "%s/%s/%s" % (URI_PREFIX, edge_id, service) - if resource: - res_path = resource + (resource_id and "/%s" % resource_id or '') - uri_path = "%s/%s" % (uri_prefix, res_path) - else: - uri_path = uri_prefix - return uri_path diff --git a/neutron/plugins/vmware/vshield/vcns_driver.py b/neutron/plugins/vmware/vshield/vcns_driver.py deleted file mode 100644 index e705b3329..000000000 --- a/neutron/plugins/vmware/vshield/vcns_driver.py +++ /dev/null @@ -1,53 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2013 VMware, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: linb, VMware - -from oslo.config import cfg - -from neutron.openstack.common import log as logging -from neutron.plugins.vmware.common import config # noqa -from neutron.plugins.vmware.vshield import edge_appliance_driver -from neutron.plugins.vmware.vshield import edge_firewall_driver -from neutron.plugins.vmware.vshield import edge_ipsecvpn_driver -from neutron.plugins.vmware.vshield import edge_loadbalancer_driver -from neutron.plugins.vmware.vshield.tasks import tasks -from neutron.plugins.vmware.vshield import vcns - -LOG = logging.getLogger(__name__) - - -class VcnsDriver(edge_appliance_driver.EdgeApplianceDriver, - edge_firewall_driver.EdgeFirewallDriver, - edge_loadbalancer_driver.EdgeLbDriver, - edge_ipsecvpn_driver.EdgeIPsecVpnDriver): - - def __init__(self, callbacks): - super(VcnsDriver, self).__init__() - - self.callbacks = callbacks - self.vcns_uri = cfg.CONF.vcns.manager_uri - self.vcns_user = cfg.CONF.vcns.user - self.vcns_passwd = cfg.CONF.vcns.password - self.datacenter_moid = cfg.CONF.vcns.datacenter_moid - self.deployment_container_id = cfg.CONF.vcns.deployment_container_id - self.resource_pool_id = cfg.CONF.vcns.resource_pool_id - self.datastore_id = cfg.CONF.vcns.datastore_id - self.external_network = cfg.CONF.vcns.external_network - interval = cfg.CONF.vcns.task_status_check_interval - self.task_manager = tasks.TaskManager(interval) - self.task_manager.start() - self.vcns = vcns.Vcns(self.vcns_uri, self.vcns_user, self.vcns_passwd)