add neutron cascaded big2layer patch

Change-Id: I0860ed5a6291ea7c19611738e7a519de5f24b728
zhangni 2014-11-29 18:41:20 +08:00
parent 83da6bf451
commit d4e3d1c813
7 changed files with 1168 additions and 0 deletions

@@ -0,0 +1,97 @@
#!/bin/bash
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Copyright (c) 2014 Huawei Technologies.
_NEUTRON_CONF_DIR="/etc/neutron"
_NEUTRON_CONF_FILE='neutron.conf'
_NEUTRON_INSTALL="/usr/lib/python2.7/dist-packages"
_NEUTRON_DIR="${_NEUTRON_INSTALL}/neutron"

# If you did not make changes to the installation files,
# please do not edit the following directories.
_CODE_DIR="../neutron/"
_BACKUP_DIR="${_NEUTRON_INSTALL}/.neutron-cascaded-server-big2layer-patch-installation-backup"

if [[ ${EUID} -ne 0 ]]; then
    echo "Please run as root."
    exit 1
fi

## Redirect output to the logfile as well as stdout
#exec > >(tee -a ${_SCRIPT_LOGFILE})
#exec 2> >(tee -a ${_SCRIPT_LOGFILE} >&2)

cd "$(dirname "$0")"

echo "checking installation directories..."
if [ ! -d "${_NEUTRON_DIR}" ]; then
    echo "Could not find the neutron installation. Please check the variables at the beginning of the script."
    echo "aborted."
    exit 1
fi
if [ ! -f "${_NEUTRON_CONF_DIR}/${_NEUTRON_CONF_FILE}" ]; then
    echo "Could not find the neutron config file. Please check the variables at the beginning of the script."
    echo "aborted."
    exit 1
fi

echo "checking previous installation..."
if [ -d "${_BACKUP_DIR}/neutron" ]; then
    echo "It seems neutron-server-big2layer-cascaded-patch has already been installed!"
    echo "Please check the README for a solution if this is not true."
    exit 1
fi

echo "backing up current files that might be overwritten..."
mkdir -p "${_BACKUP_DIR}"
cp -r "${_NEUTRON_DIR}/" "${_BACKUP_DIR}/"
if [ $? -ne 0 ]; then
    rm -r "${_BACKUP_DIR}/neutron"
    echo "Error in code backup, aborted."
    exit 1
fi

echo "copying in new files..."
cp -r "${_CODE_DIR}" "$(dirname "${_NEUTRON_DIR}")"
if [ $? -ne 0 ]; then
    echo "Error in copying, aborted."
    echo "Recovering original files..."
    cp -r "${_BACKUP_DIR}/neutron" "$(dirname "${_NEUTRON_DIR}")" && rm -r "${_BACKUP_DIR}/neutron"
    if [ $? -ne 0 ]; then
        echo "Recovery failed! Please install manually."
    fi
    exit 1
fi

echo "restarting cascaded neutron server..."
service neutron-server restart
if [ $? -ne 0 ]; then
    echo "There was an error in restarting the service; please restart the cascaded neutron server manually."
    exit 1
fi

echo "restarting cascaded neutron-plugin-openvswitch-agent..."
service neutron-plugin-openvswitch-agent restart
if [ $? -ne 0 ]; then
    echo "There was an error in restarting the service; please restart the cascaded neutron-plugin-openvswitch-agent manually."
    exit 1
fi

echo "restarting cascaded neutron-l3-agent..."
service neutron-l3-agent restart
if [ $? -ne 0 ]; then
    echo "There was an error in restarting the service; please restart the cascaded neutron-l3-agent manually."
    exit 1
fi

echo "Completed."
echo "See the README to get started."
exit 0
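
Note that _CODE_DIR is relative, so the installer must be run from the directory that contains it. A typical invocation might look like the following sketch (the unpack path and script name are assumptions, not part of this commit):

    # run from inside the patch's installation directory so ../neutron/ resolves
    cd /root/big2layer-patch/installation    # hypothetical unpack path
    sudo bash install.sh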

@@ -0,0 +1,28 @@
# Copyright (c) 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg

l2_population_options = [
    cfg.IntOpt('agent_boot_time', default=180,
               help=_('Delay within which agent is expected to update '
                      'existing ports when it restarts')),
    cfg.StrOpt('cascaded_gateway', default='no_gateway',
               help=_('If the gateway host does not exist, configure '
                      'no_gateway; otherwise configure admin_gateway '
                      'or population_opt')),
]

cfg.CONF.register_opts(l2_population_options, "l2pop")
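
Because the options are registered under the "l2pop" group, they are read from the [l2pop] section of whichever config file the neutron server loads (neutron.conf or the ml2 plugin ini). A minimal sketch using only the values this patch recognizes:

    [l2pop]
    # agents with uptime below this (seconds) receive the full fdb list
    agent_boot_time = 180
    # one of: no_gateway, admin_gateway, population_opt
    cascaded_gateway = no_gateway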

@@ -0,0 +1,136 @@
# Copyright (c) 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import sql

from neutron.common import constants as const
from neutron.db import agents_db
from neutron.db import common_db_mixin as base_db
from neutron.db import models_v2
from neutron.openstack.common import jsonutils
from neutron.openstack.common import timeutils
from neutron.plugins.ml2.drivers.l2pop import constants as l2_const
from neutron.plugins.ml2 import models as ml2_models


class L2populationDbMixin(base_db.CommonDbMixin):

    def get_agent_ip_by_host(self, session, agent_host):
        agent = self.get_agent_by_host(session, agent_host)
        if agent:
            return self.get_agent_ip(agent)

    def get_agent_ip(self, agent):
        configuration = jsonutils.loads(agent.configurations)
        return configuration.get('tunneling_ip')

    def get_agent_uptime(self, agent):
        return timeutils.delta_seconds(agent.started_at,
                                       agent.heartbeat_timestamp)

    def get_agent_tunnel_types(self, agent):
        configuration = jsonutils.loads(agent.configurations)
        return configuration.get('tunnel_types')

    def get_agent_l2pop_network_types(self, agent):
        configuration = jsonutils.loads(agent.configurations)
        return configuration.get('l2pop_network_types')

    def get_agent_by_host(self, session, agent_host):
        with session.begin(subtransactions=True):
            query = session.query(agents_db.Agent)
            query = query.filter(agents_db.Agent.host == agent_host,
                                 agents_db.Agent.agent_type.in_(
                                     l2_const.SUPPORTED_AGENT_TYPES))
            return query.first()

    def get_network_ports(self, session, network_id):
        with session.begin(subtransactions=True):
            query = session.query(ml2_models.PortBinding,
                                  agents_db.Agent)
            query = query.join(agents_db.Agent,
                               agents_db.Agent.host ==
                               ml2_models.PortBinding.host)
            query = query.join(models_v2.Port)
            query = query.filter(models_v2.Port.network_id == network_id,
                                 models_v2.Port.admin_state_up == sql.true(),
                                 agents_db.Agent.agent_type.in_(
                                     l2_const.SUPPORTED_AGENT_TYPES))
            return query

    def get_nondvr_network_ports(self, session, network_id):
        query = self.get_network_ports(session, network_id)
        return query.filter(models_v2.Port.device_owner !=
                            const.DEVICE_OWNER_DVR_INTERFACE)

    def get_dvr_network_ports(self, session, network_id):
        with session.begin(subtransactions=True):
            query = session.query(ml2_models.DVRPortBinding,
                                  agents_db.Agent)
            query = query.join(agents_db.Agent,
                               agents_db.Agent.host ==
                               ml2_models.DVRPortBinding.host)
            query = query.join(models_v2.Port)
            query = query.filter(models_v2.Port.network_id == network_id,
                                 models_v2.Port.admin_state_up == sql.true(),
                                 models_v2.Port.device_owner ==
                                 const.DEVICE_OWNER_DVR_INTERFACE,
                                 agents_db.Agent.agent_type.in_(
                                     l2_const.SUPPORTED_AGENT_TYPES))
            return query

    def get_agent_network_active_port_count(self, session, agent_host,
                                            network_id):
        with session.begin(subtransactions=True):
            query = session.query(models_v2.Port)
            query1 = query.join(ml2_models.PortBinding)
            query1 = query1.filter(models_v2.Port.network_id == network_id,
                                   models_v2.Port.status ==
                                   const.PORT_STATUS_ACTIVE,
                                   models_v2.Port.device_owner !=
                                   const.DEVICE_OWNER_DVR_INTERFACE,
                                   ml2_models.PortBinding.host == agent_host)
            query2 = query.join(ml2_models.DVRPortBinding)
            query2 = query2.filter(models_v2.Port.network_id == network_id,
                                   ml2_models.DVRPortBinding.status ==
                                   const.PORT_STATUS_ACTIVE,
                                   models_v2.Port.device_owner ==
                                   const.DEVICE_OWNER_DVR_INTERFACE,
                                   ml2_models.DVRPortBinding.host ==
                                   agent_host)
            return (query1.count() + query2.count())

    def get_host_ip_from_binding_profile(self, profile):
        if not profile:
            return
        profile = jsonutils.loads(profile)
        return profile.get('host_ip')

    def get_segment_by_network_id(self, session, network_id):
        with session.begin(subtransactions=True):
            query = session.query(ml2_models.NetworkSegment)
            query = query.filter(
                ml2_models.NetworkSegment.network_id == network_id,
                ml2_models.NetworkSegment.network_type == 'vxlan')
            return query.first()

    def get_remote_ports(self, session, network_id):
        with session.begin(subtransactions=True):
            query = session.query(ml2_models.PortBinding)
            query = query.join(models_v2.Port)
            query = query.filter(
                models_v2.Port.network_id == network_id,
                ml2_models.PortBinding.profile.contains(
                    '"port_key": "remote_port"'))
            return query
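
get_remote_ports() filters on a raw substring of the serialized binding:profile, which get_host_ip_from_binding_profile() then parses as JSON. A minimal sketch of a profile that satisfies both helpers (the key set beyond port_key and host_ip is an assumption):

    from neutron.openstack.common import jsonutils

    # serialized form as stored in the ml2 port binding's 'profile' column
    profile_str = jsonutils.dumps({'port_key': 'remote_port',
                                   'host_ip': '192.168.1.10'})
    # the query above matches this exact substring of the JSON text
    assert '"port_key": "remote_port"' in profile_str
    assert jsonutils.loads(profile_str).get('host_ip') == '192.168.1.10'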

@@ -0,0 +1,383 @@
# Copyright (c) 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg

from neutron.common import constants as const
from neutron import context as n_context
from neutron.db import api as db_api
from neutron.openstack.common import log as logging
from neutron.plugins.ml2 import driver_api as api
from neutron.plugins.ml2.drivers.l2pop import config  # noqa
from neutron.plugins.ml2.drivers.l2pop import db as l2pop_db
from neutron.plugins.ml2.drivers.l2pop import rpc as l2pop_rpc

LOG = logging.getLogger(__name__)


class L2populationMechanismDriver(api.MechanismDriver,
                                  l2pop_db.L2populationDbMixin):

    def __init__(self):
        super(L2populationMechanismDriver, self).__init__()
        self.L2populationAgentNotify = l2pop_rpc.L2populationAgentNotifyAPI()

    def initialize(self):
        LOG.debug(_("Experimental L2 population driver"))
        self.rpc_ctx = n_context.get_admin_context_without_session()
        self.migrated_ports = {}
        self.remove_fdb_entries = {}
        self.remove_remote_ports_fdb = {}

    def _get_port_fdb_entries(self, port):
        return [[port['mac_address'],
                 ip['ip_address']] for ip in port['fixed_ips']]

    def _is_remote_port(self, port):
        return port['binding:profile'].get('port_key') == 'remote_port'

    def create_port_postcommit(self, context):
        """If the port is a "remote_port", notify all L2 agents or only
        the L2 gateway agent; otherwise do nothing.
        """
        port_context = context.current
        if self._is_remote_port(port_context):
            other_fdb_entries = self.get_remote_port_fdb(port_context)
            if not other_fdb_entries:
                return
            if cfg.CONF.l2pop.cascaded_gateway == 'no_gateway':
                # notify all l2-agents
                self.L2populationAgentNotify.add_fdb_entries(
                    self.rpc_ctx, other_fdb_entries)
            else:
                # only notify the l2-gateway-agent
                pass

    def get_remote_port_fdb(self, port_context):
        port_id = port_context['id']
        network_id = port_context['network_id']
        session = db_api.get_session()
        segment = self.get_segment_by_network_id(session, network_id)
        if not segment:
            LOG.warning(_("Network %(network_id)s has no vxlan provider "
                          "segment, so the segment cannot be retrieved"),
                        {'network_id': network_id})
            return
        ip = port_context['binding:profile'].get('host_ip')
        if not ip:
            LOG.debug(_("Unable to retrieve the ip from the remote port, "
                        "check the remote port %(port_id)s."),
                      {'port_id': port_id})
            return
        other_fdb_entries = {network_id:
                             {'segment_id': segment.segmentation_id,
                              'network_type': segment.network_type,
                              'ports': {}}}
        ports = other_fdb_entries[network_id]['ports']
        agent_ports = ports.get(ip, [const.FLOODING_ENTRY])
        agent_ports += self._get_port_fdb_entries(port_context)
        ports[ip] = agent_ports
        return other_fdb_entries

    def _get_agent_host(self, context, port):
        if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
            agent_host = context.binding.host
        else:
            agent_host = port['binding:host_id']
        return agent_host

    def delete_port_precommit(self, context):
        # TODO(matrohon): revisit once the original bound segment is
        # available in delete_port_postcommit. In delete_port_postcommit
        # agent_active_ports will be equal to 0, and _update_port_down
        # won't need agent_active_ports_count_for_flooding anymore
        port = context.current
        if self._is_remote_port(port):
            fdb_entry = self.get_remote_port_fdb(port)
            self.remove_remote_ports_fdb[port['id']] = fdb_entry
        agent_host = context.host  # self._get_agent_host(context, port)
        if port['id'] not in self.remove_fdb_entries:
            self.remove_fdb_entries[port['id']] = {}
        self.remove_fdb_entries[port['id']][agent_host] = (
            self._update_port_down(context, port, agent_host))

    def delete_port_postcommit(self, context):
        port = context.current
        agent_host = context.host  # self._get_agent_host(context, port)
        if port['id'] in self.remove_fdb_entries:
            for agent_host in list(self.remove_fdb_entries[port['id']]):
                self.L2populationAgentNotify.remove_fdb_entries(
                    self.rpc_ctx,
                    self.remove_fdb_entries[port['id']][agent_host])
                self.remove_fdb_entries[port['id']].pop(agent_host, 0)
            self.remove_fdb_entries.pop(port['id'], 0)
        remote_port_fdb = self.remove_remote_ports_fdb.pop(
            context.current['id'], None)
        if remote_port_fdb:
            self.L2populationAgentNotify.remove_fdb_entries(
                self.rpc_ctx, remote_port_fdb)

    def _get_diff_ips(self, orig, port):
        orig_ips = set([ip['ip_address'] for ip in orig['fixed_ips']])
        port_ips = set([ip['ip_address'] for ip in port['fixed_ips']])
        # check if an ip has been added or removed
        orig_chg_ips = orig_ips.difference(port_ips)
        port_chg_ips = port_ips.difference(orig_ips)
        if orig_chg_ips or port_chg_ips:
            return orig_chg_ips, port_chg_ips

    def _fixed_ips_changed(self, context, orig, port, diff_ips):
        orig_ips, port_ips = diff_ips
        if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
            agent_host = context.host
        else:
            agent_host = context.original_host
        port_infos = self._get_port_infos(
            context, orig, agent_host)
        if not port_infos:
            return
        agent, agent_host, agent_ip, segment, port_fdb_entries = port_infos
        orig_mac_ip = [[port['mac_address'], ip] for ip in orig_ips]
        port_mac_ip = [[port['mac_address'], ip] for ip in port_ips]
        upd_fdb_entries = {port['network_id']: {agent_ip: {}}}
        ports = upd_fdb_entries[port['network_id']][agent_ip]
        if orig_mac_ip:
            ports['before'] = orig_mac_ip
        if port_mac_ip:
            ports['after'] = port_mac_ip
        self.L2populationAgentNotify.update_fdb_entries(
            self.rpc_ctx, {'chg_ip': upd_fdb_entries})
        return True

    def update_port_postcommit(self, context):
        port = context.current
        orig = context.original
        diff_ips = self._get_diff_ips(orig, port)
        if diff_ips:
            self._fixed_ips_changed(context, orig, port, diff_ips)
        if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
            if context.status == const.PORT_STATUS_ACTIVE:
                self._update_port_up(context)
            if context.status == const.PORT_STATUS_DOWN:
                agent_host = context.host
                fdb_entries = self._update_port_down(
                    context, port, agent_host)
                self.L2populationAgentNotify.remove_fdb_entries(
                    self.rpc_ctx, fdb_entries)
        elif (context.host != context.original_host
              and context.status == const.PORT_STATUS_ACTIVE
              and not self.migrated_ports.get(orig['id'])):
            # The port has been migrated. We have to store the original
            # binding to send the appropriate fdb entries once the port
            # is set up on the destination host
            self.migrated_ports[orig['id']] = (
                (orig, context.original_host))
        elif context.status != context.original_status:
            if context.status == const.PORT_STATUS_ACTIVE:
                self._update_port_up(context)
            elif context.status == const.PORT_STATUS_DOWN:
                fdb_entries = self._update_port_down(
                    context, port, context.host)
                self.L2populationAgentNotify.remove_fdb_entries(
                    self.rpc_ctx, fdb_entries)
            elif context.status == const.PORT_STATUS_BUILD:
                orig = self.migrated_ports.pop(port['id'], None)
                if orig:
                    original_port = orig[0]
                    original_host = orig[1]
                    # this port has been migrated: remove its entries from fdb
                    fdb_entries = self._update_port_down(
                        context, original_port, original_host)
                    self.L2populationAgentNotify.remove_fdb_entries(
                        self.rpc_ctx, fdb_entries)

    def _get_port_infos(self, context, port, agent_host):
        if not agent_host:
            return
        session = db_api.get_session()
        agent = self.get_agent_by_host(session, agent_host)
        if not agent:
            return
        agent_ip = self.get_agent_ip(agent)
        if not agent_ip:
            LOG.warning(_("Unable to retrieve the agent ip, check the agent "
                          "configuration."))
            return
        segment = context.bound_segment
        if not segment:
            LOG.warning(_("Port %(port)s updated by agent %(agent)s "
                          "isn't bound to any segment"),
                        {'port': port['id'], 'agent': agent})
            return
        network_types = self.get_agent_l2pop_network_types(agent)
        if network_types is None:
            network_types = self.get_agent_tunnel_types(agent)
        if segment['network_type'] not in network_types:
            return
        fdb_entries = self._get_port_fdb_entries(port)
        return agent, agent_host, agent_ip, segment, fdb_entries

    def _update_port_up(self, context):
        port = context.current
        agent_host = context.host
        port_infos = self._get_port_infos(context, port, agent_host)
        if not port_infos:
            return
        agent, agent_host, agent_ip, segment, port_fdb_entries = port_infos
        network_id = port['network_id']
        session = db_api.get_session()
        agent_active_ports = self.get_agent_network_active_port_count(
            session, agent_host, network_id)
        other_fdb_entries = {network_id:
                             {'segment_id': segment['segmentation_id'],
                              'network_type': segment['network_type'],
                              'ports': {agent_ip: []}}}
        if agent_active_ports == 1 or (
                self.get_agent_uptime(agent) <
                cfg.CONF.l2pop.agent_boot_time):
            # First port activated on current agent in this network,
            # we have to provide it with the whole list of fdb entries
            agent_fdb_entries = {network_id:
                                 {'segment_id': segment['segmentation_id'],
                                  'network_type': segment['network_type'],
                                  'ports': {}}}
            ports = agent_fdb_entries[network_id]['ports']
            nondvr_network_ports = self.get_nondvr_network_ports(session,
                                                                 network_id)
            for network_port in nondvr_network_ports:
                binding, agent = network_port
                if agent.host == agent_host:
                    continue
                ip = self.get_agent_ip(agent)
                if not ip:
                    LOG.debug(_("Unable to retrieve the agent ip, check "
                                "the agent %(agent_host)s configuration."),
                              {'agent_host': agent.host})
                    continue
                agent_ports = ports.get(ip, [const.FLOODING_ENTRY])
                agent_ports += self._get_port_fdb_entries(binding.port)
                ports[ip] = agent_ports
            if cfg.CONF.l2pop.cascaded_gateway == 'no_gateway':
                remote_ports = self.get_remote_ports(session, network_id)
            else:
                remote_ports = {}
            # elif cfg.CONF.cascaded_gateway == 'admin_gateway' or
            #         cfg.CONF.cascaded_gateway == 'population_opt':
            #     if self.is_proxy_port(port_context):
            #         remote_ports = self.get_remote_ports(session,
            #                                              network_id)
            # else:
            for binding in remote_ports:
                profile = binding['profile']
                ip = self.get_host_ip_from_binding_profile(profile)
                if not ip:
                    LOG.debug(_("Unable to retrieve the agent ip, check "
                                "the agent %(agent_host)s configuration."),
                              {'agent_host': agent.host})
                    continue
                agent_ports = ports.get(ip, [const.FLOODING_ENTRY])
                agent_ports += self._get_port_fdb_entries(binding.port)
                ports[ip] = agent_ports
            dvr_network_ports = self.get_dvr_network_ports(session,
                                                           network_id)
            for network_port in dvr_network_ports:
                binding, agent = network_port
                if agent.host == agent_host:
                    continue
                ip = self.get_agent_ip(agent)
                if not ip:
                    LOG.debug(_("Unable to retrieve the agent ip, check "
                                "the agent %(agent_host)s configuration."),
                              {'agent_host': agent.host})
                    continue
                agent_ports = ports.get(ip, [const.FLOODING_ENTRY])
                ports[ip] = agent_ports
            # And notify other agents to add flooding entry
            other_fdb_entries[network_id]['ports'][agent_ip].append(
                const.FLOODING_ENTRY)
            if ports:
                self.L2populationAgentNotify.add_fdb_entries(
                    self.rpc_ctx, agent_fdb_entries, agent_host)
        # Notify other agents to add fdb rule for current port
        if port['device_owner'] != const.DEVICE_OWNER_DVR_INTERFACE:
            other_fdb_entries[network_id]['ports'][agent_ip] += (
                port_fdb_entries)
        self.L2populationAgentNotify.add_fdb_entries(self.rpc_ctx,
                                                     other_fdb_entries)

    def _update_port_down(self, context, port, agent_host):
        port_infos = self._get_port_infos(context, port, agent_host)
        if not port_infos:
            return
        agent, agent_host, agent_ip, segment, port_fdb_entries = port_infos
        network_id = port['network_id']
        session = db_api.get_session()
        agent_active_ports = self.get_agent_network_active_port_count(
            session, agent_host, network_id)
        other_fdb_entries = {network_id:
                             {'segment_id': segment['segmentation_id'],
                              'network_type': segment['network_type'],
                              'ports': {agent_ip: []}}}
        if agent_active_ports == 0:
            # Agent is removing its last activated port in this network;
            # other agents need to be notified to delete their flooding
            # entry.
            other_fdb_entries[network_id]['ports'][agent_ip].append(
                const.FLOODING_ENTRY)
        # Notify other agents to remove fdb rules for current port
        if port['device_owner'] != const.DEVICE_OWNER_DVR_INTERFACE:
            fdb_entries = port_fdb_entries
            other_fdb_entries[network_id]['ports'][agent_ip] += fdb_entries
        return other_fdb_entries
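
For reference, the fdb payload that _update_port_up() and get_remote_port_fdb() assemble and hand to the agent notify API has this shape (a sketch; the network id, VNI, MAC and IPs are illustrative):

    other_fdb_entries = {
        'net-uuid': {                      # network id
            'segment_id': 1001,            # vxlan segmentation id
            'network_type': 'vxlan',
            'ports': {
                '10.0.0.5': [              # tunnel endpoint ip
                    const.FLOODING_ENTRY,  # broadcast/unknown-unicast entry
                    ['fa:16:3e:aa:bb:cc', '192.168.1.10'],  # [mac, ip]
                ],
            },
        },
    }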

@@ -0,0 +1,97 @@
#!/bin/bash
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Copyright (c) 2014 Huawei Technologies.
_NEUTRON_CONF_DIR="/etc/neutron"
_NEUTRON_CONF_FILE='neutron.conf'
_NEUTRON_INSTALL="/usr/lib/python2.7/dist-packages"
_NEUTRON_DIR="${_NEUTRON_INSTALL}/neutron"

# If you did not make changes to the installation files,
# please do not edit the following directories.
_CODE_DIR="../neutron/"
_BACKUP_DIR="${_NEUTRON_INSTALL}/.neutron-cascading-server-big2layer-patch-installation-backup"

if [[ ${EUID} -ne 0 ]]; then
    echo "Please run as root."
    exit 1
fi

## Redirect output to the logfile as well as stdout
#exec > >(tee -a ${_SCRIPT_LOGFILE})
#exec 2> >(tee -a ${_SCRIPT_LOGFILE} >&2)

cd "$(dirname "$0")"

echo "checking installation directories..."
if [ ! -d "${_NEUTRON_DIR}" ]; then
    echo "Could not find the neutron installation. Please check the variables at the beginning of the script."
    echo "aborted."
    exit 1
fi
if [ ! -f "${_NEUTRON_CONF_DIR}/${_NEUTRON_CONF_FILE}" ]; then
    echo "Could not find the neutron config file. Please check the variables at the beginning of the script."
    echo "aborted."
    exit 1
fi

echo "checking previous installation..."
if [ -d "${_BACKUP_DIR}/neutron" ]; then
    echo "It seems neutron-server-big2layer-cascading-patch has already been installed!"
    echo "Please check the README for a solution if this is not true."
    exit 1
fi

echo "backing up current files that might be overwritten..."
mkdir -p "${_BACKUP_DIR}"
cp -r "${_NEUTRON_DIR}/" "${_BACKUP_DIR}/"
if [ $? -ne 0 ]; then
    rm -r "${_BACKUP_DIR}/neutron"
    echo "Error in code backup, aborted."
    exit 1
fi

echo "copying in new files..."
cp -r "${_CODE_DIR}" "$(dirname "${_NEUTRON_DIR}")"
if [ $? -ne 0 ]; then
    echo "Error in copying, aborted."
    echo "Recovering original files..."
    cp -r "${_BACKUP_DIR}/neutron" "$(dirname "${_NEUTRON_DIR}")" && rm -r "${_BACKUP_DIR}/neutron"
    if [ $? -ne 0 ]; then
        echo "Recovery failed! Please install manually."
    fi
    exit 1
fi

echo "restarting cascading neutron server..."
service neutron-server restart
if [ $? -ne 0 ]; then
    echo "There was an error in restarting the service; please restart the cascading neutron server manually."
    exit 1
fi

echo "restarting cascading neutron-plugin-openvswitch-agent..."
service neutron-plugin-openvswitch-agent restart
if [ $? -ne 0 ]; then
    echo "There was an error in restarting the service; please restart the cascading neutron-plugin-openvswitch-agent manually."
    exit 1
fi

echo "restarting cascading neutron-l3-agent..."
service neutron-l3-agent restart
if [ $? -ne 0 ]; then
    echo "There was an error in restarting the service; please restart the cascading neutron-l3-agent manually."
    exit 1
fi

echo "Completed."
echo "See the README to get started."
exit 0

@@ -0,0 +1,123 @@
# Copyright (c) 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import sql

from neutron.common import constants as const
from neutron.db import agents_db
from neutron.db import common_db_mixin as base_db
from neutron.db import models_v2
from neutron.openstack.common import jsonutils
from neutron.openstack.common import timeutils
from neutron.plugins.ml2.drivers.l2pop import constants as l2_const
from neutron.plugins.ml2 import models as ml2_models


class L2populationDbMixin(base_db.CommonDbMixin):

    def get_agent_ip_by_host(self, session, agent_host):
        agent = self.get_agent_by_host(session, agent_host)
        if agent:
            return self.get_agent_ip(agent)

    def get_agent_ip(self, agent):
        configuration = jsonutils.loads(agent.configurations)
        return configuration.get('tunneling_ip')

    def get_host_ip_from_binding_profile(self, port):
        ip = port['binding:profile'].get('host_ip')
        return ip

    def get_host_ip_from_binding_profile_str(self, profile):
        if not profile:
            return
        profile = jsonutils.loads(profile)
        return profile.get('host_ip')

    def get_agent_uptime(self, agent):
        return timeutils.delta_seconds(agent.started_at,
                                       agent.heartbeat_timestamp)

    def get_agent_tunnel_types(self, agent):
        configuration = jsonutils.loads(agent.configurations)
        return configuration.get('tunnel_types')

    def get_agent_l2pop_network_types(self, agent):
        configuration = jsonutils.loads(agent.configurations)
        return configuration.get('l2pop_network_types')

    def get_agent_by_host(self, session, agent_host):
        with session.begin(subtransactions=True):
            query = session.query(agents_db.Agent)
            query = query.filter(agents_db.Agent.host == agent_host,
                                 agents_db.Agent.agent_type.in_(
                                     l2_const.SUPPORTED_AGENT_TYPES))
            return query.first()

    def get_network_ports(self, session, network_id):
        with session.begin(subtransactions=True):
            query = session.query(ml2_models.PortBinding,
                                  agents_db.Agent)
            query = query.join(agents_db.Agent,
                               agents_db.Agent.host ==
                               ml2_models.PortBinding.host)
            query = query.join(models_v2.Port)
            query = query.filter(models_v2.Port.network_id == network_id,
                                 models_v2.Port.admin_state_up == sql.true(),
                                 agents_db.Agent.agent_type.in_(
                                     l2_const.SUPPORTED_AGENT_TYPES))
            return query

    def get_nondvr_network_ports(self, session, network_id):
        query = self.get_network_ports(session, network_id)
        return query.filter(models_v2.Port.device_owner !=
                            const.DEVICE_OWNER_DVR_INTERFACE)

    def get_dvr_network_ports(self, session, network_id):
        with session.begin(subtransactions=True):
            query = session.query(ml2_models.DVRPortBinding,
                                  agents_db.Agent)
            query = query.join(agents_db.Agent,
                               agents_db.Agent.host ==
                               ml2_models.DVRPortBinding.host)
            query = query.join(models_v2.Port)
            query = query.filter(models_v2.Port.network_id == network_id,
                                 models_v2.Port.admin_state_up == sql.true(),
                                 models_v2.Port.device_owner ==
                                 const.DEVICE_OWNER_DVR_INTERFACE,
                                 agents_db.Agent.agent_type.in_(
                                     l2_const.SUPPORTED_AGENT_TYPES))
            return query

    def get_agent_network_active_port_count(self, session, agent_host,
                                            network_id):
        with session.begin(subtransactions=True):
            query = session.query(models_v2.Port)
            query1 = query.join(ml2_models.PortBinding)
            query1 = query1.filter(models_v2.Port.network_id == network_id,
                                   models_v2.Port.status ==
                                   const.PORT_STATUS_ACTIVE,
                                   models_v2.Port.device_owner !=
                                   const.DEVICE_OWNER_DVR_INTERFACE,
                                   ml2_models.PortBinding.host == agent_host)
            query2 = query.join(ml2_models.DVRPortBinding)
            query2 = query2.filter(models_v2.Port.network_id == network_id,
                                   ml2_models.DVRPortBinding.status ==
                                   const.PORT_STATUS_ACTIVE,
                                   models_v2.Port.device_owner ==
                                   const.DEVICE_OWNER_DVR_INTERFACE,
                                   ml2_models.DVRPortBinding.host ==
                                   agent_host)
            return (query1.count() + query2.count())
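
The per-agent helpers above all parse the agent's self-reported 'configurations' JSON blob. A sketch of the fields they expect (values illustrative):

    from neutron.openstack.common import jsonutils

    configurations = jsonutils.dumps({
        'tunneling_ip': '10.0.0.5',         # read by get_agent_ip()
        'tunnel_types': ['vxlan'],          # read by get_agent_tunnel_types()
        'l2pop_network_types': ['vxlan'],   # read by get_agent_l2pop_network_types()
    })
    assert jsonutils.loads(configurations)['tunneling_ip'] == '10.0.0.5'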

@@ -0,0 +1,304 @@
# Copyright (c) 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg

from neutron.common import constants as const
from neutron import context as n_context
from neutron.db import api as db_api
from neutron.openstack.common import log as logging
from neutron.plugins.ml2 import driver_api as api
from neutron.plugins.ml2.drivers.l2pop import config  # noqa
from neutron.plugins.ml2.drivers.l2pop import db as l2pop_db
from neutron.plugins.ml2.drivers.l2pop import rpc as l2pop_rpc

LOG = logging.getLogger(__name__)


class L2populationMechanismDriver(api.MechanismDriver,
                                  l2pop_db.L2populationDbMixin):

    def __init__(self):
        super(L2populationMechanismDriver, self).__init__()
        self.L2populationAgentNotify = l2pop_rpc.L2populationAgentNotifyAPI()

    def initialize(self):
        LOG.debug(_("Experimental L2 population driver"))
        self.rpc_ctx = n_context.get_admin_context_without_session()
        self.migrated_ports = {}
        self.remove_fdb_entries = {}

    def _get_port_fdb_entries(self, port):
        return [[port['mac_address'], port['device_owner'],
                 ip['ip_address']] for ip in port['fixed_ips']]

    def _get_agent_host(self, context, port):
        if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
            agent_host = context.binding.host
        else:
            agent_host = port['binding:host_id']
        return agent_host

    def delete_port_precommit(self, context):
        # TODO(matrohon): revisit once the original bound segment is
        # available in delete_port_postcommit. In delete_port_postcommit
        # agent_active_ports will be equal to 0, and _update_port_down
        # won't need agent_active_ports_count_for_flooding anymore
        port = context.current
        agent_host = context.host  # self._get_agent_host(context, port)
        if port['id'] not in self.remove_fdb_entries:
            self.remove_fdb_entries[port['id']] = {}
        self.remove_fdb_entries[port['id']][agent_host] = (
            self._update_port_down(context, port, agent_host))

    def delete_port_postcommit(self, context):
        port = context.current
        agent_host = context.host
        fdb_entries = self._update_port_down(context, port, agent_host)
        self.L2populationAgentNotify.remove_fdb_entries(self.rpc_ctx,
                                                        fdb_entries)

    def _get_diff_ips(self, orig, port):
        orig_ips = set([ip['ip_address'] for ip in orig['fixed_ips']])
        port_ips = set([ip['ip_address'] for ip in port['fixed_ips']])
        # check if an ip has been added or removed
        orig_chg_ips = orig_ips.difference(port_ips)
        port_chg_ips = port_ips.difference(orig_ips)
        if orig_chg_ips or port_chg_ips:
            return orig_chg_ips, port_chg_ips

    def _fixed_ips_changed(self, context, orig, port, diff_ips):
        orig_ips, port_ips = diff_ips
        if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
            agent_host = context.host
        else:
            agent_host = context.original_host
        port_infos = self._get_port_infos(
            context, orig, agent_host)
        if not port_infos:
            return
        agent, agent_host, agent_ip, segment, port_fdb_entries = port_infos
        orig_mac_ip = [[port['mac_address'], port['device_owner'], ip]
                       for ip in orig_ips]
        port_mac_ip = [[port['mac_address'], port['device_owner'], ip]
                       for ip in port_ips]
        upd_fdb_entries = {port['network_id']: {agent_ip: {}}}
        ports = upd_fdb_entries[port['network_id']][agent_ip]
        if orig_mac_ip:
            ports['before'] = orig_mac_ip
        if port_mac_ip:
            ports['after'] = port_mac_ip
        self.L2populationAgentNotify.update_fdb_entries(
            self.rpc_ctx, {'chg_ip': upd_fdb_entries})
        return True

    def update_port_postcommit(self, context):
        port = context.current
        orig = context.original
        diff_ips = self._get_diff_ips(orig, port)
        if diff_ips:
            self._fixed_ips_changed(context, orig, port, diff_ips)
        if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
            if context.status == const.PORT_STATUS_ACTIVE:
                self._update_port_up(context)
            if context.status == const.PORT_STATUS_DOWN:
                agent_host = context.host
                fdb_entries = self._update_port_down(
                    context, port, agent_host)
                self.L2populationAgentNotify.remove_fdb_entries(
                    self.rpc_ctx, fdb_entries)
        elif (context.host != context.original_host
              and context.status == const.PORT_STATUS_ACTIVE
              and not self.migrated_ports.get(orig['id'])):
            # The port has been migrated. We have to store the original
            # binding to send the appropriate fdb entries once the port
            # is set up on the destination host
            self.migrated_ports[orig['id']] = (
                (orig, context.original_host))
        elif context.status != context.original_status:
            if context.status == const.PORT_STATUS_ACTIVE:
                self._update_port_up(context)
            elif context.status == const.PORT_STATUS_DOWN:
                fdb_entries = self._update_port_down(
                    context, port, context.host)
                self.L2populationAgentNotify.remove_fdb_entries(
                    self.rpc_ctx, fdb_entries)
            elif context.status == const.PORT_STATUS_BUILD:
                orig = self.migrated_ports.pop(port['id'], None)
                if orig:
                    original_port = orig[0]
                    original_host = orig[1]
                    # this port has been migrated: remove its entries from fdb
                    fdb_entries = self._update_port_down(
                        context, original_port, original_host)
                    self.L2populationAgentNotify.remove_fdb_entries(
                        self.rpc_ctx, fdb_entries)

    def _get_port_infos(self, context, port, agent_host):
        if not agent_host:
            return
        session = db_api.get_session()
        agent = self.get_agent_by_host(session, agent_host)
        if not agent:
            return
        if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
            agent_ip = self.get_agent_ip(agent)
        else:
            agent_ip = self.get_host_ip_from_binding_profile(port)
        if not agent_ip:
            LOG.warning(_("Unable to retrieve the agent ip, check the agent "
                          "configuration."))
            return
        segment = context.bound_segment
        if not segment:
            LOG.warning(_("Port %(port)s updated by agent %(agent)s "
                          "isn't bound to any segment"),
                        {'port': port['id'], 'agent': agent})
            return
        network_types = self.get_agent_l2pop_network_types(agent)
        if network_types is None:
            network_types = self.get_agent_tunnel_types(agent)
        if segment['network_type'] not in network_types:
            return
        fdb_entries = self._get_port_fdb_entries(port)
        return agent, agent_host, agent_ip, segment, fdb_entries

    def _update_port_up(self, context):
        port = context.current
        agent_host = context.host
        port_infos = self._get_port_infos(context, port, agent_host)
        if not port_infos:
            return
        agent, agent_host, agent_ip, segment, port_fdb_entries = port_infos
        network_id = port['network_id']
        session = db_api.get_session()
        agent_active_ports = self.get_agent_network_active_port_count(
            session, agent_host, network_id)
        other_fdb_entries = {network_id:
                             {'segment_id': segment['segmentation_id'],
                              'network_type': segment['network_type'],
                              'ports': {agent_ip: []}}}
        if agent_active_ports == 1 or (
                self.get_agent_uptime(agent) <
                cfg.CONF.l2pop.agent_boot_time):
            # First port activated on current agent in this network,
            # we have to provide it with the whole list of fdb entries
            agent_fdb_entries = {network_id:
                                 {'segment_id': segment['segmentation_id'],
                                  'network_type': segment['network_type'],
                                  'ports': {}}}
            ports = agent_fdb_entries[network_id]['ports']
            nondvr_network_ports = self.get_nondvr_network_ports(session,
                                                                 network_id)
            for network_port in nondvr_network_ports:
                binding, agent = network_port
                if agent.host == agent_host:
                    continue
                # ip = self.get_agent_ip(agent)
                profile = binding['profile']
                ip = self.get_host_ip_from_binding_profile_str(profile)
                if not ip:
                    LOG.debug(_("Unable to retrieve the agent ip, check "
                                "the agent %(agent_host)s configuration."),
                              {'agent_host': agent.host})
                    continue
                agent_ports = ports.get(ip, [const.FLOODING_ENTRY])
                agent_ports += self._get_port_fdb_entries(binding.port)
                ports[ip] = agent_ports
            # commented out by j00209498:
            # dvr_network_ports = self.get_dvr_network_ports(session,
            #                                                network_id)
            # for network_port in dvr_network_ports:
            #     binding, agent = network_port
            #     if agent.host == agent_host:
            #         continue
            #     ip = self.get_agent_ip(agent)
            #     if not ip:
            #         LOG.debug(_("Unable to retrieve the agent ip, check "
            #                     "the agent %(agent_host)s configuration."),
            #                   {'agent_host': agent.host})
            #         continue
            #     agent_ports = ports.get(ip, [const.FLOODING_ENTRY])
            #     ports[ip] = agent_ports

            # And notify other agents to add flooding entry
            other_fdb_entries[network_id]['ports'][agent_ip].append(
                const.FLOODING_ENTRY)
            if ports:
                self.L2populationAgentNotify.add_fdb_entries(
                    self.rpc_ctx, agent_fdb_entries, agent_host)
        # Notify other agents to add fdb rule for current port
        if port['device_owner'] != const.DEVICE_OWNER_DVR_INTERFACE:
            other_fdb_entries[network_id]['ports'][agent_ip] += (
                port_fdb_entries)
        self.L2populationAgentNotify.add_fdb_entries(self.rpc_ctx,
                                                     other_fdb_entries)

    def _update_port_down(self, context, port, agent_host):
        port_infos = self._get_port_infos(context, port, agent_host)
        if not port_infos:
            return
        agent, agent_host, agent_ip, segment, port_fdb_entries = port_infos
        network_id = port['network_id']
        session = db_api.get_session()
        agent_active_ports = self.get_agent_network_active_port_count(
            session, agent_host, network_id)
        other_fdb_entries = {network_id:
                             {'segment_id': segment['segmentation_id'],
                              'network_type': segment['network_type'],
                              'ports': {agent_ip: []}}}
        if agent_active_ports == 0:
            # Agent is removing its last activated port in this network;
            # other agents need to be notified to delete their flooding
            # entry.
            other_fdb_entries[network_id]['ports'][agent_ip].append(
                const.FLOODING_ENTRY)
        # Notify other agents to remove fdb rules for current port
        if port['device_owner'] != const.DEVICE_OWNER_DVR_INTERFACE:
            fdb_entries = port_fdb_entries
            other_fdb_entries[network_id]['ports'][agent_ip] += fdb_entries
        return other_fdb_entries
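
Note the difference from the cascaded driver earlier in this commit: here _get_port_fdb_entries() emits three-element entries, so every non-flooding entry sent to the agents carries the device_owner as well (a sketch with illustrative values):

    # [mac, device_owner, ip] rather than the usual [mac, ip]
    entry = ['fa:16:3e:aa:bb:cc', 'compute:nova', '192.168.1.10']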