remove unnecessary files under neutron/db

Change-Id: I3b730a4161febbbd14f216d002335a4fd10317e8
changes/16/104816/1
Isaku Yamahata 9 years ago
parent aa337ef161
commit 2a4b74fe5a

@@ -1,219 +0,0 @@
# Copyright (c) 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from eventlet import greenthread
from oslo.config import cfg
import sqlalchemy as sa
from sqlalchemy.orm import exc
from neutron.common import rpc_compat
from neutron.db import model_base
from neutron.db import models_v2
from neutron.extensions import agent as ext_agent
from neutron import manager
from neutron.openstack.common.db import exception as db_exc
from neutron.openstack.common import excutils
from neutron.openstack.common import jsonutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import timeutils
LOG = logging.getLogger(__name__)
cfg.CONF.register_opt(
cfg.IntOpt('agent_down_time', default=75,
help=_("Seconds to regard the agent is down; should be at "
"least twice report_interval, to be sure the "
"agent is down for good.")))
class Agent(model_base.BASEV2, models_v2.HasId):
"""Represents agents running in neutron deployments."""
__table_args__ = (
sa.UniqueConstraint('agent_type', 'host',
name='uniq_agents0agent_type0host'),
)
# L3 agent, DHCP agent, OVS agent, LinuxBridge
agent_type = sa.Column(sa.String(255), nullable=False)
binary = sa.Column(sa.String(255), nullable=False)
# TOPIC is a fanout exchange topic
topic = sa.Column(sa.String(255), nullable=False)
# TOPIC.host is a target topic
host = sa.Column(sa.String(255), nullable=False)
admin_state_up = sa.Column(sa.Boolean, default=True,
nullable=False)
# the time when first report came from agents
created_at = sa.Column(sa.DateTime, nullable=False)
# the time when first report came after agents start
started_at = sa.Column(sa.DateTime, nullable=False)
# updated when agents report
heartbeat_timestamp = sa.Column(sa.DateTime, nullable=False)
# description is note for admin user
description = sa.Column(sa.String(255))
    # configurations: a JSON dict string; 4095 characters should be enough
configurations = sa.Column(sa.String(4095), nullable=False)
@property
def is_active(self):
return not AgentDbMixin.is_agent_down(self.heartbeat_timestamp)
class AgentDbMixin(ext_agent.AgentPluginBase):
"""Mixin class to add agent extension to db_base_plugin_v2."""
def _get_agent(self, context, id):
try:
agent = self._get_by_id(context, Agent, id)
except exc.NoResultFound:
raise ext_agent.AgentNotFound(id=id)
return agent
@classmethod
def is_agent_down(cls, heart_beat_time):
return timeutils.is_older_than(heart_beat_time,
cfg.CONF.agent_down_time)
def get_configuration_dict(self, agent_db):
try:
conf = jsonutils.loads(agent_db.configurations)
except Exception:
msg = _('Configuration for agent %(agent_type)s on host %(host)s'
' is invalid.')
LOG.warn(msg, {'agent_type': agent_db.agent_type,
'host': agent_db.host})
conf = {}
return conf
def _make_agent_dict(self, agent, fields=None):
attr = ext_agent.RESOURCE_ATTRIBUTE_MAP.get(
ext_agent.RESOURCE_NAME + 's')
res = dict((k, agent[k]) for k in attr
if k not in ['alive', 'configurations'])
res['alive'] = not AgentDbMixin.is_agent_down(
res['heartbeat_timestamp'])
res['configurations'] = self.get_configuration_dict(agent)
return self._fields(res, fields)
def delete_agent(self, context, id):
with context.session.begin(subtransactions=True):
agent = self._get_agent(context, id)
context.session.delete(agent)
def update_agent(self, context, id, agent):
agent_data = agent['agent']
with context.session.begin(subtransactions=True):
agent = self._get_agent(context, id)
agent.update(agent_data)
return self._make_agent_dict(agent)
def get_agents_db(self, context, filters=None):
query = self._get_collection_query(context, Agent, filters=filters)
return query.all()
def get_agents(self, context, filters=None, fields=None):
return self._get_collection(context, Agent,
self._make_agent_dict,
filters=filters, fields=fields)
def _get_agent_by_type_and_host(self, context, agent_type, host):
query = self._model_query(context, Agent)
try:
agent_db = query.filter(Agent.agent_type == agent_type,
Agent.host == host).one()
return agent_db
except exc.NoResultFound:
raise ext_agent.AgentNotFoundByTypeHost(agent_type=agent_type,
host=host)
except exc.MultipleResultsFound:
raise ext_agent.MultipleAgentFoundByTypeHost(agent_type=agent_type,
host=host)
def get_agent(self, context, id, fields=None):
agent = self._get_agent(context, id)
return self._make_agent_dict(agent, fields)
def _create_or_update_agent(self, context, agent):
with context.session.begin(subtransactions=True):
res_keys = ['agent_type', 'binary', 'host', 'topic']
res = dict((k, agent[k]) for k in res_keys)
configurations_dict = agent.get('configurations', {})
res['configurations'] = jsonutils.dumps(configurations_dict)
current_time = timeutils.utcnow()
try:
agent_db = self._get_agent_by_type_and_host(
context, agent['agent_type'], agent['host'])
res['heartbeat_timestamp'] = current_time
if agent.get('start_flag'):
res['started_at'] = current_time
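                # NOTE: the greenthread.sleep(0) calls here and below
                # voluntarily yield to other eventlet greenthreads; this looks
                # intended to keep the server responsive while many agents
                # report at once.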
greenthread.sleep(0)
agent_db.update(res)
except ext_agent.AgentNotFoundByTypeHost:
greenthread.sleep(0)
res['created_at'] = current_time
res['started_at'] = current_time
res['heartbeat_timestamp'] = current_time
res['admin_state_up'] = True
agent_db = Agent(**res)
greenthread.sleep(0)
context.session.add(agent_db)
greenthread.sleep(0)
def create_or_update_agent(self, context, agent):
"""Create or update agent according to report."""
try:
return self._create_or_update_agent(context, agent)
except db_exc.DBDuplicateEntry as e:
with excutils.save_and_reraise_exception() as ctxt:
if e.columns == ['agent_type', 'host']:
# It might happen that two or more concurrent transactions
# are trying to insert new rows having the same value of
# (agent_type, host) pair at the same time (if there has
# been no such entry in the table and multiple agent status
# updates are being processed at the moment). In this case
# having a unique constraint on (agent_type, host) columns
# guarantees that only one transaction will succeed and
# insert a new agent entry, others will fail and be rolled
# back. That means we must retry them one more time: no
# INSERTs will be issued, because
# _get_agent_by_type_and_host() will return the existing
# agent entry, which will be updated multiple times
ctxt.reraise = False
return self._create_or_update_agent(context, agent)
class AgentExtRpcCallback(rpc_compat.RpcCallback):
"""Processes the rpc report in plugin implementations."""
RPC_API_VERSION = '1.0'
START_TIME = timeutils.utcnow()
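    # START_TIME is captured when this class is defined (at server start);
    # report_state() below drops any report timestamped before it as stale.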
def __init__(self, plugin=None):
super(AgentExtRpcCallback, self).__init__()
self.plugin = plugin
def report_state(self, context, **kwargs):
"""Report state from agent to server."""
time = kwargs['time']
time = timeutils.parse_strtime(time)
if self.START_TIME > time:
LOG.debug(_("Message with invalid timestamp received"))
return
agent_state = kwargs['agent_state']['agent_state']
if not self.plugin:
self.plugin = manager.NeutronManager.get_plugin()
self.plugin.create_or_update_agent(context, agent_state)

@@ -1,226 +0,0 @@
# Copyright (c) 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy.orm import exc
from sqlalchemy.orm import joinedload
from neutron.common import constants
from neutron.common import utils
from neutron.db import agents_db
from neutron.db import model_base
from neutron.extensions import agent as ext_agent
from neutron.extensions import dhcpagentscheduler
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
AGENTS_SCHEDULER_OPTS = [
cfg.StrOpt('network_scheduler_driver',
default='neutron.scheduler.'
'dhcp_agent_scheduler.ChanceScheduler',
help=_('Driver to use for scheduling network to DHCP agent')),
cfg.BoolOpt('network_auto_schedule', default=True,
help=_('Allow auto scheduling networks to DHCP agent.')),
cfg.IntOpt('dhcp_agents_per_network', default=1,
help=_('Number of DHCP agents scheduled to host a network.')),
]
cfg.CONF.register_opts(AGENTS_SCHEDULER_OPTS)
class NetworkDhcpAgentBinding(model_base.BASEV2):
"""Represents binding between neutron networks and DHCP agents."""
network_id = sa.Column(sa.String(36),
sa.ForeignKey("networks.id", ondelete='CASCADE'),
primary_key=True)
dhcp_agent = orm.relation(agents_db.Agent)
dhcp_agent_id = sa.Column(sa.String(36),
sa.ForeignKey("agents.id",
ondelete='CASCADE'),
primary_key=True)
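    # The composite primary key (network_id, dhcp_agent_id) allows a network
    # to be hosted by more than one DHCP agent at a time.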
class AgentSchedulerDbMixin(agents_db.AgentDbMixin):
"""Common class for agent scheduler mixins."""
# agent notifiers to handle agent update operations;
# should be updated by plugins;
agent_notifiers = {
constants.AGENT_TYPE_DHCP: None,
constants.AGENT_TYPE_L3: None,
constants.AGENT_TYPE_LOADBALANCER: None,
}
@staticmethod
def is_eligible_agent(active, agent):
if active is None:
# filtering by activeness is disabled, all agents are eligible
return True
else:
# note(rpodolyaka): original behaviour is saved here: if active
# filter is set, only agents which are 'up'
# (i.e. have a recent heartbeat timestamp)
# are eligible, even if active is False
return not agents_db.AgentDbMixin.is_agent_down(
agent['heartbeat_timestamp'])
def update_agent(self, context, id, agent):
original_agent = self.get_agent(context, id)
result = super(AgentSchedulerDbMixin, self).update_agent(
context, id, agent)
agent_data = agent['agent']
agent_notifier = self.agent_notifiers.get(original_agent['agent_type'])
if (agent_notifier and
'admin_state_up' in agent_data and
original_agent['admin_state_up'] != agent_data['admin_state_up']):
agent_notifier.agent_updated(context,
agent_data['admin_state_up'],
original_agent['host'])
return result
class DhcpAgentSchedulerDbMixin(dhcpagentscheduler
.DhcpAgentSchedulerPluginBase,
AgentSchedulerDbMixin):
"""Mixin class to add DHCP agent scheduler extension to db_base_plugin_v2.
"""
network_scheduler = None
def get_dhcp_agents_hosting_networks(
self, context, network_ids, active=None):
if not network_ids:
return []
query = context.session.query(NetworkDhcpAgentBinding)
query = query.options(joinedload('dhcp_agent'))
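        # joinedload pulls the related dhcp_agent rows in the same query,
        # avoiding a lazy load per binding below.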
if len(network_ids) == 1:
query = query.filter(
NetworkDhcpAgentBinding.network_id == network_ids[0])
elif network_ids:
query = query.filter(
                NetworkDhcpAgentBinding.network_id.in_(network_ids))
if active is not None:
query = (query.filter(agents_db.Agent.admin_state_up == active))
return [binding.dhcp_agent
for binding in query
if AgentSchedulerDbMixin.is_eligible_agent(active,
binding.dhcp_agent)]
def add_network_to_dhcp_agent(self, context, id, network_id):
self._get_network(context, network_id)
with context.session.begin(subtransactions=True):
agent_db = self._get_agent(context, id)
if (agent_db['agent_type'] != constants.AGENT_TYPE_DHCP or
not agent_db['admin_state_up']):
raise dhcpagentscheduler.InvalidDHCPAgent(id=id)
dhcp_agents = self.get_dhcp_agents_hosting_networks(
context, [network_id])
for dhcp_agent in dhcp_agents:
if id == dhcp_agent.id:
raise dhcpagentscheduler.NetworkHostedByDHCPAgent(
network_id=network_id, agent_id=id)
binding = NetworkDhcpAgentBinding()
binding.dhcp_agent_id = id
binding.network_id = network_id
context.session.add(binding)
dhcp_notifier = self.agent_notifiers.get(constants.AGENT_TYPE_DHCP)
if dhcp_notifier:
dhcp_notifier.network_added_to_agent(
context, network_id, agent_db.host)
def remove_network_from_dhcp_agent(self, context, id, network_id):
agent = self._get_agent(context, id)
with context.session.begin(subtransactions=True):
try:
query = context.session.query(NetworkDhcpAgentBinding)
binding = query.filter(
NetworkDhcpAgentBinding.network_id == network_id,
NetworkDhcpAgentBinding.dhcp_agent_id == id).one()
except exc.NoResultFound:
raise dhcpagentscheduler.NetworkNotHostedByDhcpAgent(
network_id=network_id, agent_id=id)
# reserve the port, so the ip is reused on a subsequent add
device_id = utils.get_dhcp_agent_device_id(network_id,
agent['host'])
filters = dict(device_id=[device_id])
ports = self.get_ports(context, filters=filters)
for port in ports:
port['device_id'] = constants.DEVICE_ID_RESERVED_DHCP_PORT
self.update_port(context, port['id'], dict(port=port))
context.session.delete(binding)
dhcp_notifier = self.agent_notifiers.get(constants.AGENT_TYPE_DHCP)
if dhcp_notifier:
dhcp_notifier.network_removed_from_agent(
context, network_id, agent.host)
def list_networks_on_dhcp_agent(self, context, id):
query = context.session.query(NetworkDhcpAgentBinding.network_id)
query = query.filter(NetworkDhcpAgentBinding.dhcp_agent_id == id)
net_ids = [item[0] for item in query]
if net_ids:
return {'networks':
self.get_networks(context, filters={'id': net_ids})}
else:
return {'networks': []}
def list_active_networks_on_active_dhcp_agent(self, context, host):
try:
agent = self._get_agent_by_type_and_host(
context, constants.AGENT_TYPE_DHCP, host)
except ext_agent.AgentNotFoundByTypeHost:
LOG.debug("DHCP Agent not found on host %s", host)
return []
if not agent.admin_state_up:
return []
query = context.session.query(NetworkDhcpAgentBinding.network_id)
query = query.filter(NetworkDhcpAgentBinding.dhcp_agent_id == agent.id)
net_ids = [item[0] for item in query]
if net_ids:
return self.get_networks(
context,
filters={'id': net_ids, 'admin_state_up': [True]}
)
else:
return []
def list_dhcp_agents_hosting_network(self, context, network_id):
dhcp_agents = self.get_dhcp_agents_hosting_networks(
context, [network_id])
agent_ids = [dhcp_agent.id for dhcp_agent in dhcp_agents]
if agent_ids:
return {
'agents': self.get_agents(context, filters={'id': agent_ids})}
else:
return {'agents': []}
def schedule_network(self, context, created_network):
if self.network_scheduler:
return self.network_scheduler.schedule(
self, context, created_network)
def auto_schedule_networks(self, context, host):
if self.network_scheduler:
self.network_scheduler.auto_schedule_networks(self, context, host)

@@ -1,147 +0,0 @@
# Copyright 2013 VMware, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import sqlalchemy as sa
from sqlalchemy import orm
from neutron.api.v2 import attributes as attr
from neutron.db import db_base_plugin_v2
from neutron.db import model_base
from neutron.db import models_v2
from neutron.extensions import allowedaddresspairs as addr_pair
class AllowedAddressPair(model_base.BASEV2):
port_id = sa.Column(sa.String(36),
sa.ForeignKey('ports.id', ondelete="CASCADE"),
primary_key=True)
mac_address = sa.Column(sa.String(32), nullable=False, primary_key=True)
ip_address = sa.Column(sa.String(64), nullable=False, primary_key=True)
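    # port_id, mac_address and ip_address together form the primary key, so
    # duplicate pairs cannot be stored for the same port.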
port = orm.relationship(
models_v2.Port,
backref=orm.backref("allowed_address_pairs",
lazy="joined", cascade="delete"))
class AllowedAddressPairsMixin(object):
"""Mixin class for allowed address pairs."""
def _process_create_allowed_address_pairs(self, context, port,
allowed_address_pairs):
if not attr.is_attr_set(allowed_address_pairs):
return []
with context.session.begin(subtransactions=True):
for address_pair in allowed_address_pairs:
# use port.mac_address if no mac address in address pair
if 'mac_address' not in address_pair:
address_pair['mac_address'] = port['mac_address']
db_pair = AllowedAddressPair(
port_id=port['id'],
mac_address=address_pair['mac_address'],
ip_address=address_pair['ip_address'])
context.session.add(db_pair)
return allowed_address_pairs
def get_allowed_address_pairs(self, context, port_id):
pairs = (context.session.query(AllowedAddressPair).
filter_by(port_id=port_id))
return [self._make_allowed_address_pairs_dict(pair)
for pair in pairs]
def _extend_port_dict_allowed_address_pairs(self, port_res, port_db):
# If port_db is provided, allowed address pairs will be accessed via
# sqlalchemy models. As they're loaded together with ports this
# will not cause an extra query.
allowed_address_pairs = [
self._make_allowed_address_pairs_dict(address_pair) for
address_pair in port_db.allowed_address_pairs]
port_res[addr_pair.ADDRESS_PAIRS] = allowed_address_pairs
return port_res
# Register dict extend functions for ports
db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
attr.PORTS, ['_extend_port_dict_allowed_address_pairs'])
def _delete_allowed_address_pairs(self, context, id):
query = self._model_query(context, AllowedAddressPair)
with context.session.begin(subtransactions=True):
query.filter(AllowedAddressPair.port_id == id).delete()
def _make_allowed_address_pairs_dict(self, allowed_address_pairs,
fields=None):
res = {'mac_address': allowed_address_pairs['mac_address'],
'ip_address': allowed_address_pairs['ip_address']}
return self._fields(res, fields)
def _has_address_pairs(self, port):
return (attr.is_attr_set(port['port'][addr_pair.ADDRESS_PAIRS])
and port['port'][addr_pair.ADDRESS_PAIRS] != [])
def _check_update_has_allowed_address_pairs(self, port):
"""Determine if request has an allowed address pair.
Return True if the port parameter has a non-empty
'allowed_address_pairs' attribute. Otherwise returns False.
"""
return (addr_pair.ADDRESS_PAIRS in port['port'] and
self._has_address_pairs(port))
def _check_update_deletes_allowed_address_pairs(self, port):
"""Determine if request deletes address pair.
        Return True if the port has an allowed address pair and its value
        is either [] or not is_attr_set; otherwise return False.
"""
return (addr_pair.ADDRESS_PAIRS in port['port'] and
not self._has_address_pairs(port))
def is_address_pairs_attribute_updated(self, port, update_attrs):
"""Check if the address pairs attribute is being updated.
Returns True if there is an update. This can be used to decide
if a port update notification should be sent to agents or third
party controllers.
"""
new_pairs = update_attrs.get(addr_pair.ADDRESS_PAIRS)
if new_pairs is None:
return False
old_pairs = port.get(addr_pair.ADDRESS_PAIRS)
# Missing or unchanged address pairs in attributes mean no update
return new_pairs != old_pairs
def update_address_pairs_on_port(self, context, port_id, port,
original_port, updated_port):
"""Update allowed address pairs on port.
Returns True if an update notification is required. Notification
is not done here because other changes on the port may need
notification. This method is expected to be called within
a transaction.
"""
new_pairs = port['port'].get(addr_pair.ADDRESS_PAIRS)
if self.is_address_pairs_attribute_updated(original_port,
port['port']):
updated_port[addr_pair.ADDRESS_PAIRS] = new_pairs
self._delete_allowed_address_pairs(context, port_id)
self._process_create_allowed_address_pairs(
context, updated_port, new_pairs)
return True
return False

@@ -1,287 +0,0 @@
# Copyright (c) 2012 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo.config import cfg
from neutron.api.v2 import attributes
from neutron.common import constants
from neutron.common import exceptions as n_exc
from neutron.common import utils
from neutron.extensions import portbindings
from neutron import manager
from neutron.openstack.common.db import exception as db_exc
from neutron.openstack.common import excutils
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class DhcpRpcCallbackMixin(object):
"""A mix-in that enable DHCP agent support in plugin implementations."""
def _get_active_networks(self, context, **kwargs):
"""Retrieve and return a list of the active networks."""
host = kwargs.get('host')
plugin = manager.NeutronManager.get_plugin()
if utils.is_extension_supported(
plugin, constants.DHCP_AGENT_SCHEDULER_EXT_ALIAS):
if cfg.CONF.network_auto_schedule:
plugin.auto_schedule_networks(context, host)
nets = plugin.list_active_networks_on_active_dhcp_agent(
context, host)
else:
filters = dict(admin_state_up=[True])
nets = plugin.get_networks(context, filters=filters)
return nets
def _port_action(self, plugin, context, port, action):
"""Perform port operations taking care of concurrency issues."""
try:
if action == 'create_port':
return plugin.create_port(context, port)
elif action == 'update_port':
return plugin.update_port(context, port['id'], port['port'])
else:
msg = _('Unrecognized action')
raise n_exc.Invalid(message=msg)
except (db_exc.DBError, n_exc.NetworkNotFound,
n_exc.SubnetNotFound, n_exc.IpAddressGenerationFailure) as e:
with excutils.save_and_reraise_exception(reraise=False) as ctxt:
if isinstance(e, n_exc.IpAddressGenerationFailure):
# Check if the subnet still exists and if it does not,
# this is the reason why the ip address generation failed.
# In any other unlikely event re-raise
try:
subnet_id = port['port']['fixed_ips'][0]['subnet_id']
plugin.get_subnet(context, subnet_id)
except n_exc.SubnetNotFound:
pass
else:
ctxt.reraise = True
net_id = port['port']['network_id']
LOG.warn(_("Action %(action)s for network %(net_id)s "
"could not complete successfully: %(reason)s")
% {"action": action, "net_id": net_id, 'reason': e})
def get_active_networks(self, context, **kwargs):
"""Retrieve and return a list of the active network ids."""
# NOTE(arosen): This method is no longer used by the DHCP agent but is
# left so that neutron-dhcp-agents will still continue to work if
# neutron-server is upgraded and not the agent.
host = kwargs.get('host')
LOG.debug(_('get_active_networks requested from %s'), host)
nets = self._get_active_networks(context, **kwargs)
return [net['id'] for net in nets]
def get_active_networks_info(self, context, **kwargs):
"""Returns all the networks/subnets/ports in system."""
host = kwargs.get('host')
LOG.debug(_('get_active_networks_info from %s'), host)
networks = self._get_active_networks(context, **kwargs)
plugin = manager.NeutronManager.get_plugin()
filters = {'network_id': [network['id'] for network in networks]}
ports = plugin.get_ports(context, filters=filters)
filters['enable_dhcp'] = [True]
subnets = plugin.get_subnets(context, filters=filters)
for network in networks:
network['subnets'] = [subnet for subnet in subnets
if subnet['network_id'] == network['id']]
network['ports'] = [port for port in ports
if port['network_id'] == network['id']]
return networks
def get_network_info(self, context, **kwargs):
"""Retrieve and return a extended information about a network."""
network_id = kwargs.get('network_id')
host = kwargs.get('host')
LOG.debug(_('Network %(network_id)s requested from '
'%(host)s'), {'network_id': network_id,
'host': host})
plugin = manager.NeutronManager.get_plugin()
try:
network = plugin.get_network(context, network_id)
except n_exc.NetworkNotFound:
LOG.warn(_("Network %s could not be found, it might have "
"been deleted concurrently."), network_id)
return
filters = dict(network_id=[network_id])
network['subnets'] = plugin.get_subnets(context, filters=filters)
network['ports'] = plugin.get_ports(context, filters=filters)
return network
def get_dhcp_port(self, context, **kwargs):
"""Allocate a DHCP port for the host and return port information.
This method will re-use an existing port if one already exists. When a
port is re-used, the fixed_ip allocation will be updated to the current
network state. If an expected failure occurs, a None port is returned.
"""
host = kwargs.get('host')
network_id = kwargs.get('network_id')
device_id = kwargs.get('device_id')
# There could be more than one dhcp server per network, so create
# a device id that combines host and network ids
LOG.debug(_('Port %(device_id)s for %(network_id)s requested from '
'%(host)s'), {'device_id': device_id,
'network_id': network_id,
'host': host})
plugin = manager.NeutronManager.get_plugin()
retval = None
filters = dict(network_id=[network_id])
subnets = dict([(s['id'], s) for s in
plugin.get_subnets(context, filters=filters)])
dhcp_enabled_subnet_ids = [s['id'] for s in
subnets.values() if s['enable_dhcp']]
try:
filters = dict(network_id=[network_id], device_id=[device_id])
ports = plugin.get_ports(context, filters=filters)
if ports:
# Ensure that fixed_ips cover all dhcp_enabled subnets.
port = ports[0]
for fixed_ip in port['fixed_ips']:
if fixed_ip['subnet_id'] in dhcp_enabled_subnet_ids:
dhcp_enabled_subnet_ids.remove(fixed_ip['subnet_id'])
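                # Whatever remains in dhcp_enabled_subnet_ids has no address
                # on this port yet; extend fixed_ips so update_port allocates
                # one on each of those subnets.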
port['fixed_ips'].extend(
[dict(subnet_id=s) for s in dhcp_enabled_subnet_ids])
retval = plugin.update_port(context, port['id'],
dict(port=port))
except n_exc.NotFound as e:
LOG.warning(e)
if retval is None:
# No previous port exists, so create a new one.
LOG.debug(_('DHCP port %(device_id)s on network %(network_id)s '
'does not exist on %(host)s'),
{'device_id': device_id,
'network_id': network_id,
'host': host})
try:
network = plugin.get_network(context, network_id)
except n_exc.NetworkNotFound:
LOG.warn(_("Network %s could not be found, it might have "
"been deleted concurrently."), network_id)
return
port_dict = dict(
admin_state_up=True,
device_id=device_id,
network_id=network_id,
tenant_id=network['tenant_id'],
mac_address=attributes.ATTR_NOT_SPECIFIED,
name='',
device_owner=constants.DEVICE_OWNER_DHCP,
fixed_ips=[dict(subnet_id=s) for s in dhcp_enabled_subnet_ids])
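            # fixed_ips entries carrying only subnet_id let the plugin pick an
            # IP on each DHCP-enabled subnet when the port is created.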
retval = self._port_action(plugin, context, {'port': port_dict},
'create_port')
if not retval:
return
# Convert subnet_id to subnet dict
for fixed_ip in retval['fixed_ips']:
subnet_id = fixed_ip.pop('subnet_id')
fixed_ip['subnet'] = subnets[subnet_id]
return retval
def release_dhcp_port(self, context, **kwargs):
"""Release the port currently being used by a DHCP agent."""
host = kwargs.get('host')
network_id = kwargs.get('network_id')
device_id = kwargs.get('device_id')
LOG.debug(_('DHCP port deletion for %(network_id)s request from '
'%(host)s'),
{'network_id': network_id, 'host': host})
plugin = manager.NeutronManager.get_plugin()
plugin.delete_ports_by_device_id(context, device_id, network_id)
def release_port_fixed_ip(self, context, **kwargs):
"""Release the fixed_ip associated the subnet on a port."""
host = kwargs.get('host')
network_id = kwargs.get('network_id')
device_id = kwargs.get('device_id')
subnet_id = kwargs.get('subnet_id')
LOG.debug(_('DHCP port remove fixed_ip for %(subnet_id)s request '
'from %(host)s'),
{'subnet_id': subnet_id, 'host': host})
plugin = manager.NeutronManager.get_plugin()
filters = dict(network_id=[network_id], device_id=[device_id])
ports = plugin.get_ports(context, filters=filters)
if ports:
port = ports[0]
fixed_ips = port.get('fixed_ips', [])
for i in range(len(fixed_ips)):
if fixed_ips[i]['subnet_id'] == subnet_id:
del fixed_ips[i]
break
plugin.update_port(context, port['id'], dict(port=port))
def update_lease_expiration(self, context, **kwargs):
"""Release the fixed_ip associated the subnet on a port."""
# NOTE(arosen): This method is no longer used by the DHCP agent but is
# left so that neutron-dhcp-agents will still continue to work if
# neutron-server is upgraded and not the agent.
host = kwargs.get('host')
LOG.warning(_('Updating lease expiration is now deprecated. Issued '
'from host %s.'), host)
def create_dhcp_port(self, context, **kwargs):
"""Create and return dhcp port information.
If an expected failure occurs, a None port is returned.
"""
host = kwargs.get('host')
port = kwargs.get('port')
LOG.debug(_('Create dhcp port %(port)s '
'from %(host)s.'),
{'port': port,
'host': host})
port['port']['device_owner'] = constants.DEVICE_OWNER_DHCP
port['port'][portbindings.HOST_ID] = host
if 'mac_address' not in port['port']:
port['port']['mac_address'] = attributes.ATTR_NOT_SPECIFIED
plugin = manager.NeutronManager.get_plugin()
return self._port_action(plugin, context, port, 'create_port')
def update_dhcp_port(self, context, **kwargs):
"""Update the dhcp port."""
host = kwargs.get('host')
port_id = kwargs.get('port_id')
port = kwargs.get('port')
LOG.debug(_('Update dhcp port %(port)s '
'from %(host)s.'),
{'port': port,
'host': host})
plugin = manager.NeutronManager.get_plugin()
return self._port_action(plugin, context,
{'id': port_id, 'port': port},
'update_port')

@@ -1,163 +0,0 @@
# Copyright (c) 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy.orm import exc
from sqlalchemy.sql import expression as expr
from neutron.api.v2 import attributes
from neutron.common import constants as l3_constants
from neutron.common import exceptions as n_exc
from neutron.db import db_base_plugin_v2
from neutron.db import model_base
from neutron.db import models_v2
from neutron.extensions import external_net
from neutron import manager
from neutron.plugins.common import constants as service_constants
DEVICE_OWNER_ROUTER_GW = l3_constants.DEVICE_OWNER_ROUTER_GW
class ExternalNetwork(model_base.BASEV2):
network_id = sa.Column(sa.String(36),
sa.ForeignKey('networks.id', ondelete="CASCADE"),
primary_key=True)
# Add a relationship to the Network model in order to instruct
# SQLAlchemy to eagerly load this association
network = orm.relationship(
models_v2.Network,
backref=orm.backref("external", lazy='joined',
uselist=False, cascade='delete'))
class External_net_db_mixin(object):
"""Mixin class to add external network methods to db_base_plugin_v2."""
def _network_model_hook(self, context, original_model, query):
query = query.outerjoin(ExternalNetwork,
(original_model.id ==
ExternalNetwork.network_id))
return query
def _network_filter_hook(self, context, original_model, conditions):
if conditions is not None and not hasattr(conditions, '__iter__'):
conditions = (conditions, )
# Apply the external network filter only in non-admin context
if not context.is_admin and hasattr(original_model, 'tenant_id'):
conditions = expr.or_(ExternalNetwork.network_id != expr.null(),
*conditions)
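            # The or_ makes external networks visible to non-admins in
            # addition to whatever the original tenant-based conditions allow.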
return conditions
def _network_result_filter_hook(self, query, filters):
vals = filters and filters.get(external_net.EXTERNAL, [])
if not vals:
return query
if vals[0]:
return query.filter((ExternalNetwork.network_id != expr.null()))
return query.filter((ExternalNetwork.network_id == expr.null()))
# TODO(salvatore-orlando): Perform this operation without explicitly
# referring to db_base_plugin_v2, as plugins that do not extend from it
# might exist in the future
db_base_plugin_v2.NeutronDbPluginV2.register_model_query_hook(
models_v2.Network,
"external_net",
'_network_model_hook',
'_network_filter_hook',
'_network_result_filter_hook')
def _network_is_external(self, context, net_id):
try:
context.session.query(ExternalNetwork).filter_by(
network_id=net_id).one()
return True
except exc.NoResultFound:
return False
def _extend_network_dict_l3(self, network_res, network_db):
        # Compare with None to convert the uuid into a bool
network_res[external_net.EXTERNAL] = network_db.external is not None
return network_res
# Register dict extend functions for networks
db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
attributes.NETWORKS, ['_extend_network_dict_l3'])
def _process_l3_create(self, context, net_data, req_data):
external = req_data.get(external_net.EXTERNAL)
external_set = attributes.is_attr_set(external)
if not external_set:
return
if external:
# expects to be called within a plugin's session
context.session.add(ExternalNetwork(network_id=net_data['id']))
net_data[external_net.EXTERNAL] = external
def _process_l3_update(self, context, net_data, req_data):
new_value = req_data.get(external_net.EXTERNAL)
net_id = net_data['id']
if not attributes.is_attr_set(new_value):
return
if net_data.get(external_net.EXTERNAL) == new_value:
return
if new_value:
context.session.add(ExternalNetwork(network_id=net_id))
net_data[external_net.EXTERNAL] = True
else:
            # must make sure we do not have any external gateway ports
            # (and thus, possibly floating IPs) on this network before
            # allowing it to be updated to external=False
port = context.session.query(models_v2.Port).filter_by(
device_owner=DEVICE_OWNER_ROUTER_GW,
network_id=net_data['id']).first()
if port:
raise external_net.ExternalNetworkInUse(net_id=net_id)
context.session.query(ExternalNetwork).filter_by(
network_id=net_id).delete()
net_data[external_net.EXTERNAL] = False
def _process_l3_delete(self, context, network_id):
l3plugin = manager.NeutronManager.get_service_plugins().get(
service_constants.L3_ROUTER_NAT)
if l3plugin:
l3plugin.delete_disassociated_floatingips(context, network_id)
def _filter_nets_l3(self, context, nets, filters):
vals = filters and filters.get(external_net.EXTERNAL, [])
if not vals:
return nets
ext_nets = set(en['network_id']
for en in context.session.query(ExternalNetwork))
if vals[0]:
return [n for n in nets if n['id'] in ext_nets]
else:
return [n for n in nets if n['id'] not in ext_nets]
def get_external_network_id(self, context):
nets = self.get_networks(context, {external_net.EXTERNAL: [True]})
if len(nets) > 1:
raise n_exc.TooManyExternalNetworks()
else:
return nets[0]['id'] if nets else None

@@ -1,127 +0,0 @@
# Copyright (c) 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Don Kehn, dekehn@gmail.com
#
import sqlalchemy as sa
from sqlalchemy import orm
from neutron.api.v2 import attributes
from neutron.db import db_base_plugin_v2
from neutron.db import model_base
from neutron.db import models_v2
from neutron.extensions import extra_dhcp_opt as edo_ext
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class ExtraDhcpOpt(model_base.BASEV2, models_v2.HasId):
"""Represent a generic concept of extra options associated to a port.
    Each port may have zero or more DHCP options associated with it; these
    can supply different or extra options to DHCP clients.
These will be written to the <network_id>/opts files, and each option's
tag will be referenced in the <network_id>/host file.
"""
port_id = sa.Column(sa.String(36),
sa.ForeignKey('ports.id', ondelete="CASCADE"),
nullable=False)
opt_name = sa.Column(sa.String(64), nullable=False)
opt_value = sa.Column(sa.String(255), nullable=False)
__table_args__ = (sa.UniqueConstraint('port_id',
'opt_name',
name='uidx_portid_optname'),
model_base.BASEV2.__table_args__,)
# Add a relationship to the Port model in order to instruct SQLAlchemy to
# eagerly load extra_dhcp_opts bindings
ports = orm.relationship(
models_v2.Port,
backref=orm.backref("dhcp_opts", lazy='joined', cascade='delete'))
class ExtraDhcpOptMixin(object):
"""Mixin class to add extra options to the DHCP opts file
    and associate them with a port.
"""
def _process_port_create_extra_dhcp_opts(self, context, port,
extra_dhcp_opts):
if not extra_dhcp_opts:
return port
with context.session.begin(subtransactions=True):
for dopt in extra_dhcp_opts:
if dopt['opt_value']:
db = ExtraDhcpOpt(
port_id=port['id'],
opt_name=dopt['opt_name'],
opt_value=dopt['opt_value'])
context.session.add(db)
return self._extend_port_extra_dhcp_opts_dict(context, port)
def _extend_port_extra_dhcp_opts_dict(self, context, port):
port[edo_ext.EXTRADHCPOPTS] = self._get_port_extra_dhcp_opts_binding(
context, port['id'])
def _get_port_extra_dhcp_opts_binding(self, context, port_id):
query = self._model_query(context, ExtraDhcpOpt)
binding = query.filter(ExtraDhcpOpt.port_id == port_id)
return [{'opt_name': r.opt_name, 'opt_value': r.opt_value}
for r in binding]
def _update_extra_dhcp_opts_on_port(self, context, id, port,
updated_port=None):
        # It is not necessary to update in a transaction, because this is
        # called from within one by ovs_neutron_plugin.
dopts = port['port'].get(edo_ext.EXTRADHCPOPTS)
if dopts:
opt_db = self._model_query(
context, ExtraDhcpOpt).filter_by(port_id=id).all()
            # if there are currently no dhcp_options associated with
            # this port, then just insert the new ones and be done.
with context.session.begin(subtransactions=True):
for upd_rec in dopts:
for opt in opt_db:
if opt['opt_name'] == upd_rec['opt_name']:
                            # handle deleting an opt from the port.
if upd_rec['opt_value'] is None:
context.session.delete(opt)
elif opt['opt_value'] != upd_rec['opt_value']:
opt.update(
{'opt_value': upd_rec['opt_value']})
break
else:
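                        # NOTE: this for/else branch runs only when the inner
                        # loop did not break, i.e. no existing row matched
                        # opt_name, so the option is new for this port.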
if upd_rec['opt_value'] is not None:
db = ExtraDhcpOpt(
port_id=id,
opt_name=upd_rec['opt_name'],
opt_value=upd_rec['opt_value'])
context.session.add(db)
if updated_port:
edolist = self._get_port_extra_dhcp_opts_binding(context, id)
updated_port[edo_ext.EXTRADHCPOPTS] = edolist
return bool(dopts)
def _extend_port_dict_extra_dhcp_opt(self, res, port):
res[edo_ext.EXTRADHCPOPTS] = [{'opt_name': dho.opt_name,
'opt_value': dho.opt_value}
for dho in port.dhcp_opts]
return res
db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
attributes.PORTS, ['_extend_port_dict_extra_dhcp_opt'])

@@ -1,185 +0,0 @@
# Copyright 2013, Nachi Ueno, NTT MCL, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from oslo.config import cfg
import sqlalchemy as sa
from sqlalchemy import orm
from neutron.common import utils
from neutron.db import db_base_plugin_v2
from neutron.db import l3_db
from neutron.db import model_base
from neutron.db import models_v2
from neutron.extensions import extraroute
from neutron.extensions import l3
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
extra_route_opts = [
    # TODO(nati): use the quota framework when it supports quotas for attributes
cfg.IntOpt('max_routes', default=30,
help=_("Maximum number of routes")),
]
cfg.CONF.register_opts(extra_route_opts)
class RouterRoute(model_base.BASEV2, models_v2.Route):
router_id = sa.Column(sa.String(36),
sa.ForeignKey('routers.id',
ondelete="CASCADE"),
primary_key=True)
router = orm.relationship(l3_db.Router,
backref=orm.backref("route_list",
lazy='joined',
cascade='delete'))
class ExtraRoute_db_mixin(l3_db.L3_NAT_db_mixin):
"""Mixin class to support extra route configuration on router."""
def _extend_router_dict_extraroute(self, router_res, router_db):
router_res['routes'] = (ExtraRoute_db_mixin.
_make_extra_route_list(
router_db['route_list']
))
db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
l3.ROUTERS, ['_extend_router_dict_extraroute'])
def update_router(self, context, id, router):
r = router['router']
with context.session.begin(subtransactions=True):
            # check that the router exists and that we have permission to
            # access it
router_db = self._get_router(context, id)
if 'routes' in r:
self._update_extra_routes(context, router_db, r['routes'])
routes = self._get_extra_routes_by_router_id(context, id)
router_updated = super(ExtraRoute_db_mixin, self).update_router(
context, id, router)
router_updated['routes'] = routes
return router_updated
def _get_subnets_by_cidr(self, context, cidr):
query_subnets = context.session.query(models_v2.Subnet)
return query_subnets.filter_by(cidr=cidr).all()
def _validate_routes_nexthop(self, cidrs, ips, routes, nexthop):
        # Note(nati): the nexthop must be directly connected, so we check
        # that it belongs to one of the CIDRs of the router's ports
if not netaddr.all_matching_cidrs(nexthop, cidrs):
raise extraroute.InvalidRoutes(
routes=routes,
reason=_('the nexthop is not connected with router'))
        # Note(nati): the nexthop must not be one of the router's own fixed IPs
if nexthop in ips:
raise extraroute.InvalidRoutes(
routes=routes,
reason=_('the nexthop is used by router'))
def _validate_routes(self, context,
router_id, routes):
if len(routes) > cfg.CONF.max_routes:
raise extraroute.RoutesExhausted(
router_id=router_id,
quota=cfg.CONF.max_routes)
filters = {'device_id': [router_id]}
ports = self._core_plugin.get_ports(context, filters)
cidrs = []
ips = []
for port in ports:
for ip in port['fixed_ips']:
cidrs.append(self._core_plugin._get_subnet(
context, ip['subnet_id'])['cidr'])
ips.append(ip['ip_address'])
for route in routes:
self._validate_routes_nexthop(
cidrs, ips, routes, route['nexthop'])
def _update_extra_routes(self, context, router, routes):
self._validate_routes(context, router['id'],
routes)
old_routes, routes_dict = self._get_extra_routes_dict_by_router_id(
context, router['id'])
added, removed = utils.diff_list_of_dict(old_routes,
routes)
LOG.debug(_('Added routes are %s'), added)
for route in added:
router_routes = RouterRoute(
router_id=router['id'],
destination=route['destination'],
nexthop=route['nexthop'])
context.session.add(router_routes)
LOG.debug(_('Removed routes are %s'), removed)
for route in removed:
context.session.delete(
routes_dict[(route['destination'], route['nexthop'])])
@staticmethod
def _make_extra_route_list(extra_routes):
return [{'destination': route['destination'],
'nexthop': route['nexthop']}
for route in extra_routes]
def _get_extra_routes_by_router_id(self, context, id):
query = context.session.query(RouterRoute)
query = query.filter_by(router_id=id)
return self._make_extra_route_list(query)
def _get_extra_routes_dict_by_router_id(self, context, id):
query = context.session.query(RouterRoute)
query = query.filter_by(router_id=id)
routes = []
routes_dict = {}
for route in query:
routes.append({'destination': route['destination'],
'nexthop': route['nexthop']})
routes_dict[(route['destination'], route['nexthop'])] = route
return routes, routes_dict
def get_router(self, context, id, fields=None):
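        # The read is wrapped in a subtransaction, presumably so the router
        # row and its eagerly loaded route_list are fetched consistently.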
with context.session.begin(subtransactions=True):
router = super(ExtraRoute_db_mixin, self).get_router(
context, id, fields)
return router
def get_routers(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
with context.session.begin(subtransactions=True):
routers = super(ExtraRoute_db_mixin, self).get_routers(
context, filters, fields, sorts=sorts, limit=limit,
marker=marker, page_reverse=page_reverse)
return routers
def _confirm_router_interface_not_in_use(self, context, router_id,
subnet_id):
super(ExtraRoute_db_mixin, self)._confirm_router_interface_not_in_use(
context, router_id, subnet_id)
subnet_db = self._core_plugin._get_subnet(context, subnet_id)
subnet_cidr = netaddr.IPNetwork(subnet_db['cidr'])
extra_routes = self._get_extra_routes_by_router_id(context, router_id)
for route in extra_routes:
if netaddr.all_matching_cidrs(route['nexthop'], [subnet_cidr]):
raise extraroute.RouterInterfaceInUseByRoute(
router_id=router_id, subnet_id=subnet_id)

@@ -1,16 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

@@ -1,481 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 Big Switch Networks, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#