add timestamp patch for cascaded neutron server
Change-Id: Ie28b643009964fa4246f6df937d047d81676ac3c
This commit is contained in:
parent
b7e6e4dccf
commit
83da6bf451
|
@ -0,0 +1,103 @@
|
|||
#!/bin/bash
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Copyright (c) 2014 Huawei Technologies.
#
# Installs the timestamp patch for a cascaded neutron server:
# backs up the installed neutron package, copies the patched code in,
# upgrades the database schema, and restarts the affected services.

_NEUTRON_CONF_DIR="/etc/neutron"
_NEUTRON_CONF_FILE='neutron.conf'
_NEUTRON_INSTALL="/usr/lib/python2.7/dist-packages"
_NEUTRON_DIR="${_NEUTRON_INSTALL}/neutron"
# if you did not make changes to the installation files,
# please do not edit the following directories.
_CODE_DIR="../neutron/"
_BACKUP_DIR="${_NEUTRON_INSTALL}/.neutron-cascaded-timestamp-patch-installation-backup"

# The script copies into system directories and restarts services,
# so it must run as root.
if [[ ${EUID} -ne 0 ]]; then
    echo "Please run as root."
    exit 1
fi

##Redirecting output to logfile as well as stdout
#exec > >(tee -a ${_SCRIPT_LOGFILE})
#exec 2> >(tee -a ${_SCRIPT_LOGFILE} >&2)

# Run relative to the script's own directory so _CODE_DIR resolves.
# Quoted $(dirname "$0") is safe for paths containing spaces; abort if
# the directory change fails rather than copying from the wrong place.
cd "$(dirname "$0")" || exit 1

echo "checking installation directories..."
if [ ! -d "${_NEUTRON_DIR}" ] ; then
    echo "Could not find the neutron installation. Please check the variables in the beginning of the script."
    echo "aborted."
    exit 1
fi
if [ ! -f "${_NEUTRON_CONF_DIR}/${_NEUTRON_CONF_FILE}" ] ; then
    echo "Could not find neutron config file. Please check the variables in the beginning of the script."
    echo "aborted."
    exit 1
fi

echo "checking previous installation..."
if [ -d "${_BACKUP_DIR}/neutron" ] ; then
    echo "It seems neutron-server-cascaded-timestamp-patch has already been installed!"
    echo "Please check README for solution if this is not true."
    exit 1
fi

echo "backing up current files that might be overwritten..."
mkdir -p "${_BACKUP_DIR}"
cp -r "${_NEUTRON_DIR}/" "${_BACKUP_DIR}/"
if [ $? -ne 0 ] ; then
    rm -r "${_BACKUP_DIR}/neutron"
    echo "Error in code backup, aborted."
    exit 1
fi

echo "copying in new files..."
cp -r "${_CODE_DIR}" "$(dirname "${_NEUTRON_DIR}")"
if [ $? -ne 0 ] ; then
    echo "Error in copying, aborted."
    echo "Recovering original files..."
    # Restore the backup; remove it only if the restore succeeded so the
    # backup is never lost while the install tree is broken.
    cp -r "${_BACKUP_DIR}/neutron" "$(dirname "${_NEUTRON_DIR}")" && rm -r "${_BACKUP_DIR}/neutron"
    if [ $? -ne 0 ] ; then
        echo "Recovering failed! Please install manually."
    fi
    exit 1
fi

echo "upgrade DB for cascaded-timestamp-patch..."
neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head
if [ $? -ne 0 ] ; then
    echo "There was an error in upgrading DB for cascaded-timestamp-patch, please check cascaded neutron server code manually."
    exit 1
fi

echo "restarting cascaded neutron server..."
service neutron-server restart
if [ $? -ne 0 ] ; then
    echo "There was an error in restarting the service, please restart cascaded neutron server manually."
    exit 1
fi

echo "restarting cascaded neutron-plugin-openvswitch-agent..."
service neutron-plugin-openvswitch-agent restart
if [ $? -ne 0 ] ; then
    echo "There was an error in restarting the service, please restart cascaded neutron-plugin-openvswitch-agent manually."
    exit 1
fi

echo "restarting cascaded neutron-l3-agent..."
service neutron-l3-agent restart
if [ $? -ne 0 ] ; then
    echo "There was an error in restarting the service, please restart cascaded neutron-l3-agent manually."
    exit 1
fi

echo "Completed."
echo "See README to get started."

exit 0
|
|
@ -0,0 +1,203 @@
|
|||
# Copyright (c) 2014 OpenStack Foundation.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import weakref
|
||||
|
||||
from sqlalchemy import sql
|
||||
|
||||
from neutron.common import exceptions as n_exc
|
||||
from neutron.db import sqlalchemyutils
|
||||
|
||||
from neutron.openstack.common import timeutils
|
||||
|
||||
class CommonDbMixin(object):
    """Common methods used in core and service plugins."""
    # Plugins, mixin classes implementing extension will register
    # hooks into the dict below for "augmenting" the "core way" of
    # building a query for retrieving objects from a model class.
    # To this aim, the register_model_query_hook and unregister_query_hook
    # from this class should be invoked
    _model_query_hooks = {}

    # This dictionary will store methods for extending attributes of
    # api resources. Mixins can use this dict for adding their own methods
    # TODO(salvatore-orlando): Avoid using class-level variables
    _dict_extend_functions = {}

    @classmethod
    def register_model_query_hook(cls, model, name, query_hook, filter_hook,
                                  result_filters=None):
        """Register a hook to be invoked when a query is executed.

        Add the hooks to the _model_query_hooks dict. Models are the keys
        of this dict, whereas the value is another dict mapping hook names to
        callables performing the hook.
        Each hook has a "query" component, used to build the query expression
        and a "filter" component, which is used to build the filter expression.

        Query hooks take as input the query being built and return a
        transformed query expression.

        Filter hooks take as input the filter expression being built and return
        a transformed filter expression
        """
        model_hooks = cls._model_query_hooks.get(model)
        if not model_hooks:
            # add key to dict
            model_hooks = {}
            cls._model_query_hooks[model] = model_hooks
        model_hooks[name] = {'query': query_hook, 'filter': filter_hook,
                             'result_filters': result_filters}

    @property
    def safe_reference(self):
        """Return a weakref to the instance.

        Minimize the potential for the instance persisting
        unnecessarily in memory by returning a weakref proxy that
        won't prevent deallocation.
        """
        return weakref.proxy(self)

    def _model_query(self, context, model):
        """Build the base query for ``model``.

        Applies tenant scoping for non-admin contexts and runs any
        registered query/filter hooks for the model.
        """
        query = context.session.query(model)
        # define basic filter condition for model query
        # NOTE(jkoelker) non-admin queries are scoped to their tenant_id
        # NOTE(salvatore-orlando): unless the model allows for shared objects
        query_filter = None
        if not context.is_admin and hasattr(model, 'tenant_id'):
            if hasattr(model, 'shared'):
                query_filter = ((model.tenant_id == context.tenant_id) |
                                (model.shared == sql.true()))
            else:
                query_filter = (model.tenant_id == context.tenant_id)
        # Execute query hooks registered from mixins and plugins
        for _name, hooks in self._model_query_hooks.get(model,
                                                        {}).iteritems():
            query_hook = hooks.get('query')
            if isinstance(query_hook, basestring):
                # Hooks may be registered by attribute name; resolve them
                # on this instance at call time.
                query_hook = getattr(self, query_hook, None)
            if query_hook:
                query = query_hook(context, model, query)

            filter_hook = hooks.get('filter')
            if isinstance(filter_hook, basestring):
                filter_hook = getattr(self, filter_hook, None)
            if filter_hook:
                query_filter = filter_hook(context, model, query_filter)

        # NOTE(salvatore-orlando): 'if query_filter' will try to evaluate the
        # condition, raising an exception
        if query_filter is not None:
            query = query.filter(query_filter)
        return query

    def _fields(self, resource, fields):
        """Project ``resource`` (a dict) onto the requested ``fields``.

        Returns the resource unchanged when no field selection is given.
        """
        if fields:
            return dict(((key, item) for key, item in resource.items()
                         if key in fields))
        return resource

    def _get_tenant_id_for_create(self, context, resource):
        """Return the tenant id to use when creating ``resource``.

        Admins may create resources on behalf of any tenant; other
        contexts may only use their own tenant id.

        :raises n_exc.AdminRequired: if a non-admin supplies a foreign
            tenant_id.
        """
        if context.is_admin and 'tenant_id' in resource:
            tenant_id = resource['tenant_id']
        elif ('tenant_id' in resource and
              resource['tenant_id'] != context.tenant_id):
            reason = _('Cannot create resource for another tenant')
            raise n_exc.AdminRequired(reason=reason)
        else:
            tenant_id = context.tenant_id
        return tenant_id

    def _get_by_id(self, context, model, id):
        # ``one()`` raises if zero or more than one row matches.
        query = self._model_query(context, model)
        return query.filter(model.id == id).one()

    def _apply_filters_to_query(self, query, model, filters):
        """Apply the API-level ``filters`` dict to ``query``.

        Keys matching a model column are filtered with an IN clause.
        The special 'changes_since' key additionally filters on
        ``model.updated_at`` so callers can poll for resources modified
        after a given ISO8601 timestamp (the timestamp-patch behavior).
        Registered 'result_filters' hooks are applied last.
        """
        if filters:
            for key, value in filters.iteritems():
                column = getattr(model, key, None)
                if column:
                    query = query.filter(column.in_(value))
            if 'changes_since' in filters:
                # The filter value may arrive as a single-element list
                # (standard API filter form) or as a bare string.
                if isinstance(filters['changes_since'], list):
                    changes_since = timeutils.parse_isotime(filters['changes_since'][0])
                else:
                    changes_since = timeutils.parse_isotime(filters['changes_since'])
                updated_at = timeutils.normalize_time(changes_since)
                query = query.filter(model.updated_at >= updated_at)
            for _name, hooks in self._model_query_hooks.get(model,
                                                            {}).iteritems():
                result_filter = hooks.get('result_filters', None)
                if isinstance(result_filter, basestring):
                    result_filter = getattr(self, result_filter, None)

                if result_filter:
                    query = result_filter(query, filters)
        return query

    def _apply_dict_extend_functions(self, resource_type,
                                     response, db_object):
        """Run registered extension functions for ``resource_type``.

        Each function may add attributes to the API ``response`` dict
        based on ``db_object``.
        """
        for func in self._dict_extend_functions.get(
            resource_type, []):
            args = (response, db_object)
            if isinstance(func, basestring):
                func = getattr(self, func, None)
            else:
                # must call unbound method - use self as 1st argument
                args = (self,) + args
            if func:
                func(*args)

    def _get_collection_query(self, context, model, filters=None,
                              sorts=None, limit=None, marker_obj=None,
                              page_reverse=False):
        """Return the filtered, sorted, paginated query for ``model``."""
        collection = self._model_query(context, model)
        collection = self._apply_filters_to_query(collection, model, filters)
        if limit and page_reverse and sorts:
            # Invert each sort direction so the page is walked backwards
            # from the marker.
            sorts = [(s[0], not s[1]) for s in sorts]
        collection = sqlalchemyutils.paginate_query(collection, model, limit,
                                                    sorts,
                                                    marker_obj=marker_obj)
        return collection

    def _get_collection(self, context, model, dict_func, filters=None,
                        fields=None, sorts=None, limit=None, marker_obj=None,
                        page_reverse=False):
        """Return a list of ``dict_func(obj, fields)`` for matching rows."""
        query = self._get_collection_query(context, model, filters=filters,
                                           sorts=sorts,
                                           limit=limit,
                                           marker_obj=marker_obj,
                                           page_reverse=page_reverse)
        items = [dict_func(c, fields) for c in query]
        if limit and page_reverse:
            # Restore natural order after the reversed-pagination query.
            items.reverse()
        return items

    def _get_collection_count(self, context, model, filters=None):
        """Return the number of rows matching ``filters``."""
        return self._get_collection_query(context, model, filters).count()

    def _get_marker_obj(self, context, resource, limit, marker):
        # Resolve a pagination marker id into its DB object via the
        # plugin's _get_<resource>() accessor; None when not paginating.
        if limit and marker:
            return getattr(self, '_get_%s' % resource)(context, marker)
        return None

    def _filter_non_model_columns(self, data, model):
        """Remove all the attributes from data which are not columns of
        the model passed as second parameter.
        """
        columns = [c.name for c in model.__table__.columns]
        return dict((k, v) for (k, v) in
                    data.iteritems() if k in columns)
|
|
@ -0,0 +1,132 @@
|
|||
# Copyright 2014 OpenStack Foundation
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
|
||||
# Initial operations for core resources
|
||||
from alembic import op
|
||||
import sqlalchemy as sa
|
||||
|
||||
|
||||
def upgrade():
    """Create the core Neutron resource tables.

    Tables are created parents-first so foreign keys always reference
    an existing table. Note that only 'ports' carries the
    created_at/updated_at/deleted_at timestamp columns here, which is
    what the cascaded-timestamp patch queries against.
    """
    # Networks: top-level container; several tables below reference
    # networks.id.
    op.create_table(
        'networks',
        sa.Column('tenant_id', sa.String(length=255), nullable=True),
        sa.Column('id', sa.String(length=36), nullable=False),
        sa.Column('name', sa.String(length=255), nullable=True),
        sa.Column('status', sa.String(length=16), nullable=True),
        sa.Column('admin_state_up', sa.Boolean(), nullable=True),
        sa.Column('shared', sa.Boolean(), nullable=True),
        sa.PrimaryKeyConstraint('id'))

    # Ports: includes the timestamp columns used by the 'changes_since'
    # API filter.
    # NOTE(review): deleted_at is created here but no corresponding model
    # column is visible in this patch — confirm it is intentionally unused.
    op.create_table(
        'ports',
        sa.Column('created_at', sa.DateTime),
        sa.Column('updated_at', sa.DateTime),
        sa.Column('deleted_at', sa.DateTime),
        sa.Column('tenant_id', sa.String(length=255), nullable=True),
        sa.Column('id', sa.String(length=36), nullable=False),
        sa.Column('name', sa.String(length=255), nullable=True),
        sa.Column('network_id', sa.String(length=36), nullable=False),
        sa.Column('mac_address', sa.String(length=32), nullable=False),
        sa.Column('admin_state_up', sa.Boolean(), nullable=False),
        sa.Column('status', sa.String(length=16), nullable=False),
        sa.Column('device_id', sa.String(length=255), nullable=False),
        sa.Column('device_owner', sa.String(length=255), nullable=False),
        sa.ForeignKeyConstraint(['network_id'], ['networks.id'], ),
        sa.PrimaryKeyConstraint('id'))

    op.create_table(
        'subnets',
        sa.Column('tenant_id', sa.String(length=255), nullable=True),
        sa.Column('id', sa.String(length=36), nullable=False),
        sa.Column('name', sa.String(length=255), nullable=True),
        sa.Column('network_id', sa.String(length=36), nullable=True),
        sa.Column('ip_version', sa.Integer(), nullable=False),
        sa.Column('cidr', sa.String(length=64), nullable=False),
        sa.Column('gateway_ip', sa.String(length=64), nullable=True),
        sa.Column('enable_dhcp', sa.Boolean(), nullable=True),
        sa.Column('shared', sa.Boolean(), nullable=True),
        sa.ForeignKeyConstraint(['network_id'], ['networks.id'], ),
        sa.PrimaryKeyConstraint('id'))

    # Per-subnet DNS nameserver addresses.
    op.create_table(
        'dnsnameservers',
        sa.Column('address', sa.String(length=128), nullable=False),
        sa.Column('subnet_id', sa.String(length=36), nullable=False),
        sa.ForeignKeyConstraint(['subnet_id'], ['subnets.id'],
                                ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('address', 'subnet_id'))

    # IP allocation pools: the [first_ip, last_ip] ranges a subnet may
    # allocate from.
    op.create_table(
        'ipallocationpools',
        sa.Column('id', sa.String(length=36), nullable=False),
        sa.Column('subnet_id', sa.String(length=36), nullable=True),
        sa.Column('first_ip', sa.String(length=64), nullable=False),
        sa.Column('last_ip', sa.String(length=64), nullable=False),
        sa.ForeignKeyConstraint(['subnet_id'], ['subnets.id'],
                                ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('id'))

    # Per-subnet host routes (destination CIDR -> nexthop).
    op.create_table(
        'subnetroutes',
        sa.Column('destination', sa.String(length=64), nullable=False),
        sa.Column('nexthop', sa.String(length=64), nullable=False),
        sa.Column('subnet_id', sa.String(length=36), nullable=False),
        sa.ForeignKeyConstraint(['subnet_id'], ['subnets.id'],
                                ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('destination', 'nexthop', 'subnet_id'))

    # Concrete IP assignments linking a port to an address in a subnet.
    op.create_table(
        'ipallocations',
        sa.Column('port_id', sa.String(length=36), nullable=True),
        sa.Column('ip_address', sa.String(length=64), nullable=False),
        sa.Column('subnet_id', sa.String(length=36), nullable=False),
        sa.Column('network_id', sa.String(length=36), nullable=False),
        sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
                                ondelete='CASCADE'),
        sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'),
        sa.ForeignKeyConstraint(['subnet_id'], ['subnets.id'],
                                ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('ip_address', 'subnet_id', 'network_id'))

    # Remaining unallocated sub-ranges within each allocation pool.
    op.create_table(
        'ipavailabilityranges',
        sa.Column('allocation_pool_id', sa.String(length=36), nullable=False),
        sa.Column('first_ip', sa.String(length=64), nullable=False),
        sa.Column('last_ip', sa.String(length=64), nullable=False),
        sa.ForeignKeyConstraint(['allocation_pool_id'],
                                ['ipallocationpools.id'], ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('allocation_pool_id', 'first_ip', 'last_ip'))

    # NOTE(review): references 'agents.id', a table not created in this
    # migration — assumes it exists from another migration; verify ordering.
    op.create_table(
        'networkdhcpagentbindings',
        sa.Column('network_id', sa.String(length=36), nullable=False),
        sa.Column('dhcp_agent_id', sa.String(length=36), nullable=False),
        sa.ForeignKeyConstraint(['dhcp_agent_id'], ['agents.id'],
                                ondelete='CASCADE'),
        sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
                                ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('network_id', 'dhcp_agent_id'))
|
||||
|
||||
|
||||
def downgrade():
    """Drop the core resource tables created by upgrade().

    Tables are dropped children-first (the exact reverse of the creation
    order) so no foreign key ever references a missing table.
    """
    tables_newest_first = (
        'networkdhcpagentbindings',
        'ipavailabilityranges',
        'ipallocations',
        'subnetroutes',
        'ipallocationpools',
        'dnsnameservers',
        'subnets',
        'ports',
        'networks',
    )
    for table_name in tables_newest_first:
        op.drop_table(table_name)
|
|
@ -0,0 +1,44 @@
|
|||
# Copyright 2014 OpenStack Foundation
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
|
||||
"""add port timestamp revision
|
||||
|
||||
Revision ID: 238cf36dab26
|
||||
Revises: juno
|
||||
Create Date: 2014-11-27 17:04:05.835703
|
||||
|
||||
"""
|
||||
|
||||
# revision identifiers, used by Alembic.
|
||||
revision = '238cf36dab26'
|
||||
down_revision = 'juno'
|
||||
|
||||
from alembic import op
|
||||
import sqlalchemy as sa
|
||||
|
||||
|
||||
|
||||
def upgrade():
    """Add the created_at/updated_at timestamp columns to 'ports'."""
    ### commands auto generated by Alembic - please adjust! ###
    for column_name in ('created_at', 'updated_at'):
        op.add_column('ports',
                      sa.Column(column_name, sa.DateTime(), nullable=True))
    ### end Alembic commands ###
|
||||
|
||||
|
||||
def downgrade():
    """Remove the timestamp columns added by upgrade()."""
    ### commands auto generated by Alembic - please adjust! ###
    for column_name in ('updated_at', 'created_at'):
        op.drop_column('ports', column_name)
    ### end Alembic commands ###
|
|
@ -0,0 +1,209 @@
|
|||
# Copyright (c) 2012 OpenStack Foundation.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import sqlalchemy as sa
|
||||
from sqlalchemy import orm
|
||||
|
||||
from neutron.common import constants
|
||||
from neutron.db import model_base
|
||||
from neutron.openstack.common import uuidutils
|
||||
|
||||
from neutron.openstack.common import timeutils
|
||||
|
||||
|
||||
class HasTenant(object):
    """Tenant mixin, add to subclasses that have a tenant."""

    # NOTE(jkoelker) tenant_id is just a free form string ;(
    # (no foreign key — tenants are not modeled locally)
    tenant_id = sa.Column(sa.String(255))
|
||||
|
||||
|
||||
class HasId(object):
    """id mixin, add to subclasses that have an id."""

    # UUID string primary key; generated by uuidutils.generate_uuid
    # when the caller does not supply one.
    id = sa.Column(sa.String(36),
                   primary_key=True,
                   default=uuidutils.generate_uuid)
|
||||
|
||||
|
||||
class HasStatusDescription(object):
    """Status with description mixin."""

    # Short state string plus an optional longer human-readable detail.
    status = sa.Column(sa.String(16), nullable=False)
    status_description = sa.Column(sa.String(255))
|
||||
|
||||
|
||||
class IPAvailabilityRange(model_base.BASEV2):
    """Internal representation of available IPs for Neutron subnets.

    Allocation - first entry from the range will be allocated.
    If the first entry is equal to the last entry then this row
    will be deleted.
    Recycling ips involves reading the IPAllocationPool and IPAllocation tables
    and inserting ranges representing available ips. This happens after the
    final allocation is pulled from this table and a new ip allocation is
    requested. Any contiguous ranges of available ips will be inserted as a
    single range.
    """

    # Rows are deleted automatically when their pool is deleted
    # (ondelete CASCADE).
    allocation_pool_id = sa.Column(sa.String(36),
                                   sa.ForeignKey('ipallocationpools.id',
                                                 ondelete="CASCADE"),
                                   nullable=False,
                                   primary_key=True)
    # Range endpoints stored as strings; both are part of the composite
    # primary key.
    first_ip = sa.Column(sa.String(64), nullable=False, primary_key=True)
    last_ip = sa.Column(sa.String(64), nullable=False, primary_key=True)

    def __repr__(self):
        return "%s - %s" % (self.first_ip, self.last_ip)
|
||||
|
||||
|
||||
class IPAllocationPool(model_base.BASEV2, HasId):
    """Representation of an allocation pool in a Neutron subnet."""

    subnet_id = sa.Column(sa.String(36), sa.ForeignKey('subnets.id',
                                                       ondelete="CASCADE"),
                          nullable=True)
    first_ip = sa.Column(sa.String(64), nullable=False)
    last_ip = sa.Column(sa.String(64), nullable=False)
    # Availability ranges are eagerly loaded with the pool and are
    # deleted with it (delete-orphan cascade).
    available_ranges = orm.relationship(IPAvailabilityRange,
                                        backref='ipallocationpool',
                                        lazy="joined",
                                        cascade='all, delete-orphan')

    def __repr__(self):
        return "%s - %s" % (self.first_ip, self.last_ip)
|
||||
|
||||
|
||||
class IPAllocation(model_base.BASEV2):
    """Internal representation of allocated IP addresses in a Neutron subnet.
    """

    # port_id is nullable: an allocation can exist before it is bound to
    # a port; DB rows are removed when the port is deleted (CASCADE).
    port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id',
                                                     ondelete="CASCADE"),
                        nullable=True)
    ip_address = sa.Column(sa.String(64), nullable=False, primary_key=True)
    subnet_id = sa.Column(sa.String(36), sa.ForeignKey('subnets.id',
                                                       ondelete="CASCADE"),
                          nullable=False, primary_key=True)
    network_id = sa.Column(sa.String(36), sa.ForeignKey("networks.id",
                                                        ondelete="CASCADE"),
                           nullable=False, primary_key=True)
|
||||
|
||||
|
||||
class Route(object):
    """mixin of a route."""

    # A route is identified by the (destination, nexthop) pair.
    destination = sa.Column(sa.String(64), nullable=False, primary_key=True)
    nexthop = sa.Column(sa.String(64), nullable=False, primary_key=True)
|
||||
|
||||
|
||||
class SubnetRoute(model_base.BASEV2, Route):
    """A host route attached to a subnet; removed with the subnet."""

    subnet_id = sa.Column(sa.String(36),
                          sa.ForeignKey('subnets.id',
                                        ondelete="CASCADE"),
                          primary_key=True)
|
||||
|
||||
|
||||
class Port(model_base.BASEV2, HasId, HasTenant):
    """Represents a port on a Neutron v2 network."""

    name = sa.Column(sa.String(255))
    network_id = sa.Column(sa.String(36), sa.ForeignKey("networks.id"),
                           nullable=False)
    fixed_ips = orm.relationship(IPAllocation, backref='ports', lazy='joined')
    mac_address = sa.Column(sa.String(32), nullable=False)
    admin_state_up = sa.Column(sa.Boolean(), nullable=False)
    status = sa.Column(sa.String(16), nullable=False)
    device_id = sa.Column(sa.String(255), nullable=False)
    device_owner = sa.Column(sa.String(255), nullable=False)
    # Timestamp columns added by the cascaded-timestamp patch:
    # created_at defaults to the insert time; updated_at also refreshes
    # on every UPDATE (onupdate), which is what 'changes_since'
    # polling relies on.
    created_at = sa.Column(sa.DateTime, default=lambda: timeutils.utcnow())
    updated_at = sa.Column(sa.DateTime, default=lambda: timeutils.utcnow(),
                           onupdate=lambda: timeutils.utcnow())

    def __init__(self, id=None, tenant_id=None, name=None, network_id=None,
                 mac_address=None, admin_state_up=None, status=None,
                 device_id=None, device_owner=None, fixed_ips=None):
        """Explicit constructor so attribute assignment order is controlled
        (notably: status is assigned last).
        """
        self.id = id
        self.tenant_id = tenant_id
        self.name = name
        self.network_id = network_id
        self.mac_address = mac_address
        self.admin_state_up = admin_state_up
        self.device_owner = device_owner
        self.device_id = device_id
        # Since this is a relationship only set it if one is passed in.
        if fixed_ips:
            self.fixed_ips = fixed_ips

        # NOTE(arosen): status must be set last as an event is triggered on!
        self.status = status
|
||||
|
||||
|
||||
class DNSNameServer(model_base.BASEV2):
    """Internal representation of a DNS nameserver."""

    address = sa.Column(sa.String(128), nullable=False, primary_key=True)
    # Deleted together with the owning subnet (ondelete CASCADE).
    subnet_id = sa.Column(sa.String(36),
                          sa.ForeignKey('subnets.id',
                                        ondelete="CASCADE"),
                          primary_key=True)
|
||||
|
||||
|
||||
class Subnet(model_base.BASEV2, HasId, HasTenant):
    """Represents a neutron subnet.

    When a subnet is created the first and last entries will be created. These
    are used for the IP allocation.
    """

    name = sa.Column(sa.String(255))
    network_id = sa.Column(sa.String(36), sa.ForeignKey('networks.id'))
    ip_version = sa.Column(sa.Integer, nullable=False)
    cidr = sa.Column(sa.String(64), nullable=False)
    gateway_ip = sa.Column(sa.String(64))
    # Pools are eagerly loaded; 'delete' cascade removes them with the
    # subnet.
    allocation_pools = orm.relationship(IPAllocationPool,
                                        backref='subnet',
                                        lazy="joined",
                                        cascade='delete')
    enable_dhcp = sa.Column(sa.Boolean())
    dns_nameservers = orm.relationship(DNSNameServer,
                                       backref='subnet',
                                       cascade='all, delete, delete-orphan')
    routes = orm.relationship(SubnetRoute,
                              backref='subnet',
                              cascade='all, delete, delete-orphan')
    shared = sa.Column(sa.Boolean)
    # IPv6 address-configuration modes; both nullable since they only
    # apply to v6 subnets.
    ipv6_ra_mode = sa.Column(sa.Enum(constants.IPV6_SLAAC,
                                     constants.DHCPV6_STATEFUL,
                                     constants.DHCPV6_STATELESS,
                                     name='ipv6_ra_modes'), nullable=True)
    ipv6_address_mode = sa.Column(sa.Enum(constants.IPV6_SLAAC,
                                          constants.DHCPV6_STATEFUL,
                                          constants.DHCPV6_STATELESS,
                                          name='ipv6_address_modes'), nullable=True)
|
||||
|
||||
|
||||
class Network(model_base.BASEV2, HasId, HasTenant):
    """Represents a v2 neutron network."""

    name = sa.Column(sa.String(255))
    ports = orm.relationship(Port, backref='networks')
    # Subnets are eagerly loaded together with the network.
    subnets = orm.relationship(Subnet, backref='networks',
                               lazy="joined")
    status = sa.Column(sa.String(16))
    admin_state_up = sa.Column(sa.Boolean)
    shared = sa.Column(sa.Boolean)
|
Loading…
Reference in New Issue