Transplant tricircle to juno version
The tricircle project was based on the OpenStack Icehouse release; this patch adapts it to the Juno release.

Change-Id: I1afe6122575ed2bedb6e3220177e161763ce161c
This commit is contained in: parent e211f7efef · commit e6d7dc78ce
@@ -49,6 +49,7 @@ from cinder import quota
from cinder import utils
from cinder import volume

+from cinder.i18n import _
from cinder.image import glance
from cinder.openstack.common import excutils
from cinder.openstack.common import log as logging
@@ -65,6 +66,7 @@ from eventlet.greenpool import GreenPool
LOG = logging.getLogger(__name__)

QUOTAS = quota.QUOTAS
+CGQUOTAS = quota.CGQUOTAS

volume_manager_opts = [
    cfg.IntOpt('migration_create_volume_timeout_secs',
@@ -75,6 +77,10 @@ volume_manager_opts = [
               default=5,
               help='seconds between cascading and cascaded cinders'
                    'when synchronizing volume data'),
+    cfg.IntOpt('voltype_sync_interval',
+               default=3600,
+               help='seconds between cascading and cascaded cinders'
+                    'when synchronizing volume type and qos data'),
    cfg.BoolOpt('volume_service_inithost_offload',
                default=False,
                help='Offload pending volume delete during '
@@ -165,7 +171,7 @@ class CinderProxy(manager.SchedulerDependentManager):

    """Manages attachable block storage devices."""

-    RPC_API_VERSION = '1.16'
+    RPC_API_VERSION = '1.18'
    target = messaging.Target(version=RPC_API_VERSION)

    def __init__(self, service_name=None, *args, **kwargs):
@@ -408,7 +414,8 @@ class CinderProxy(manager.SchedulerDependentManager):

    def create_volume(self, context, volume_id, request_spec=None,
                      filter_properties=None, allow_reschedule=True,
-                      snapshot_id=None, image_id=None, source_volid=None):
+                      snapshot_id=None, image_id=None, source_volid=None,
+                      source_replicaid=None, consistencygroup_id=None):
        """Creates and exports the volume."""

        ctx_dict = context.__dict__
@@ -529,7 +536,6 @@ class CinderProxy(manager.SchedulerDependentManager):
            if self._change_since_time is None:
                search_opt = {'all_tenants': True}
                volumes = cinderClient.volumes.list(search_opts=search_opt)
-                volumetypes = cinderClient.volume_types.list()
                LOG.info(_('Cascade info: change since time is none,'
                           'volumes:%s'), volumes)
            else:
@@ -544,7 +550,6 @@ class CinderProxy(manager.SchedulerDependentManager):
                search_op = {'all_tenants': True,
                             'changes-since': new_change_since_isotime}
                volumes = cinderClient.volumes.list(search_opts=search_op)
-                volumetypes = cinderClient.volume_types.list()
                LOG.info(_('Cascade info: search time is not none,'
                           'volumes:%s'), volumes)

@@ -563,13 +568,18 @@ class CinderProxy(manager.SchedulerDependentManager):
                        'attach_time': timeutils.strtime()
                        })
                elif volume_status == "available":
+                    if volume._info['bootable'].lower() == 'false':
+                        bv = '0'
+                    else:
+                        bv = '1'
                    self.db.volume_update(context, volume_id,
                                          {'status': volume._info['status'],
                                           'attach_status': 'detached',
                                           'instance_uuid': None,
                                           'attached_host': None,
                                           'mountpoint': None,
-                                           'attach_time': None
+                                           'attach_time': None,
+                                           'bootable': bv
                                           })
                else:
                    self.db.volume_update(context, volume_id,
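As a side note on the hunk above: the new code folds the cascaded volume's string-typed `bootable` field into the cascading DB update as '0'/'1'. A minimal, self-contained sketch of that mapping (the helper name and payload shape are illustrative, not the actual cinder-proxy API):

```python
# Stand-alone sketch of the update-dict construction added above.
# 'volume_info' mimics a cinderclient volume._info payload.

def build_available_update(volume_info):
    """Map a cascaded 'available' volume onto a cascading-DB update dict."""
    bootable = '0' if volume_info['bootable'].lower() == 'false' else '1'
    return {
        'status': volume_info['status'],
        'attach_status': 'detached',
        'instance_uuid': None,
        'attached_host': None,
        'mountpoint': None,
        'attach_time': None,
        'bootable': bootable,  # new in this patch: keep bootable in sync
    }

if __name__ == '__main__':
    print(build_available_update({'status': 'available', 'bootable': 'False'}))
```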
@@ -577,19 +587,79 @@ class CinderProxy(manager.SchedulerDependentManager):
            LOG.info(_('Cascade info: Updated the volume %s status from'
                       'cinder-proxy'), volume_id)

-            vol_types = self.db.volume_type_get_all(context, inactive=False)
-            for volumetype in volumetypes:
-                volume_type_name = volumetype._info['name']
-                if volume_type_name not in vol_types.keys():
-                    extra_specs = volumetype._info['extra_specs']
-                    self.db.volume_type_create(
-                        context,
-                        dict(name=volume_type_name, extra_specs=extra_specs))
-
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(_('Failed to sys volume status to db.'))

+    @periodic_task.periodic_task(spacing=CONF.voltype_sync_interval,
+                                 run_immediately=True)
+    def _heal_volumetypes_and_qos(self, context):
+
+        try:
+
+            cinderClient = self._get_cinder_cascaded_admin_client()
+
+            volumetypes = cinderClient.volume_types.list()
+            qosSpecs = cinderClient.qos_specs.list()
+
+            volname_type_list = []
+            vol_types = self.db.volume_type_get_all(context, inactive=False)
+            LOG.debug(_("cascade info, vol_types cascading :%s"), vol_types)
+            for vol_type in vol_types:
+                volname_type_list.append(vol_type)
+            for volumetype in volumetypes:
+                LOG.debug(_("cascade info, vol types cascaded :%s"),
+                          volumetype)
+                volume_type_name = volumetype._info['name']
+                if volume_type_name not in vol_types.keys():
+                    extraspec = volumetype._info['extra_specs']
+                    self.db.volume_type_create(
+                        context,
+                        dict(name=volume_type_name, extra_specs=extraspec))
+
+            qos_specs = self.db.qos_specs_get_all(context, inactive=False)
+            qosname_list_cascading = []
+            for qos_cascading in qos_specs:
+                qosname_list_cascading.append(qos_cascading['name'])
+                for qos_cascaded in qosSpecs:
+                    qos_name_cascaded = qos_cascaded._info['name']
+                    if qos_name_cascaded not in qosname_list_cascading:
+                        qos_create_val = {}
+                        qos_create_val['name'] = qos_name_cascaded
+                        qos_spec_value = qos_cascaded._info['specs']
+                        qos_spec_value['consumer'] = \
+                            qos_cascaded._info['consumer']
+                        qos_create_val['qos_specs'] = qos_spec_value
+                        LOG.info(_('Cascade info, create qos_spec %sin db'),
+                                 qos_name_cascaded)
+                        self.db.qos_specs_create(context, qos_create_val)
+                        LOG.info(_('Cascade info, qos_spec finished %sin db'),
+                                 qos_create_val)
+
+                    qos_specs_id = qos_cascading['id']
+                    assoc_ccd =\
+                        self.db.volume_type_qos_associations_get(context,
+                                                                 qos_specs_id)
+                    qos_id = qos_cascaded._info['id']
+                    association =\
+                        cinderClient.qos_specs.get_associations(qos_id)
+
+                    for assoc in association:
+                        assoc_name = assoc._info['name']
+                        LOG.debug(_("Cascade info, assoc name %s"), assoc_name)
+                        if assoc_ccd is None or assoc_name not in assoc_ccd:
+                            voltype = \
+                                self.db.volume_type_get_by_name(context,
+                                                                assoc_name)
+                            LOG.debug(_("Cascade info, voltypes %s"), voltype)
+                            self.db.qos_specs_associate(context,
+                                                        qos_cascading['id'],
+                                                        voltype['id'],)
+
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                LOG.error(_('Failed to sys volume type to db.'))

    @locked_volume_operation
    def delete_volume(self, context, volume_id, unmanage_only=False):
        """Deletes and unexports volume."""
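The new `_heal_volumetypes_and_qos` task is essentially a one-way reconciliation: anything present in the cascaded cinder but missing on the cascading side gets created. A minimal sketch of that reconciliation loop, with plain dicts and lists standing in for the cascaded client results and the cascading database (all names here are illustrative):

```python
# Illustrative reconciliation sketch; not tricircle code.

def heal_volume_types(cascading_db, cascaded_types):
    """Create any cascaded volume type that the cascading side lacks."""
    for vtype in cascaded_types:
        if vtype['name'] not in cascading_db:
            cascading_db[vtype['name']] = {
                'extra_specs': vtype.get('extra_specs', {})}

def heal_qos_specs(cascading_qos, cascaded_qos):
    """Create any cascaded qos spec that the cascading side lacks."""
    known = {q['name'] for q in cascading_qos}
    for qos in cascaded_qos:
        if qos['name'] not in known:
            spec = dict(qos['specs'], consumer=qos['consumer'])
            cascading_qos.append({'name': qos['name'], 'qos_specs': spec})

db_types = {'lvm': {'extra_specs': {}}}
heal_volume_types(db_types, [{'name': 'ceph', 'extra_specs': {'backend': 'rbd'}}])
print(sorted(db_types))  # ['ceph', 'lvm']
```

The real task runs under `@periodic_task.periodic_task(spacing=CONF.voltype_sync_interval)`, so this reconciliation repeats every `voltype_sync_interval` seconds (3600 by default, per the new config option above).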
@@ -606,9 +676,6 @@ class CinderProxy(manager.SchedulerDependentManager):
        if volume_ref['attach_status'] == "attached":
            # Volume is still attached, need to detach first
            raise exception.VolumeAttached(volume_id=volume_id)
-        if volume_ref['host'] != self.host:
-            raise exception.InvalidVolume(
-                reason=_("volume is not local to this node"))

        self._notify_about_volume_usage(context, volume_ref, "delete.start")
        self._reset_stats()
@@ -919,6 +986,15 @@ class CinderProxy(manager.SchedulerDependentManager):
        LOG.debug(_('Cascade info:upload volume to image,finish update'
                    'image %s locations %s.'), (image_id, locations))

+        volume = self.db.volume_get(context, volume_id)
+        if (volume['instance_uuid'] is None and
+                volume['attached_host'] is None):
+            self.db.volume_update(context, volume_id,
+                                  {'status': 'available'})
+        else:
+            self.db.volume_update(context, volume_id,
+                                  {'status': 'in-use'})
+
    def accept_transfer(self, context, volume_id, new_user, new_project):
        # NOTE(jdg): need elevated context as we haven't "given" the vol
        # yet
@@ -1021,16 +1097,21 @@ class CinderProxy(manager.SchedulerDependentManager):
    def _report_driver_status(self, context):
        LOG.info(_("Updating fake volume status"))
        fake_location_info = 'LVMVolumeDriver:Huawei:cinder-volumes:default:0'
-        volume_stats = {'QoS_support': False,
-                        'location_info': fake_location_info,
-                        'volume_backend_name': 'LVM_iSCSI',
-                        'free_capacity_gb': 1024,
-                        'driver_version': '2.0.0',
-                        'total_capacity_gb': 1024,
-                        'reserved_percentage': 0,
-                        'vendor_name': 'Open Source',
-                        'storage_protocol': 'iSCSI'
-                        }

+        volume_stats = {
+            'pools': [{
+                'pool_name': 'LVM_iSCSI',
+                'QoS_support': False,
+                'free_capacity_gb': 10240.0,
+                'location_info': fake_location_info,
+                'total_capacity_gb': 10240.0,
+                'reserved_percentage': 0
+            }],
+            'driver_version': '2.0.0',
+            'vendor_name': 'OpenSource',
+            'volume_backend_name': 'LVM_iSCSI',
+            'storage_protocol': 'iSCSI'}

        self.update_service_capabilities(volume_stats)

    def publish_service_capabilities(self, context):
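The replacement stats dict follows Juno's pool-aware scheduler format: per-pool capacity fields move under a `pools` list while driver-level keys stay at the top. A hedged sketch of a converter from the legacy flat layout to that shape (the helper is assumed for illustration, not part of the patch):

```python
# Hypothetical converter from the legacy flat stats dict (removed above)
# to the pool-aware format the replacement code reports.

def to_pools_format(flat):
    pool_keys = ('QoS_support', 'free_capacity_gb', 'location_info',
                 'total_capacity_gb', 'reserved_percentage')
    pool = {k: flat[k] for k in pool_keys if k in flat}
    pool['pool_name'] = flat.get('volume_backend_name', 'default')
    top = {k: v for k, v in flat.items() if k not in pool_keys}
    top['pools'] = [pool]  # one pool per backend in this simple sketch
    return top

legacy = {'QoS_support': False, 'free_capacity_gb': 1024,
          'total_capacity_gb': 1024, 'reserved_percentage': 0,
          'volume_backend_name': 'LVM_iSCSI', 'driver_version': '2.0.0',
          'vendor_name': 'Open Source', 'storage_protocol': 'iSCSI'}
print(to_pools_format(legacy)['pools'][0]['pool_name'])  # LVM_iSCSI
```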
@@ -19,7 +19,7 @@ _CINDER_DIR="/usr/lib64/python2.6/site-packages/cinder"
_CINDER_INSTALL_LOG="/var/log/cinder/cinder-proxy/installation/install.log"

# please set the option list set in cinder configure file
-_CINDER_CONF_OPTION=("volume_manager=cinder.volume.cinder_proxy.CinderProxy volume_sync_interval=5 periodic_interval=5 cinder_tenant_name=admin cinder_username=admin cinder_password=1234 keystone_auth_url=http://10.67.148.210:5000/v2.0/ glance_cascading_flag=False cascading_glance_url=10.67.148.210:9292 cascaded_glance_url=http://10.67.148.201:9292 cascaded_cinder_url=http://10.67.148.201:8776/v2/%(project_id)s cascaded_region_name=Region_AZ1 cascaded_available_zone=AZ1")
+_CINDER_CONF_OPTION=("volume_manager=cinder.volume.cinder_proxy.CinderProxy volume_sync_interval=5 voltype_sync_interval=3600 periodic_interval=5 cinder_tenant_name=admin cinder_username=admin cinder_password=1234 keystone_auth_url=http://10.67.148.210:5000/v2.0/ glance_cascading_flag=False cascading_glance_url=10.67.148.210:9292 cascaded_glance_url=http://10.67.148.201:9292 cascaded_cinder_url=http://10.67.148.201:8776/v2/%(project_id)s cascaded_region_name=Region_AZ1 cascaded_available_zone=AZ1")

# if you did not make changes to the installation files,
# please do not edit the following directories.
@@ -130,7 +130,7 @@ Besides glance-api.conf file, we add some new config files. They are described s
    #will sync to. (e.g. physicalOpenstack001, physicalOpenstack002)
    snapshot_region_names =

-- Last but also important, we add a yaml file for config the store backend's copy : glance_store.yaml in cascading glance.
+- Last but not least, we add a yaml file for config the store backend's copy : glance_store.yaml in cascading glance.
  these config correspond to various store scheme (at present, only filesystem is supported), the values
  are based on your environment, so you have to config it before installation or restart the glance-sync
  when modify it.
@@ -8,6 +8,9 @@ bind_host = 0.0.0.0
# Port the bind the API server to
bind_port = 9595

+#worker number
+workers = 3
+
# Log to this file. Make sure you do not set the same log file for both the API
# and registry servers!
#
@@ -21,7 +24,7 @@ backlog = 4096
#How to sync the image, the value can be ["None", "ALL", "USER"]
#When "ALL" choosen, means to sync to all the cascaded glances;
#When "USER" choosen, means according to user's role, project, etc.
-sync_strategy = None
+sync_strategy = All

#What the cascading glance endpoint is .
cascading_endpoint_url = http://127.0.0.1:9292/
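Per the comment block above, `sync_strategy` decides which cascaded glances receive an image. A hedged sketch of that dispatch — the semantics follow the comments, but the function and parameter names are invented for illustration and are not the glance-sync API:

```python
def select_sync_targets(strategy, all_cascaded, user_regions=None):
    """Pick target cascaded glance endpoints for one image sync."""
    if strategy == 'None':
        return []                  # syncing disabled
    if strategy.upper() == 'ALL':
        return list(all_cascaded)  # fan out to every cascaded glance
    if strategy == 'USER':
        # restrict to regions tied to the caller's role/project
        return [r for r in all_cascaded if r in (user_regions or [])]
    raise ValueError('unknown sync_strategy: %s' % strategy)

print(select_sync_targets('All', ['CascadedOne', 'CascadedTwo']))
```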
@@ -43,7 +46,7 @@ scp_copy_timeout = 3600

#When snapshot, one can set the specific regions in which the snapshot
#will sync to.
-snapshot_region_names = physicalOpenstack001, physicalOpenstack002
+snapshot_region_names = CascadedOne, CascadedTwo

[keystone_authtoken]
auth_host = 127.0.0.1
@@ -51,7 +54,7 @@ auth_port = 35357
auth_protocol = http
admin_tenant_name = admin
admin_user = glance
-admin_password = glance
+admin_password = openstack
[paste_deploy]
config_file = /etc/glance/glance-sync-paste.ini
flavor=keystone
@@ -23,8 +23,7 @@ import eventlet
import os
import sys

from oslo.config import cfg

from glance.common import utils
# Monkey patch socket and time
eventlet.patcher.monkey_patch(all=False, socket=True, time=True, thread=True)
@@ -43,6 +42,11 @@ from glance.openstack.common import log
import glance.sync


+def fail(returncode, e):
+    sys.stderr.write("ERROR: %s\n" % utils.exception_to_str(e))
+    sys.exit(returncode)
+
+
def main():
    try:
        config.parse_args(default_config_files='glance-sync.conf')
@@ -51,8 +55,10 @@ def main():
        server = wsgi.Server()
        server.start(config.load_paste_app('glance-sync'), default_port=9595)
        server.wait()
+    except exception.WorkerCreationFailure as e:
+        fail(2, e)
    except RuntimeError as e:
-        sys.exit("ERROR: %s" % e)
+        fail(1, e)


if __name__ == '__main__':
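The new `fail()` helper standardizes error exits: code 2 for worker-creation failures, code 1 for generic runtime errors. A self-contained sketch of the same pattern (the simulated error is illustrative):

```python
import sys

def fail(returncode, e):
    # mirror of the helper added above, minus glance's exception_to_str
    sys.stderr.write("ERROR: %s\n" % e)
    sys.exit(returncode)

def main():
    try:
        raise RuntimeError('could not bind port 9595')  # simulated failure
    except RuntimeError as e:
        fail(1, e)

if __name__ == '__main__':
    main()
```

Distinct exit codes let init scripts and monitoring distinguish "could not spawn workers" from other startup failures.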
@@ -26,10 +26,7 @@ from oslo.config import cfg
import six.moves.urllib.parse as urlparse

from glance.common import exception
from glance.common import utils
from glance.openstack.common import importutils
from glance.openstack.common import jsonutils
from glance.openstack.common import threadgroup
from glance.openstack.common import timeutils
import glance.openstack.common.log as logging

@@ -1,21 +0,0 @@
[console_scripts]
glance-api = glance.cmd.api:main
glance-cache-cleaner = glance.cmd.cache_cleaner:main
glance-cache-manage = glance.cmd.cache_manage:main
glance-cache-prefetcher = glance.cmd.cache_prefetcher:main
glance-cache-pruner = glance.cmd.cache_pruner:main
glance-control = glance.cmd.control:main
glance-manage = glance.cmd.manage:main
glance-registry = glance.cmd.registry:main
glance-replicator = glance.cmd.replicator:main
glance-scrubber = glance.cmd.scrubber:main

[glance.common.image_location_strategy.modules]
location_order_strategy = glance.common.location_strategy.location_order
store_type_strategy = glance.common.location_strategy.store_type

[glance.sync.store.location]
filesystem = glance.sync.store._drivers.filesystem:LocationCreator

[glance.sync.store.driver]
filesystem = glance.sync.store._drivers.filesystem:Store
@@ -1,814 +0,0 @@
# Copyright 2010-2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import collections
import copy
import re
import sys

from oslo.config import cfg
import six

from glance.common import exception
from glance.common import utils
import glance.context
import glance.domain.proxy
from glance.openstack.common import importutils
import glance.openstack.common.log as logging
from glance import scrubber
from glance.store import location

LOG = logging.getLogger(__name__)

store_opts = [
    cfg.ListOpt('known_stores',
                default=[
                    'glance.store.filesystem.Store',
                    'glance.store.http.Store'
                ],
                help=_('List of which store classes and store class locations '
                       'are currently known to glance at startup.')),
    cfg.StrOpt('default_store', default='file',
               help=_("Default scheme to use to store image data. The "
                      "scheme must be registered by one of the stores "
                      "defined by the 'known_stores' config option.")),
    cfg.StrOpt('scrubber_datadir',
               default='/var/lib/glance/scrubber',
               help=_('Directory that the scrubber will use to track '
                      'information about what to delete. '
                      'Make sure this is set in glance-api.conf and '
                      'glance-scrubber.conf.')),
    cfg.BoolOpt('delayed_delete', default=False,
                help=_('Turn on/off delayed delete.')),
    cfg.BoolOpt('use_user_token', default=True,
                help=_('Whether to pass through the user token when '
                       'making requests to the registry.')),
    cfg.IntOpt('scrub_time', default=0,
               help=_('The amount of time in seconds to delay before '
                      'performing a delete.')),
]

REGISTERED_STORES = set()
CONF = cfg.CONF
CONF.register_opts(store_opts)

_ALL_STORES = [
    'glance.store.filesystem.Store',
    'glance.store.http.Store',
    'glance.store.rbd.Store',
    'glance.store.s3.Store',
    'glance.store.swift.Store',
    'glance.store.sheepdog.Store',
    'glance.store.cinder.Store',
    'glance.store.gridfs.Store',
    'glance.store.vmware_datastore.Store'
]


class BackendException(Exception):
    pass


class UnsupportedBackend(BackendException):
    pass


class Indexable(object):

    """
    Wrapper that allows an iterator or filelike be treated as an indexable
    data structure. This is required in the case where the return value from
    Store.get() is passed to Store.add() when adding a Copy-From image to a
    Store where the client library relies on eventlet GreenSockets, in which
    case the data to be written is indexed over.
    """

    def __init__(self, wrapped, size):
        """
        Initialize the object

        :param wrappped: the wrapped iterator or filelike.
        :param size: the size of data available
        """
        self.wrapped = wrapped
        self.size = int(size) if size else (wrapped.len
                                            if hasattr(wrapped, 'len') else 0)
        self.cursor = 0
        self.chunk = None

    def __iter__(self):
        """
        Delegate iteration to the wrapped instance.
        """
        for self.chunk in self.wrapped:
            yield self.chunk

    def __getitem__(self, i):
        """
        Index into the next chunk (or previous chunk in the case where
        the last data returned was not fully consumed).

        :param i: a slice-to-the-end
        """
        start = i.start if isinstance(i, slice) else i
        if start < self.cursor:
            return self.chunk[(start - self.cursor):]

        self.chunk = self.another()
        if self.chunk:
            self.cursor += len(self.chunk)

        return self.chunk

    def another(self):
        """Implemented by subclasses to return the next element"""
        raise NotImplementedError

    def getvalue(self):
        """
        Return entire string value... used in testing
        """
        return self.wrapped.getvalue()

    def __len__(self):
        """
        Length accessor.
        """
        return self.size


def _register_stores(store_classes):
    """
    Given a set of store names, add them to a globally available set
    of store names.
    """
    for store_cls in store_classes:
        REGISTERED_STORES.add(store_cls.__module__.split('.')[2])
    # NOTE (spredzy): The actual class name is filesystem but in order
    # to maintain backward compatibility we need to keep the 'file' store
    # as a known store
    if 'filesystem' in REGISTERED_STORES:
        REGISTERED_STORES.add('file')


def _get_store_class(store_entry):
    store_cls = None
    try:
        LOG.debug("Attempting to import store %s", store_entry)
        store_cls = importutils.import_class(store_entry)
    except exception.NotFound:
        raise BackendException('Unable to load store. '
                               'Could not find a class named %s.'
                               % store_entry)
    return store_cls


def create_stores():
    """
    Registers all store modules and all schemes
    from the given config. Duplicates are not re-registered.
    """
    store_count = 0
    store_classes = set()
    for store_entry in set(CONF.known_stores + _ALL_STORES):
        store_entry = store_entry.strip()
        if not store_entry:
            continue
        store_cls = _get_store_class(store_entry)
        try:
            store_instance = store_cls()
        except exception.BadStoreConfiguration as e:
            if store_entry in CONF.known_stores:
                LOG.warn(_("%s Skipping store driver.") % unicode(e))
            continue
        finally:
            # NOTE(flaper87): To be removed in Juno
            if store_entry not in CONF.known_stores:
                LOG.deprecated(_("%s not found in `known_store`. "
                                 "Stores need to be explicitly enabled in "
                                 "the configuration file.") % store_entry)

        schemes = store_instance.get_schemes()
        if not schemes:
            raise BackendException('Unable to register store %s. '
                                   'No schemes associated with it.'
                                   % store_cls)
        else:
            if store_cls not in store_classes:
                LOG.debug("Registering store %s with schemes %s",
                          store_cls, schemes)
                store_classes.add(store_cls)
                scheme_map = {}
                for scheme in schemes:
                    loc_cls = store_instance.get_store_location_class()
                    scheme_map[scheme] = {
                        'store_class': store_cls,
                        'location_class': loc_cls,
                    }
                location.register_scheme_map(scheme_map)
                store_count += 1
            else:
                LOG.debug("Store %s already registered", store_cls)
    _register_stores(store_classes)
    return store_count


def verify_default_store():
    scheme = cfg.CONF.default_store
    context = glance.context.RequestContext()
    try:
        get_store_from_scheme(context, scheme)
    except exception.UnknownScheme:
        msg = _("Store for scheme %s not found") % scheme
        raise RuntimeError(msg)


def get_known_schemes():
    """Returns list of known schemes"""
    return location.SCHEME_TO_CLS_MAP.keys()


def get_known_stores():
    """Returns list of known stores"""
    return list(REGISTERED_STORES)


def get_store_from_scheme(context, scheme, loc=None):
    """
    Given a scheme, return the appropriate store object
    for handling that scheme.
    """
    if scheme not in location.SCHEME_TO_CLS_MAP:
        raise exception.UnknownScheme(scheme=scheme)
    scheme_info = location.SCHEME_TO_CLS_MAP[scheme]
    store = scheme_info['store_class'](context, loc)
    return store


def get_store_from_uri(context, uri, loc=None):
    """
    Given a URI, return the store object that would handle
    operations on the URI.

    :param uri: URI to analyze
    """
    scheme = uri[0:uri.find('/') - 1]
    store = get_store_from_scheme(context, scheme, loc)
    return store


def get_from_backend(context, uri, **kwargs):
    """Yields chunks of data from backend specified by uri"""

    loc = location.get_location_from_uri(uri)
    store = get_store_from_uri(context, uri, loc)

    try:
        return store.get(loc)
    except NotImplementedError:
        raise exception.StoreGetNotSupported


def get_size_from_backend(context, uri):
    """Retrieves image size from backend specified by uri"""
    if utils.is_glance_location(uri):
        uri += ('?auth_token=' + context.auth_tok)
    loc = location.get_location_from_uri(uri)
    store = get_store_from_uri(context, uri, loc)
    return store.get_size(loc)


def _check_glance_loc(context, location):
    uri = location['url']
    if not utils.is_glance_location(uri):
        return False
    if 'auth_token=' in uri:
        return True
    location['url'] = uri + ('?auth_token=' + context.auth_tok)
    return True


def delete_from_backend(context, uri, **kwargs):
    """Removes chunks of data from backend specified by uri"""
    loc = location.get_location_from_uri(uri)
    store = get_store_from_uri(context, uri, loc)

    try:
        return store.delete(loc)
    except NotImplementedError:
        raise exception.StoreDeleteNotSupported


def get_store_from_location(uri):
    """
    Given a location (assumed to be a URL), attempt to determine
    the store from the location. We use here a simple guess that
    the scheme of the parsed URL is the store...

    :param uri: Location to check for the store
    """
    loc = location.get_location_from_uri(uri)
    return loc.store_name


def safe_delete_from_backend(context, uri, image_id, **kwargs):
    """Given a uri, delete an image from the store."""
    try:
        return delete_from_backend(context, uri, **kwargs)
    except exception.NotFound:
        msg = _('Failed to delete image %s in store from URI')
        LOG.warn(msg % image_id)
    except exception.StoreDeleteNotSupported as e:
        LOG.warn(six.text_type(e))
    except UnsupportedBackend:
        exc_type = sys.exc_info()[0].__name__
        msg = (_('Failed to delete image %(image_id)s from store '
                 '(%(error)s)') % {'image_id': image_id,
                                   'error': exc_type})
        LOG.error(msg)


def schedule_delayed_delete_from_backend(context, uri, image_id, **kwargs):
    """Given a uri, schedule the deletion of an image location."""
    (file_queue, _db_queue) = scrubber.get_scrub_queues()
    # NOTE(zhiyan): Defautly ask glance-api store using file based queue.
    # In future we can change it using DB based queued instead,
    # such as using image location's status to saving pending delete flag
    # when that property be added.
    if CONF.use_user_token is False:
        context = None
    file_queue.add_location(image_id, uri, user_context=context)


def delete_image_from_backend(context, store_api, image_id, uri):
    if CONF.delayed_delete:
        store_api.schedule_delayed_delete_from_backend(context, uri, image_id)
    else:
        store_api.safe_delete_from_backend(context, uri, image_id)


def check_location_metadata(val, key=''):
    if isinstance(val, dict):
        for key in val:
            check_location_metadata(val[key], key=key)
    elif isinstance(val, list):
        ndx = 0
        for v in val:
            check_location_metadata(v, key='%s[%d]' % (key, ndx))
            ndx = ndx + 1
    elif not isinstance(val, unicode):
        raise BackendException(_("The image metadata key %(key)s has an "
                                 "invalid type of %(val)s. Only dict, list, "
                                 "and unicode are supported.") %
                               {'key': key,
                                'val': type(val)})


def store_add_to_backend(image_id, data, size, store):
    """
    A wrapper around a call to each stores add() method. This gives glance
    a common place to check the output

    :param image_id: The image add to which data is added
    :param data: The data to be stored
    :param size: The length of the data in bytes
    :param store: The store to which the data is being added
    :return: The url location of the file,
             the size amount of data,
             the checksum of the data
             the storage systems metadata dictionary for the location
    """
    (location, size, checksum, metadata) = store.add(image_id, data, size)
    if metadata is not None:
        if not isinstance(metadata, dict):
            msg = (_("The storage driver %(store)s returned invalid metadata "
                     "%(metadata)s. This must be a dictionary type") %
                   {'store': six.text_type(store),
                    'metadata': six.text_type(metadata)})
            LOG.error(msg)
            raise BackendException(msg)
        try:
            check_location_metadata(metadata)
        except BackendException as e:
            e_msg = (_("A bad metadata structure was returned from the "
                       "%(store)s storage driver: %(metadata)s. %(error)s.") %
                     {'store': six.text_type(store),
                      'metadata': six.text_type(metadata),
                      'error': six.text_type(e)})
            LOG.error(e_msg)
            raise BackendException(e_msg)
    return (location, size, checksum, metadata)


def add_to_backend(context, scheme, image_id, data, size):
    store = get_store_from_scheme(context, scheme)
    try:
        return store_add_to_backend(image_id, data, size, store)
    except NotImplementedError:
        raise exception.StoreAddNotSupported


def set_acls(context, location_uri, public=False, read_tenants=[],
             write_tenants=[]):
    loc = location.get_location_from_uri(location_uri)
    scheme = get_store_from_location(location_uri)
    store = get_store_from_scheme(context, scheme, loc)
    try:
        store.set_acls(loc, public=public, read_tenants=read_tenants,
                       write_tenants=write_tenants)
    except NotImplementedError:
        LOG.debug(_("Skipping store.set_acls... not implemented."))


class ImageRepoProxy(glance.domain.proxy.Repo):

    def __init__(self, image_repo, context, store_api):
        self.context = context
        self.store_api = store_api
        proxy_kwargs = {'context': context, 'store_api': store_api}
        super(ImageRepoProxy, self).__init__(image_repo,
                                             item_proxy_class=ImageProxy,
                                             item_proxy_kwargs=proxy_kwargs)

    def _set_acls(self, image):
        public = image.visibility == 'public'
        member_ids = []
        if image.locations and not public:
            member_repo = image.get_member_repo()
            member_ids = [m.member_id for m in member_repo.list()]
        for location in image.locations:
            self.store_api.set_acls(self.context, location['url'], public,
                                    read_tenants=member_ids)

    def add(self, image):
        result = super(ImageRepoProxy, self).add(image)
        self._set_acls(image)
        return result

    def save(self, image):
        result = super(ImageRepoProxy, self).save(image)
        self._set_acls(image)
        return result


def _check_location_uri(context, store_api, uri):
    """
    Check if an image location uri is valid.

    :param context: Glance request context
    :param store_api: store API module
    :param uri: location's uri string
    """
    is_ok = True
    try:
        size = store_api.get_size_from_backend(context, uri)
        # NOTE(zhiyan): Some stores return zero when it catch exception
        is_ok = size > 0
    except (exception.UnknownScheme, exception.NotFound):
        is_ok = False
    if not is_ok:
        raise exception.BadStoreUri(_('Invalid location: %s') % uri)


def _check_image_location(context, store_api, location):
    if not _check_glance_loc(context, location):
        _check_location_uri(context, store_api, location['url'])
    store_api.check_location_metadata(location['metadata'])


def _remove_extra_info(location):
    url = location['url']
    if url.startswith('http'):
        start = url.find('auth_token')
        if start == -1:
            return
        end = url.find('&', start)
        if end == -1:
            if url[start - 1] == '?':
                url = re.sub(r'\?auth_token=\S+', r'', url)
            elif url[start - 1] == '&':
                url = re.sub(r'&auth_token=\S+', r'', url)
        else:
            url = re.sub(r'auth_token=\S+&', r'', url)

        location['url'] = url


def _set_image_size(context, image, locations):
    if not image.size:
        for location in locations:
            size_from_backend = glance.store.get_size_from_backend(
                context, location['url'])
            if size_from_backend:
                # NOTE(flwang): This assumes all locations have the same size
                image.size = size_from_backend
                break


class ImageFactoryProxy(glance.domain.proxy.ImageFactory):

    def __init__(self, factory, context, store_api):
        self.context = context
        self.store_api = store_api
        proxy_kwargs = {'context': context, 'store_api': store_api}
        super(ImageFactoryProxy, self).__init__(factory,
                                                proxy_class=ImageProxy,
                                                proxy_kwargs=proxy_kwargs)

    def new_image(self, **kwargs):
        locations = kwargs.get('locations', [])
        for l in locations:
            _check_image_location(self.context, self.store_api, l)

            if locations.count(l) > 1:
                raise exception.DuplicateLocation(location=l['url'])

        return super(ImageFactoryProxy, self).new_image(**kwargs)


class StoreLocations(collections.MutableSequence):

    """
    The proxy for store location property. It takes responsibility for:
    1. Location uri correctness checking when adding a new location.
    2. Remove the image data from the store when a location is removed
       from an image.
    """

    def __init__(self, image_proxy, value):
        self.image_proxy = image_proxy
        if isinstance(value, list):
            self.value = value
        else:
            self.value = list(value)

    def append(self, location):
        # NOTE(flaper87): Insert this
        # location at the very end of
        # the value list.
        self.insert(len(self.value), location)

    def extend(self, other):
        if isinstance(other, StoreLocations):
            locations = other.value
        else:
            locations = list(other)

        for location in locations:
            self.append(location)

    def insert(self, i, location):
        _check_image_location(self.image_proxy.context,
                              self.image_proxy.store_api, location)

        _remove_extra_info(location)
        if location in self.value:
            raise exception.DuplicateLocation(location=location['url'])

        self.value.insert(i, location)
        _set_image_size(self.image_proxy.context,
                        self.image_proxy,
                        [location])

    def pop(self, i=-1):
        location = self.value.pop(i)
        try:
            delete_image_from_backend(self.image_proxy.context,
                                      self.image_proxy.store_api,
                                      self.image_proxy.image.image_id,
                                      location['url'])
        except Exception:
            self.value.insert(i, location)
            raise
        return location

    def count(self, location):
        return self.value.count(location)

    def index(self, location, *args):
        return self.value.index(location, *args)

    def remove(self, location):
        if self.count(location):
            self.pop(self.index(location))
        else:
            self.value.remove(location)

    def reverse(self):
        self.value.reverse()

    # Mutable sequence, so not hashable
    __hash__ = None

    def __getitem__(self, i):
        return self.value.__getitem__(i)

    def __setitem__(self, i, location):
        _check_image_location(self.image_proxy.context,
                              self.image_proxy.store_api, location)
        self.value.__setitem__(i, location)
        _set_image_size(self.image_proxy.context,
                        self.image_proxy,
                        [location])

    def __delitem__(self, i):
        location = None
        try:
            location = self.value.__getitem__(i)
        except Exception:
            return self.value.__delitem__(i)
        delete_image_from_backend(self.image_proxy.context,
                                  self.image_proxy.store_api,
                                  self.image_proxy.image.image_id,
                                  location['url'])
        self.value.__delitem__(i)

    def __delslice__(self, i, j):
        i = max(i, 0)
        j = max(j, 0)
        locations = []
        try:
            locations = self.value.__getslice__(i, j)
        except Exception:
            return self.value.__delslice__(i, j)
        for location in locations:
            delete_image_from_backend(self.image_proxy.context,
                                      self.image_proxy.store_api,
                                      self.image_proxy.image.image_id,
                                      location['url'])
            self.value.__delitem__(i)

    def __iadd__(self, other):
        self.extend(other)
        return self

    def __contains__(self, location):
        return location in self.value

    def __len__(self):
        return len(self.value)

    def __cast(self, other):
        if isinstance(other, StoreLocations):
            return other.value
        else:
            return other

    def __cmp__(self, other):
        return cmp(self.value, self.__cast(other))

    def __iter__(self):
        return iter(self.value)

    def __copy__(self):
        return type(self)(self.image_proxy, self.value)

    def __deepcopy__(self, memo):
        # NOTE(zhiyan): Only copy location entries, others can be reused.
        value = copy.deepcopy(self.value, memo)
        self.image_proxy.image.locations = value
        return type(self)(self.image_proxy, value)


def _locations_proxy(target, attr):
    """
    Make a location property proxy on the image object.

    :param target: the image object on which to add the proxy
    :param attr: the property proxy we want to hook
    """

    def get_attr(self):
        value = getattr(getattr(self, target), attr)
        return StoreLocations(self, value)

    def set_attr(self, value):
        if not isinstance(value, (list, StoreLocations)):
            raise exception.BadStoreUri(_('Invalid locations: %s') % value)
        ori_value = getattr(getattr(self, target), attr)
        if ori_value != value:
            # NOTE(zhiyan): Enforced locations list was previously empty list.
            if len(ori_value) > 0:
                raise exception.Invalid(_('Original locations is not empty: '
                                          '%s') % ori_value)
            # NOTE(zhiyan): Check locations are all valid.
            for location in value:
                _check_image_location(self.context, self.store_api,
                                      location)
                if value.count(location) > 1:
                    raise exception.DuplicateLocation(location=location['url'])
            _set_image_size(self.context, getattr(self, target), value)
            return setattr(getattr(self, target), attr, list(value))

    def del_attr(self):
        value = getattr(getattr(self, target), attr)
        while len(value):
            delete_image_from_backend(self.context, self.store_api,
                                      self.image.image_id, value[0]['url'])
            del value[0]
        setattr(getattr(self, target), attr, value)
        return delattr(getattr(self, target), attr)

    return property(get_attr, set_attr, del_attr)


pattern = re.compile(r'^https?://\S+/v2/images/\S+$')


class ImageProxy(glance.domain.proxy.Image):

    locations = _locations_proxy('image', 'locations')

    def __init__(self, image, context, store_api):
        self.image = image
        self.context = context
        self.store_api = store_api
        proxy_kwargs = {
            'context': context,
            'image': self,
            'store_api': store_api,
        }
        super(ImageProxy, self).__init__(
            image, member_repo_proxy_class=ImageMemberRepoProxy,
            member_repo_proxy_kwargs=proxy_kwargs)

    def delete(self):
        self.image.delete()
        if self.image.locations:
            for location in self.image.locations:
                self.store_api.delete_image_from_backend(self.context,
                                                         self.store_api,
                                                         self.image.image_id,
                                                         location['url'])

    def set_data(self, data, size=None):
        if size is None:
            size = 0  # NOTE(markwash): zero -> unknown size
        location, size, checksum, loc_meta = self.store_api.add_to_backend(
            self.context, CONF.default_store,
            self.image.image_id, utils.CooperativeReader(data), size)
        loc_meta = loc_meta or {}
        loc_meta['is_default'] = 'true'
        self.image.locations = [{'url': location, 'metadata': loc_meta}]
        self.image.size = size
        self.image.checksum = checksum
        self.image.status = 'active'

    def get_data(self):
        if not self.image.locations:
            raise exception.NotFound(_("No image data could be found"))
        err = None
        for loc in self.image.locations:
            if pattern.match(loc['url']):
                continue
            try:
                data, size = self.store_api.get_from_backend(self.context,
                                                             loc['url'])

                return data
            except Exception as e:
                LOG.warn(_('Get image %(id)s data failed: '
                           '%(err)s.') % {'id': self.image.image_id,
                                          'err': six.text_type(e)})
                err = e
        # tried all locations
        LOG.error(_('Glance tried all locations to get data for image %s '
                    'but all have failed.') % self.image.image_id)
        raise err


class ImageMemberRepoProxy(glance.domain.proxy.Repo):

    def __init__(self, repo, image, context, store_api):
        self.repo = repo
        self.image = image
        self.context = context
        self.store_api = store_api
        super(ImageMemberRepoProxy, self).__init__(repo)

    def _set_acls(self):
        public = self.image.visibility == 'public'
        if self.image.locations and not public:
            member_ids = [m.member_id for m in self.repo.list()]
            for location in self.image.locations:
                self.store_api.set_acls(self.context, location['url'], public,
                                        read_tenants=member_ids)

    def add(self, member):
        super(ImageMemberRepoProxy, self).add(member)
        self._set_acls()

    def remove(self, member):
        super(ImageMemberRepoProxy, self).remove(member)
        self._set_acls()
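The core mechanism of the deleted module is a scheme-to-store registry: create_stores() instantiates each configured store class and records, per URI scheme, which class handles it; get_store_from_scheme() later resolves a scheme back to a store instance. A condensed, dependency-free sketch of that registry pattern (class and function names are simplified stand-ins):

```python
# Minimal stand-alone sketch of the scheme -> store registry that
# create_stores()/get_store_from_scheme() implement in the file above.

SCHEME_TO_CLS_MAP = {}

class HttpStore(object):
    def get_schemes(self):
        return ('http', 'https')

def register(store_cls):
    """Instantiate a store and map each of its schemes to the class."""
    store = store_cls()
    for scheme in store.get_schemes():
        SCHEME_TO_CLS_MAP[scheme] = {'store_class': store_cls}

def get_store_from_scheme(scheme):
    if scheme not in SCHEME_TO_CLS_MAP:
        raise KeyError('unknown scheme: %s' % scheme)
    return SCHEME_TO_CLS_MAP[scheme]['store_class']()

register(HttpStore)
print(type(get_store_from_scheme('https')).__name__)  # HttpStore
```

In Juno this registry moved out of the glance tree into the glance_store library, which is why the patch deletes the vendored copy.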
@@ -1,165 +0,0 @@
Openstack Neutron DVR patch
===============================

To solve the scalability problem in OpenStack Neutron deployments and to distribute the network-node load across compute nodes, the community proposed a solution named DVR (Distributed Virtual Router). DVR addresses both problems while fitting into the existing model.

The DVR feature code has been merged into the neutron master branch, and the Neutron Juno release is expected to include it. This patch was downloaded from the DVR branch on 1 June.


Key modules
-----------

* L2 Agent Doc

  https://docs.google.com/document/d/1depasJSnGZPOnRLxEC_PYsVLcGVFXZLqP52RFTe21BE/edit#heading=h.5w7clq272tji

* L3 Agent Doc

  https://docs.google.com/document/d/1jCmraZGirmXq5V1MtRqhjdZCbUfiwBhRkUjDXGt5QUQ/edit

Addressed by: https://review.openstack.org/84223
* Add L3 Extension for Distributed Routers

Addressed by: https://review.openstack.org/87730
* L2 Agent/ML2 Plugin changes for L3 DVR

Addressed by: https://review.openstack.org/88442
* Add 'ip neigh' to ip_lib

Addressed by: https://review.openstack.org/89413
* Modify L3 Agent for Distributed Routers

Addressed by: https://review.openstack.org/89694
* Add L3 Scheduler Changes for Distributed Routers

Addressed by: https://review.openstack.org/93233
* Add 'ip rule add from' to ip_lib

Addressed by: https://review.openstack.org/96389
* Addressed merge conflict

Addressed by: https://review.openstack.org/97028
* Refactor some router-related methods

Addressed by: https://review.openstack.org/97275
* Allow L3 base to handle extensions on router creation

Addressed by: https://review.openstack.org/102101
* L2 Model additions to support DVR

Addressed by: https://review.openstack.org/102332
* RPC additions to support DVR

Addressed by: https://review.openstack.org/102398
* ML2 additions to support DVR

Requirements
------------
* openstack-neutron-server-2014.1-1.1 has been installed
* oslo.db-0.2.0 has been installed
* sqlalchemy-migrate-0.9.1 has been installed

Installation
------------

We provide two ways to install the DVR patch code. In this section, we will guide you through installing the neutron DVR code with the minimum configuration.

* **Note:**

    - Make sure you have an existing installation of **Openstack Icehouse**.
    - We recommend that you Do backup at least the following files before installation, because they are to be overwritten or modified:
      $NEUTRON_CONFIG_PARENT_DIR/neutron.conf
      (replace the $... with actual directory names.)

* **Manual Installation**

    - Navigate to the local repository and copy the contents in 'neutron' sub-directory to the corresponding places in existing neutron, e.g.
      ```cp -r $LOCAL_REPOSITORY_DIR/neutron $NEUTRON_PARENT_DIR```
      (replace the $... with actual directory name.)

    - Navigate to the local repository and copy the contents in 'etc' sub-directory to the corresponding places in existing neutron, e.g.
      ```cp -r $LOCAL_REPOSITORY_DIR/etc $NEUTRON_CONFIG_DIR```
      (replace the $... with actual directory name.)

    - Update the neutron configuration file (e.g. /etc/neutron/l3_agent.ini, /etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini) with the minimum option below. If the option already exists, modify its value, otherwise add it to the config file. Check the "Configurations" section below for a full configuration guide.
      1)update l3 agent configurations(/etc/neutron/l3_agent.ini)
      ```
      [DEFAULT]
      ...
      distributed_agent=True
      ```
      2)update openvswitch agent configurations(/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini)
      ```
      [AGENT]
      ...
      enable_distributed_routing = True
      ```

    - Remove the neutron DB

    - Create the neutron DB
      ```neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head```

    - Restart the neutron-server/openvswitch-agent/l3-agent.
      ```service openstack-neutron restart```
      ```service openstack-neutron-openvswitch-agent restart```
      ```service openstack-neutron-l3-agent restart```

    - Done.

* **Automatic Installation**

    - Navigate to the installation directory and run installation script.
      ```
      cd $LOCAL_REPOSITORY_DIR/installation
      sudo bash ./install.sh
      ```
      (replace the $... with actual directory name.)

    - Done. The installation code should setup the DVR code without the minimum configuration modifying. Check the "Configurations" section for a full configuration guide.
      1)update l3 agent configurations(/etc/neutron/l3_agent.ini)
      ```
      [DEFAULT]
      ...
      distributed_agent=True
      ```
      2)update openvswitch agent configurations(/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini)
      ```
      [AGENT]
      ...
      enable_distributed_routing = True
      ```

* **Troubleshooting**

    In case the automatic installation process is not complete, please check the followings:

    - Make sure your OpenStack version is Icehouse.

    - Check the variables in the beginning of the install.sh scripts. Your installation directories may be different from the default values we provide.

    - The installation code will automatically add the related codes to $NEUTRON_PARENT_DIR/nova but not modify the related configuration, you should update the related configurations manually.
    - In case the automatic installation does not work, try to install manually.

Configurations
--------------

* This is a (default) configuration sample for the l2 proxy. Please add/modify these options in (/etc/neutron/l3_agent.ini, /etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini).
* Note:
    - Please carefully make sure that options in the configuration file are not duplicated. If an option name already exists, modify its value instead of adding a new one of the same name.
    - Please refer to the 'Configuration Details' section below for proper configuration and usage of costs and constraints.

1)add or update l3 agent configurations(/etc/neutron/l3_agent.ini)
```
[DEFAULT]
...
#Enables distributed router agent function
distributed_agent=True
```
2)add or update openvswitch agent configurations(/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini)
```
[AGENT]
...
#Make the l2 agent run in dvr mode
enable_distributed_routing = True
```
@@ -1,148 +0,0 @@
#!/bin/bash

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Copyright (c) 2014 Huawei Technologies.

_MYSQL_PASS="Galax8800"
_NEUTRON_CONF_DIR="/etc/neutron"
_NEUTRON_CONF_FILE='neutron.conf'
_NEUTRON_INSTALL="/usr/lib64/python2.6/site-packages"
_NEUTRON_DIR="${_NEUTRON_INSTALL}/neutron"

_NEUTRON_L2_CONFIG_FILE='/plugins/openvswitch/ovs_neutron_plugin.ini'
_NEUTRON_L3_CONFIG_FILE='l3_agent.ini'
# if you did not make changes to the installation files,
# please do not edit the following directories.
_CODE_DIR="../neutron/"
_BACKUP_DIR="${_NEUTRON_INSTALL}/.neutron-dvr-code-installation-backup"

l2_config_option_list="\[AGENT\]:firewall_driver=neutron.agent.firewall.NoopFirewallDriver \[SECURITYGROUP\]:enable_distributed_routing=True"
l3_config_option_list="\[DEFAULT\]:distributed_agent=True"

#_SCRIPT_NAME="${0##*/}"
#_SCRIPT_LOGFILE="/var/log/neutron-dvr-code/installation/${_SCRIPT_NAME}.log"

if [[ ${EUID} -ne 0 ]]; then
    echo "Please run as root."
    exit 1
fi

##Redirecting output to logfile as well as stdout
#exec > >(tee -a ${_SCRIPT_LOGFILE})
#exec 2> >(tee -a ${_SCRIPT_LOGFILE} >&2)

cd `dirname $0`

echo "checking installation directories..."
if [ ! -d "${_NEUTRON_DIR}" ] ; then
    echo "Could not find the neutron installation. Please check the variables in the beginning of the script."
    echo "aborted."
    exit 1
fi
if [ ! -f "${_NEUTRON_CONF_DIR}/${_NEUTRON_CONF_FILE}" ] ; then
    echo "Could not find neutron config file. Please check the variables in the beginning of the script."
    echo "aborted."
    exit 1
fi

echo "checking previous installation..."
if [ -d "${_BACKUP_DIR}/neutron" ] ; then
    echo "It seems neutron-dvr-code-cascaded has already been installed!"
    echo "Please check README for solution if this is not true."
    exit 1
fi

echo "backing up current code files that might be overwritten..."
mkdir -p "${_BACKUP_DIR}"
cp -r "${_NEUTRON_DIR}/" "${_BACKUP_DIR}/"
if [ $? -ne 0 ] ; then
    rm -r "${_BACKUP_DIR}/neutron"
    echo "Error in code backup code files, aborted."
    exit 1
fi

echo "backing up current config code files that might be overwritten..."
mkdir -p "${_BACKUP_DIR}/etc"
cp -r "${_NEUTRON_CONF_DIR}/" "${_BACKUP_DIR}/etc"
if [ $? -ne 0 ] ; then
    rm -r "${_BACKUP_DIR}/etc"
    echo "Error in code backup config files, aborted."
    exit 1
fi

echo "copying in new files..."
cp -r "${_CODE_DIR}" `dirname ${_NEUTRON_DIR}`
if [ $? -ne 0 ] ; then
    echo "Error in copying, aborted."
    echo "Recovering original files..."
    cp -r "${_BACKUP_DIR}/neutron" `dirname ${_NEUTRON_DIR}` && rm -r "${_BACKUP_DIR}/neutron"
    if [ $? -ne 0 ] ; then
        echo "Recovering failed! Please install manually."
    fi
    exit 1

fi

if [ -d "${_NEUTRON_DIR}/openstack/common/db/rpc" ] ; then
    rm -r "${_NEUTRON_DIR}/openstack/common/db/rpc"
fi

echo "updating l2 config file..."
for option in $l2_config_option_list
do
    option_branch=`echo $option|awk -F ":" '{print $1}'`
    option_config=`echo $option|awk -F ":" '{print $2}'`
    option_key=`echo $option_config|awk -F "=" '{print $1}'`
    option_value=`echo $option_config|awk -F "=" '{print $2}'`
    sed -i.backup -e "/$option_key *=/d" "${_NEUTRON_CONF_DIR}/${_NEUTRON_L2_CONFIG_FILE}"
    echo "$option_key,***************$option_value"
    sed -i "/$option_branch/a\\$option_key=$option_value" "${_NEUTRON_CONF_DIR}/${_NEUTRON_L2_CONFIG_FILE}"

done
echo "updating l3 config file..."
for option in $l3_config_option_list
do
    option_branch=`echo $option|awk -F ":" '{print $1}'`
    option_config=`echo $option|awk -F ":" '{print $2}'`
    option_key=`echo $option_config|awk -F "=" '{print $1}'`
    option_value=`echo $option_config|awk -F "=" '{print $2}'`
    sed -i.backup -e "/$option_key *=/d" "${_NEUTRON_CONF_DIR}/${_NEUTRON_L3_CONFIG_FILE}"
    echo "$option_key,***************$option_value"
    sed -i "/$option_branch/a\\$option_key=$option_value" "${_NEUTRON_CONF_DIR}/${_NEUTRON_L3_CONFIG_FILE}"

done


echo "create neutron db..."
exec_sql_str="DROP DATABASE if exists neutron;CREATE DATABASE neutron;GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' IDENTIFIED BY \"$_MYSQL_PASS\";GRANT ALL PRIVILEGES ON *.* TO 'neutron'@'%'IDENTIFIED BY \"$_MYSQL_PASS\";"
mysql -u root -p$_MYSQL_PASS -e "$exec_sql_str"
echo "syc neutron db..."
neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head

if [ $? -ne 0 ] ; then
    log "There was an error in sync neutron db, please sync neutron db manually."
    exit 1
fi

#echo "restarting neutron server..."
#service openstack-neutron stop

#if [ $? -ne 0 ] ; then
#    echo "There was an error in restarting the service, please restart neutron server manually."
#    exit 1
#fi

echo "Completed."
echo "See README to get started."

exit 0
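The sed loop in the deleted script first removes any existing occurrence of an option key and then re-adds it under the target section header. A hedged Python sketch of the same idea using stdlib ConfigParser (Python 2, to match the script's environment; the local file name is illustrative, not the real /etc/neutron path):

```python
# Hypothetical ConfigParser-based equivalent of the script's
# sed-driven option updates.
import ConfigParser

def set_options(path, options):
    """Enforce {(section, key): value} pairs in an ini file."""
    cfg = ConfigParser.RawConfigParser()
    cfg.read(path)
    for (section, key), value in options.items():
        if not cfg.has_section(section):
            cfg.add_section(section)
        cfg.set(section, key, value)  # replaces any previous value
    with open(path, 'w') as fh:
        cfg.write(fh)

set_options('ovs_neutron_plugin.ini',
            {('AGENT', 'enable_distributed_routing'): 'True'})
```

One trade-off worth noting: a ConfigParser round-trip rewrites the whole file and drops comments, which is why in-place sed edits are common in installers like this one.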
@ -1,19 +0,0 @@
|
|||
# Copyright 2011 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import gettext
|
||||
|
||||
|
||||
gettext.install('neutron', unicode=1)

@@ -1,14 +0,0 @@
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

@@ -1,14 +0,0 @@
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

@@ -1,121 +0,0 @@
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os

from oslo.config import cfg

from neutron.common import config
from neutron.openstack.common import log as logging


LOG = logging.getLogger(__name__)


ROOT_HELPER_OPTS = [
    cfg.StrOpt('root_helper', default='sudo',
               help=_('Root helper application.')),
]

AGENT_STATE_OPTS = [
    cfg.FloatOpt('report_interval', default=30,
                 help=_('Seconds between nodes reporting state to server; '
                        'should be less than agent_down_time, best if it '
                        'is half or less than agent_down_time.')),
]

INTERFACE_DRIVER_OPTS = [
    cfg.StrOpt('interface_driver',
               help=_("The driver used to manage the virtual interface.")),
]

USE_NAMESPACES_OPTS = [
    cfg.BoolOpt('use_namespaces', default=True,
                help=_("Allow overlapping IP.")),
]


def get_log_args(conf, log_file_name):
    cmd_args = []
    if conf.debug:
        cmd_args.append('--debug')
    if conf.verbose:
        cmd_args.append('--verbose')
    if (conf.log_dir or conf.log_file):
        cmd_args.append('--log-file=%s' % log_file_name)
        log_dir = None
        if conf.log_dir and conf.log_file:
            log_dir = os.path.dirname(
                os.path.join(conf.log_dir, conf.log_file))
        elif conf.log_dir:
            log_dir = conf.log_dir
        elif conf.log_file:
            log_dir = os.path.dirname(conf.log_file)
        if log_dir:
            cmd_args.append('--log-dir=%s' % log_dir)
    else:
        if conf.use_syslog:
            cmd_args.append('--use-syslog')
            if conf.syslog_log_facility:
                cmd_args.append(
                    '--syslog-log-facility=%s' % conf.syslog_log_facility)
    return cmd_args


def register_root_helper(conf):
    # The first call is to ensure backward compatibility
    conf.register_opts(ROOT_HELPER_OPTS)
    conf.register_opts(ROOT_HELPER_OPTS, 'AGENT')


def register_agent_state_opts_helper(conf):
    conf.register_opts(AGENT_STATE_OPTS, 'AGENT')


def register_interface_driver_opts_helper(conf):
    conf.register_opts(INTERFACE_DRIVER_OPTS)


def register_use_namespaces_opts_helper(conf):
    conf.register_opts(USE_NAMESPACES_OPTS)


def get_root_helper(conf):
    root_helper = conf.AGENT.root_helper
    if root_helper != 'sudo':
        return root_helper

    root_helper = conf.root_helper
    if root_helper != 'sudo':
        LOG.deprecated(_('DEFAULT.root_helper is deprecated! Please move '
                         'root_helper configuration to [AGENT] section.'))
        return root_helper

    return 'sudo'


def setup_conf():
    bind_opts = [
        cfg.StrOpt('state_path',
                   default='/var/lib/neutron',
                   help=_('Top-level directory for maintaining dhcp state')),
    ]

    conf = cfg.ConfigOpts()
    conf.register_opts(bind_opts)
    return conf

# add a logging setup method here for convenience
setup_logging = config.setup_logging
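
Note: a minimal sketch of how the helpers in this (removed) config module are combined by an agent; it uses only functions defined in the file above:

    # Illustrative sketch, not part of the original file.
    from oslo.config import cfg
    from neutron.agent.common import config

    config.register_root_helper(cfg.CONF)            # DEFAULT and [AGENT]
    config.register_agent_state_opts_helper(cfg.CONF)
    root_helper = config.get_root_helper(cfg.CONF)   # prefers [AGENT].root_helper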

@@ -1,620 +0,0 @@
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os
import sys

import eventlet
eventlet.monkey_patch()

import netaddr
from oslo.config import cfg

from neutron.agent.common import config
from neutron.agent.linux import dhcp
from neutron.agent.linux import external_process
from neutron.agent.linux import interface
from neutron.agent.linux import ovs_lib  # noqa
from neutron.agent import rpc as agent_rpc
from neutron.common import config as common_config
from neutron.common import constants
from neutron.common import exceptions
from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron.common import utils
from neutron import context
from neutron import manager
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import loopingcall
from neutron.openstack.common import service
from neutron import service as neutron_service

LOG = logging.getLogger(__name__)


class DhcpAgent(manager.Manager):
    OPTS = [
        cfg.IntOpt('resync_interval', default=5,
                   help=_("Interval to resync.")),
        cfg.StrOpt('dhcp_driver',
                   default='neutron.agent.linux.dhcp.Dnsmasq',
                   help=_("The driver used to manage the DHCP server.")),
        cfg.BoolOpt('enable_isolated_metadata', default=False,
                    help=_("Support Metadata requests on isolated networks.")),
        cfg.BoolOpt('enable_metadata_network', default=False,
                    help=_("Allows for serving metadata requests from a "
                           "dedicated network. Requires "
                           "enable_isolated_metadata = True")),
        cfg.IntOpt('num_sync_threads', default=4,
                   help=_('Number of threads to use during sync process.')),
        cfg.StrOpt('metadata_proxy_socket',
                   default='$state_path/metadata_proxy',
                   help=_('Location of Metadata Proxy UNIX domain '
                          'socket')),
    ]

    def __init__(self, host=None):
        super(DhcpAgent, self).__init__(host=host)
        self.needs_resync_reasons = []
        self.conf = cfg.CONF
        self.cache = NetworkCache()
        self.root_helper = config.get_root_helper(self.conf)
        self.dhcp_driver_cls = importutils.import_class(self.conf.dhcp_driver)
        ctx = context.get_admin_context_without_session()
        self.plugin_rpc = DhcpPluginApi(topics.PLUGIN,
                                        ctx, self.conf.use_namespaces)
        # create dhcp dir to store dhcp info
        dhcp_dir = os.path.dirname("/%s/dhcp/" % self.conf.state_path)
        if not os.path.isdir(dhcp_dir):
            os.makedirs(dhcp_dir, 0o755)
        self.dhcp_version = self.dhcp_driver_cls.check_version()
        self._populate_networks_cache()

    def _populate_networks_cache(self):
        """Populate the networks cache when the DHCP-agent starts."""
        try:
            existing_networks = self.dhcp_driver_cls.existing_dhcp_networks(
                self.conf,
                self.root_helper
            )
            for net_id in existing_networks:
                net = dhcp.NetModel(self.conf.use_namespaces,
                                    {"id": net_id,
                                     "subnets": [],
                                     "ports": []})
                self.cache.put(net)
        except NotImplementedError:
            # just go ahead with an empty networks cache
            LOG.debug(
                _("The '%s' DHCP-driver does not support retrieving of a "
                  "list of existing networks"),
                self.conf.dhcp_driver
            )

    def after_start(self):
        self.run()
        LOG.info(_("DHCP agent started"))

    def run(self):
        """Activate the DHCP agent."""
        self.sync_state()
        self.periodic_resync()

    def call_driver(self, action, network, **action_kwargs):
        """Invoke an action on a DHCP driver instance."""
        LOG.debug(_('Calling driver for network: %(net)s action: %(action)s'),
                  {'net': network.id, 'action': action})
        try:
            # the Driver expects something that is duck typed similar to
            # the base models.
            driver = self.dhcp_driver_cls(self.conf,
                                          network,
                                          self.root_helper,
                                          self.dhcp_version,
                                          self.plugin_rpc)

            getattr(driver, action)(**action_kwargs)
            return True
        except exceptions.Conflict:
            # No need to resync here, the agent will receive the event related
            # to a status update for the network
            LOG.warning(_('Unable to %(action)s dhcp for %(net_id)s: there is '
                          'a conflict with its current state; please check '
                          'that the network and/or its subnet(s) still exist.')
                        % {'net_id': network.id, 'action': action})
        except Exception as e:
            self.schedule_resync(e)
            if (isinstance(e, n_rpc.RemoteError)
                and e.exc_type == 'NetworkNotFound'
                or isinstance(e, exceptions.NetworkNotFound)):
                LOG.warning(_("Network %s has been deleted."), network.id)
            else:
                LOG.exception(_('Unable to %(action)s dhcp for %(net_id)s.')
                              % {'net_id': network.id, 'action': action})

    def schedule_resync(self, reason):
        """Schedule a resync for a given reason."""
        self.needs_resync_reasons.append(reason)

    @utils.synchronized('dhcp-agent')
    def sync_state(self):
        """Sync the local DHCP state with Neutron."""
        LOG.info(_('Synchronizing state'))
        pool = eventlet.GreenPool(cfg.CONF.num_sync_threads)
        known_network_ids = set(self.cache.get_network_ids())

        try:
            active_networks = self.plugin_rpc.get_active_networks_info()
            active_network_ids = set(network.id for network in active_networks)
            for deleted_id in known_network_ids - active_network_ids:
                try:
                    self.disable_dhcp_helper(deleted_id)
                except Exception as e:
                    self.schedule_resync(e)
                    LOG.exception(_('Unable to sync network state on deleted '
                                    'network %s'), deleted_id)

            for network in active_networks:
                pool.spawn(self.safe_configure_dhcp_for_network, network)
            pool.waitall()
            LOG.info(_('Synchronizing state complete'))

        except Exception as e:
            self.schedule_resync(e)
            LOG.exception(_('Unable to sync network state.'))

    def _periodic_resync_helper(self):
        """Resync the dhcp state at the configured interval."""
        while True:
            eventlet.sleep(self.conf.resync_interval)
            if self.needs_resync_reasons:
                # be careful to avoid a race with additions to list
                # from other threads
                reasons = self.needs_resync_reasons
                self.needs_resync_reasons = []
                for r in reasons:
                    LOG.debug(_("resync: %(reason)s"),
                              {"reason": r})
                self.sync_state()

    def periodic_resync(self):
        """Spawn a thread to periodically resync the dhcp state."""
        eventlet.spawn(self._periodic_resync_helper)

    def safe_get_network_info(self, network_id):
        try:
            network = self.plugin_rpc.get_network_info(network_id)
            if not network:
                LOG.warn(_('Network %s has been deleted.'), network_id)
            return network
        except Exception as e:
            self.schedule_resync(e)
            LOG.exception(_('Network %s info call failed.'), network_id)

    def enable_dhcp_helper(self, network_id):
        """Enable DHCP for a network that meets enabling criteria."""
        network = self.safe_get_network_info(network_id)
        if network:
            self.configure_dhcp_for_network(network)

    def safe_configure_dhcp_for_network(self, network):
        try:
            self.configure_dhcp_for_network(network)
        except (exceptions.NetworkNotFound, RuntimeError):
            LOG.warn(_('Network %s may have been deleted and its resources '
                       'may have already been disposed.'), network.id)

    def configure_dhcp_for_network(self, network):
        if not network.admin_state_up:
            return

        for subnet in network.subnets:
            if subnet.enable_dhcp:
                if self.call_driver('enable', network):
                    if (self.conf.use_namespaces and
                        self.conf.enable_isolated_metadata):
                        self.enable_isolated_metadata_proxy(network)
                    self.cache.put(network)
                break

    def disable_dhcp_helper(self, network_id):
        """Disable DHCP for a network known to the agent."""
        network = self.cache.get_network_by_id(network_id)
        if network:
            if (self.conf.use_namespaces and
                self.conf.enable_isolated_metadata):
                self.disable_isolated_metadata_proxy(network)
            if self.call_driver('disable', network):
                self.cache.remove(network)

    def refresh_dhcp_helper(self, network_id):
        """Refresh or disable DHCP for a network depending on the current state
        of the network.
        """
        old_network = self.cache.get_network_by_id(network_id)
        if not old_network:
            # DHCP is currently not running for this network.
            return self.enable_dhcp_helper(network_id)

        network = self.safe_get_network_info(network_id)
        if not network:
            return

        old_cidrs = set(s.cidr for s in old_network.subnets if s.enable_dhcp)
        new_cidrs = set(s.cidr for s in network.subnets if s.enable_dhcp)

        if new_cidrs and old_cidrs == new_cidrs:
            self.call_driver('reload_allocations', network)
            self.cache.put(network)
        elif new_cidrs:
            if self.call_driver('restart', network):
                self.cache.put(network)
        else:
            self.disable_dhcp_helper(network.id)

    @utils.synchronized('dhcp-agent')
    def network_create_end(self, context, payload):
        """Handle the network.create.end notification event."""
        network_id = payload['network']['id']
        self.enable_dhcp_helper(network_id)

    @utils.synchronized('dhcp-agent')
    def network_update_end(self, context, payload):
        """Handle the network.update.end notification event."""
        network_id = payload['network']['id']
        if payload['network']['admin_state_up']:
            self.enable_dhcp_helper(network_id)
        else:
            self.disable_dhcp_helper(network_id)

    @utils.synchronized('dhcp-agent')
    def network_delete_end(self, context, payload):
        """Handle the network.delete.end notification event."""
        self.disable_dhcp_helper(payload['network_id'])

    @utils.synchronized('dhcp-agent')
    def subnet_update_end(self, context, payload):
        """Handle the subnet.update.end notification event."""
        network_id = payload['subnet']['network_id']
        self.refresh_dhcp_helper(network_id)

    # Use the update handler for the subnet create event.
    subnet_create_end = subnet_update_end

    @utils.synchronized('dhcp-agent')
    def subnet_delete_end(self, context, payload):
        """Handle the subnet.delete.end notification event."""
        subnet_id = payload['subnet_id']
        network = self.cache.get_network_by_subnet_id(subnet_id)
        if network:
            self.refresh_dhcp_helper(network.id)

    @utils.synchronized('dhcp-agent')
    def port_update_end(self, context, payload):
        """Handle the port.update.end notification event."""
        updated_port = dhcp.DictModel(payload['port'])
        network = self.cache.get_network_by_id(updated_port.network_id)
        if network:
            self.cache.put_port(updated_port)
            self.call_driver('reload_allocations', network)

    # Use the update handler for the port create event.
    port_create_end = port_update_end

    @utils.synchronized('dhcp-agent')
    def port_delete_end(self, context, payload):
        """Handle the port.delete.end notification event."""
        port = self.cache.get_port_by_id(payload['port_id'])
        if port:
            network = self.cache.get_network_by_id(port.network_id)
            self.cache.remove_port(port)
            self.call_driver('reload_allocations', network)

    def enable_isolated_metadata_proxy(self, network):

        # The proxy might work for either a single network
        # or all the networks connected via a router
        # to the one passed as a parameter
        neutron_lookup_param = '--network_id=%s' % network.id
        meta_cidr = netaddr.IPNetwork(dhcp.METADATA_DEFAULT_CIDR)
        has_metadata_subnet = any(netaddr.IPNetwork(s.cidr) in meta_cidr
                                  for s in network.subnets)
        if (self.conf.enable_metadata_network and has_metadata_subnet):
            router_ports = [port for port in network.ports
                            if (port.device_owner ==
                                constants.DEVICE_OWNER_ROUTER_INTF)]
            if router_ports:
                # Multiple router ports should not be allowed
                if len(router_ports) > 1:
                    LOG.warning(_("%(port_num)d router ports found on the "
                                  "metadata access network. Only the port "
                                  "%(port_id)s, for router %(router_id)s "
                                  "will be considered"),
                                {'port_num': len(router_ports),
                                 'port_id': router_ports[0].id,
                                 'router_id': router_ports[0].device_id})
                neutron_lookup_param = ('--router_id=%s' %
                                        router_ports[0].device_id)

        def callback(pid_file):
            metadata_proxy_socket = cfg.CONF.metadata_proxy_socket
            proxy_cmd = ['neutron-ns-metadata-proxy',
                         '--pid_file=%s' % pid_file,
                         '--metadata_proxy_socket=%s' % metadata_proxy_socket,
                         neutron_lookup_param,
                         '--state_path=%s' % self.conf.state_path,
                         '--metadata_port=%d' % dhcp.METADATA_PORT]
            proxy_cmd.extend(config.get_log_args(
                cfg.CONF, 'neutron-ns-metadata-proxy-%s.log' % network.id))
            return proxy_cmd

        pm = external_process.ProcessManager(
            self.conf,
            network.id,
            self.root_helper,
            network.namespace)
        pm.enable(callback)

    def disable_isolated_metadata_proxy(self, network):
        pm = external_process.ProcessManager(
            self.conf,
            network.id,
            self.root_helper,
            network.namespace)
        pm.disable()


class DhcpPluginApi(n_rpc.RpcProxy):
    """Agent side of the dhcp rpc API.

    API version history:
        1.0 - Initial version.
        1.1 - Added get_active_networks_info, create_dhcp_port,
              and update_dhcp_port methods.

    """

    BASE_RPC_API_VERSION = '1.1'

    def __init__(self, topic, context, use_namespaces):
        super(DhcpPluginApi, self).__init__(
            topic=topic, default_version=self.BASE_RPC_API_VERSION)
        self.context = context
        self.host = cfg.CONF.host
        self.use_namespaces = use_namespaces

    def get_active_networks_info(self):
        """Make a remote process call to retrieve all network info."""
        networks = self.call(self.context,
                             self.make_msg('get_active_networks_info',
                                           host=self.host),
                             topic=self.topic)
        return [dhcp.NetModel(self.use_namespaces, n) for n in networks]

    def get_network_info(self, network_id):
        """Make a remote process call to retrieve network info."""
        network = self.call(self.context,
                            self.make_msg('get_network_info',
                                          network_id=network_id,
                                          host=self.host),
                            topic=self.topic)
        if network:
            return dhcp.NetModel(self.use_namespaces, network)

    def get_dhcp_port(self, network_id, device_id):
        """Make a remote process call to get the dhcp port."""
        port = self.call(self.context,
                         self.make_msg('get_dhcp_port',
                                       network_id=network_id,
                                       device_id=device_id,
                                       host=self.host),
                         topic=self.topic)
        if port:
            return dhcp.DictModel(port)

    def create_dhcp_port(self, port):
        """Make a remote process call to create the dhcp port."""
        port = self.call(self.context,
                         self.make_msg('create_dhcp_port',
                                       port=port,
                                       host=self.host),
                         topic=self.topic)
        if port:
            return dhcp.DictModel(port)

    def update_dhcp_port(self, port_id, port):
        """Make a remote process call to update the dhcp port."""
        port = self.call(self.context,
                         self.make_msg('update_dhcp_port',
                                       port_id=port_id,
                                       port=port,
                                       host=self.host),
                         topic=self.topic)
        if port:
            return dhcp.DictModel(port)

    def release_dhcp_port(self, network_id, device_id):
        """Make a remote process call to release the dhcp port."""
        return self.call(self.context,
                         self.make_msg('release_dhcp_port',
                                       network_id=network_id,
                                       device_id=device_id,
                                       host=self.host),
                         topic=self.topic)

    def release_port_fixed_ip(self, network_id, device_id, subnet_id):
        """Make a remote process call to release a fixed_ip on the port."""
        return self.call(self.context,
                         self.make_msg('release_port_fixed_ip',
                                       network_id=network_id,
                                       subnet_id=subnet_id,
                                       device_id=device_id,
                                       host=self.host),
                         topic=self.topic)


class NetworkCache(object):
    """Agent cache of the current network state."""
    def __init__(self):
        self.cache = {}
        self.subnet_lookup = {}
        self.port_lookup = {}

    def get_network_ids(self):
        return self.cache.keys()

    def get_network_by_id(self, network_id):
        return self.cache.get(network_id)

    def get_network_by_subnet_id(self, subnet_id):
        return self.cache.get(self.subnet_lookup.get(subnet_id))

    def get_network_by_port_id(self, port_id):
        return self.cache.get(self.port_lookup.get(port_id))

    def put(self, network):
        if network.id in self.cache:
            self.remove(self.cache[network.id])

        self.cache[network.id] = network

        for subnet in network.subnets:
            self.subnet_lookup[subnet.id] = network.id

        for port in network.ports:
            self.port_lookup[port.id] = network.id

    def remove(self, network):
        del self.cache[network.id]

        for subnet in network.subnets:
            del self.subnet_lookup[subnet.id]

        for port in network.ports:
            del self.port_lookup[port.id]

    def put_port(self, port):
        network = self.get_network_by_id(port.network_id)
        for index in range(len(network.ports)):
            if network.ports[index].id == port.id:
                network.ports[index] = port
                break
        else:
            network.ports.append(port)

        self.port_lookup[port.id] = network.id

    def remove_port(self, port):
        network = self.get_network_by_port_id(port.id)

        for index in range(len(network.ports)):
            if network.ports[index] == port:
                del network.ports[index]
                del self.port_lookup[port.id]
                break

    def get_port_by_id(self, port_id):
        network = self.get_network_by_port_id(port_id)
        if network:
            for port in network.ports:
                if port.id == port_id:
                    return port

    def get_state(self):
        net_ids = self.get_network_ids()
        num_nets = len(net_ids)
        num_subnets = 0
        num_ports = 0
        for net_id in net_ids:
            network = self.get_network_by_id(net_id)
            num_subnets += len(network.subnets)
            num_ports += len(network.ports)
        return {'networks': num_nets,
                'subnets': num_subnets,
                'ports': num_ports}


class DhcpAgentWithStateReport(DhcpAgent):
    def __init__(self, host=None):
        super(DhcpAgentWithStateReport, self).__init__(host=host)
        self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN)
        self.agent_state = {
            'binary': 'neutron-dhcp-agent',
            'host': host,
            'topic': topics.DHCP_AGENT,
            'configurations': {
                'dhcp_driver': cfg.CONF.dhcp_driver,
                'use_namespaces': cfg.CONF.use_namespaces,
                'dhcp_lease_duration': cfg.CONF.dhcp_lease_duration},
            'start_flag': True,
            'agent_type': constants.AGENT_TYPE_DHCP}
        report_interval = cfg.CONF.AGENT.report_interval
        self.use_call = True
        if report_interval:
            self.heartbeat = loopingcall.FixedIntervalLoopingCall(
                self._report_state)
            self.heartbeat.start(interval=report_interval)

    def _report_state(self):
        try:
            self.agent_state.get('configurations').update(
                self.cache.get_state())
            ctx = context.get_admin_context_without_session()
            self.state_rpc.report_state(ctx, self.agent_state, self.use_call)
            self.use_call = False
        except AttributeError:
            # This means the server does not support report_state
            LOG.warn(_("Neutron server does not support state report."
                       " State report for this agent will be disabled."))
            self.heartbeat.stop()
            self.run()
            return
        except Exception:
            LOG.exception(_("Failed reporting state!"))
            return
        if self.agent_state.pop('start_flag', None):
            self.run()

    def agent_updated(self, context, payload):
        """Handle the agent_updated notification event."""
        self.schedule_resync(_("Agent updated: %(payload)s") %
                             {"payload": payload})
        LOG.info(_("agent_updated by server side %s!"), payload)

    def after_start(self):
        LOG.info(_("DHCP agent started"))


def register_options():
    cfg.CONF.register_opts(DhcpAgent.OPTS)
    config.register_interface_driver_opts_helper(cfg.CONF)
    config.register_use_namespaces_opts_helper(cfg.CONF)
    config.register_agent_state_opts_helper(cfg.CONF)
    config.register_root_helper(cfg.CONF)
    cfg.CONF.register_opts(dhcp.OPTS)
    cfg.CONF.register_opts(interface.OPTS)


def main():
    register_options()
    common_config.init(sys.argv[1:])
    config.setup_logging(cfg.CONF)
    server = neutron_service.Service.create(
        binary='neutron-dhcp-agent',
        topic=topics.DHCP_AGENT,
        report_interval=cfg.CONF.AGENT.report_interval,
        manager='neutron.agent.dhcp_agent.DhcpAgentWithStateReport')
    service.launch(server).wait()
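
Note on NetworkCache above: put() evicts any cached copy of the network and rebuilds the subnet-id and port-id reverse lookups, which is what lets the notification handlers map a subnet or port back to its network. A minimal sketch with hypothetical data:

    # Illustrative sketch, not part of the original file.
    cache = NetworkCache()
    net = dhcp.NetModel(True, {'id': 'net-1',
                               'subnets': [{'id': 'sub-1', 'enable_dhcp': True,
                                            'cidr': '10.0.0.0/24'}],
                               'ports': []})
    cache.put(net)
    assert cache.get_network_by_subnet_id('sub-1') is net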

@@ -1,136 +0,0 @@
# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import abc
import contextlib

import six


@six.add_metaclass(abc.ABCMeta)
class FirewallDriver(object):
    """Firewall Driver base class.

    Defines methods that any driver providing security groups
    and provider firewall functionality should implement.
    Note: the port attribute should carry the port's security group ids and
    security group rules.

    the dict of port should have
      device : interface name
      fixed_ips: ips of the device
      mac_address: mac_address of the device
      security_groups: [sgid, sgid]
      security_group_rules : [ rule, rule ]
      the rule must contain ethertype and direction
      the rule may contain security_group_id,
          protocol, port_min, port_max
          source_ip_prefix, source_port_min,
          source_port_max, dest_ip_prefix, and
          remote_group_id
    Note: source_group_ip in REST API should be converted by this rule
    if direction is ingress:
        remote_group_ip will be a source_ip_prefix
    if direction is egress:
        remote_group_ip will be a dest_ip_prefix
    Note: remote_group_id in REST API should be converted by this rule
    if direction is ingress:
        remote_group_id will be a list of source_ip_prefix
    if direction is egress:
        remote_group_id will be a list of dest_ip_prefix
    remote_group_id is also used for managing membership updates
    """

    def prepare_port_filter(self, port):
        """Prepare filters for the port.

        This method should be called before the port is created.
        """
        raise NotImplementedError()

    def apply_port_filter(self, port):
        """Apply port filter.

        Once this method returns, the port should be firewalled
        appropriately. This method should as far as possible be a
        no-op. It's vastly preferred to get everything set up in
        prepare_port_filter.
        """
        raise NotImplementedError()

    def update_port_filter(self, port):
        """Refresh security group rules from data store

        Gets called when a port gets added to or removed from
        the security group it is a member of, or when the
        group gains or loses a rule.
        """
        raise NotImplementedError()

    def remove_port_filter(self, port):
        """Stop filtering port."""
        raise NotImplementedError()

    def filter_defer_apply_on(self):
        """Defer application of filtering rule."""
        pass

    def filter_defer_apply_off(self):
        """Turn off deferral of rules and apply the rules now."""
        pass

    @property
    def ports(self):
        """Returns filtered ports."""
        pass

    @contextlib.contextmanager
    def defer_apply(self):
        """Defer apply context."""
        self.filter_defer_apply_on()
        try:
            yield
        finally:
            self.filter_defer_apply_off()


class NoopFirewallDriver(FirewallDriver):
    """Noop Firewall Driver.

    Firewall driver which does nothing.
    This driver is for disabling the firewall functionality.
    """

    def prepare_port_filter(self, port):
        pass

    def apply_port_filter(self, port):
        pass

    def update_port_filter(self, port):
        pass

    def remove_port_filter(self, port):
        pass

    def filter_defer_apply_on(self):
        pass

    def filter_defer_apply_off(self):
        pass

    @property
    def ports(self):
        return {}
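
Note: defer_apply() above is a context manager so callers can batch several filter changes and apply them once on exit; a hedged sketch of the intended call pattern (port_a and port_b are hypothetical):

    # Illustrative sketch, not part of the original file.
    driver = NoopFirewallDriver()
    with driver.defer_apply():
        driver.prepare_port_filter(port_a)
        driver.update_port_filter(port_b)
    # filter_defer_apply_off() runs here and applies the pending rules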

@@ -1,14 +0,0 @@
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

@@ -1,221 +0,0 @@
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import eventlet
import eventlet.event
import eventlet.queue

from neutron.agent.linux import utils
from neutron.openstack.common import log as logging


LOG = logging.getLogger(__name__)


class AsyncProcessException(Exception):
    pass


class AsyncProcess(object):
    """Manages an asynchronous process.

    This class spawns a new process via subprocess and uses
    greenthreads to read stderr and stdout asynchronously into queues
    that can be read via repeatedly calling iter_stdout() and
    iter_stderr().

    If respawn_interval is non-zero, any error in communicating with
    the managed process will result in the process and greenthreads
    being cleaned up and the process restarted after the specified
    interval.

    Example usage:

    >>> import time
    >>> proc = AsyncProcess(['ping'])
    >>> proc.start()
    >>> time.sleep(5)
    >>> proc.stop()
    >>> for line in proc.iter_stdout():
    ...     print line
    """

    def __init__(self, cmd, root_helper=None, respawn_interval=None):
        """Constructor.

        :param cmd: The list of command arguments to invoke.
        :param root_helper: Optional, utility to use when running shell cmds.
        :param respawn_interval: Optional, the interval in seconds to wait
               to respawn after unexpected process death. Respawn will
               only be attempted if a value of 0 or greater is provided.
        """
        self.cmd = cmd
        self.root_helper = root_helper
        if respawn_interval is not None and respawn_interval < 0:
            raise ValueError(_('respawn_interval must be >= 0 if provided.'))
        self.respawn_interval = respawn_interval
        self._process = None
        self._kill_event = None
        self._reset_queues()
        self._watchers = []

    def _reset_queues(self):
        self._stdout_lines = eventlet.queue.LightQueue()
        self._stderr_lines = eventlet.queue.LightQueue()

    def start(self):
        """Launch a process and monitor it asynchronously."""
        if self._kill_event:
            raise AsyncProcessException(_('Process is already started'))
        else:
            LOG.debug(_('Launching async process [%s].'), self.cmd)
            self._spawn()

    def stop(self):
        """Halt the process and watcher threads."""
        if self._kill_event:
            LOG.debug(_('Halting async process [%s].'), self.cmd)
            self._kill()
        else:
            raise AsyncProcessException(_('Process is not running.'))

    def _spawn(self):
        """Spawn a process and its watchers."""
        self._kill_event = eventlet.event.Event()
        self._process, cmd = utils.create_process(self.cmd,
                                                  root_helper=self.root_helper)
        self._watchers = []
        for reader in (self._read_stdout, self._read_stderr):
            # Pass the stop event directly to the greenthread to
            # ensure that assignment of a new event to the instance
            # attribute does not prevent the greenthread from using
            # the original event.
            watcher = eventlet.spawn(self._watch_process,
                                     reader,
                                     self._kill_event)
            self._watchers.append(watcher)

    def _kill(self, respawning=False):
        """Kill the process and the associated watcher greenthreads.

        :param respawning: Optional, whether respawn will be subsequently
               attempted.
        """
        # Halt the greenthreads
        self._kill_event.send()

        pid = self._get_pid_to_kill()
        if pid:
            self._kill_process(pid)

        if not respawning:
            # Clear the kill event to ensure the process can be
            # explicitly started again.
            self._kill_event = None

    def _get_pid_to_kill(self):
        pid = self._process.pid
        # If root helper was used, two or more processes will be created:
        #
        #  - a root helper process (e.g. sudo myscript)
        #  - possibly a rootwrap script (e.g. neutron-rootwrap)
        #  - a child process (e.g. myscript)
        #
        # Killing the root helper process will leave the child process
        # running, re-parented to init, so the only way to ensure that both
        # die is to target the child process directly.
        if self.root_helper:
            try:
                pid = utils.find_child_pids(pid)[0]
            except IndexError:
                # Process is already dead
                return None
            while True:
                try:
                    # We shouldn't have more than one child per process
                    # so keep getting the children of the first one
                    pid = utils.find_child_pids(pid)[0]
                except IndexError:
                    # Last process in the tree, return it
                    break
        return pid

    def _kill_process(self, pid):
        try:
            # A process started by a root helper will be running as
            # root and need to be killed via the same helper.
            utils.execute(['kill', '-9', pid], root_helper=self.root_helper)
        except Exception as ex:
            stale_pid = (isinstance(ex, RuntimeError) and
                         'No such process' in str(ex))
            if not stale_pid:
                LOG.exception(_('An error occurred while killing [%s].'),
                              self.cmd)
                return False
        return True

    def _handle_process_error(self):
        """Kill the async process and respawn if necessary."""
        LOG.debug(_('Halting async process [%s] in response to an error.'),
                  self.cmd)
        respawning = self.respawn_interval >= 0
        self._kill(respawning=respawning)
        if respawning:
            eventlet.sleep(self.respawn_interval)
            LOG.debug(_('Respawning async process [%s].'), self.cmd)
            self._spawn()

    def _watch_process(self, callback, kill_event):
        while not kill_event.ready():
            try:
                if not callback():
                    break
            except Exception:
                LOG.exception(_('An error occurred while communicating '
                                'with async process [%s].'), self.cmd)
                break
            # Ensure that watching a process with lots of output does
            # not block execution of other greenthreads.
            eventlet.sleep()
        # The kill event not being ready indicates that the loop was
        # broken out of due to an error in the watched process rather
        # than the loop condition being satisfied.
        if not kill_event.ready():
            self._handle_process_error()

    def _read(self, stream, queue):
        data = stream.readline()
        if data:
            data = data.strip()
            queue.put(data)
            return data

    def _read_stdout(self):
        return self._read(self._process.stdout, self._stdout_lines)

    def _read_stderr(self):
        return self._read(self._process.stderr, self._stderr_lines)

    def _iter_queue(self, queue):
        while True:
            try:
                yield queue.get_nowait()
            except eventlet.queue.Empty:
                break

    def iter_stdout(self):
        return self._iter_queue(self._stdout_lines)

    def iter_stderr(self):
        return self._iter_queue(self._stderr_lines)
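
Note: the subtle part of AsyncProcess is _get_pid_to_kill(): with a root helper, the managed command is the leaf of a sudo/rootwrap process chain, so the code walks down one child at a time. The same idea in isolation (find_child_pids is assumed to return a list of direct child pids):

    # Illustrative sketch, not part of the original file.
    def leaf_pid(pid, find_child_pids):
        while True:
            children = find_child_pids(pid)
            if not children:
                return pid      # last process in the chain
            pid = children[0]   # at most one child per level is expected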

@@ -1,149 +0,0 @@
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mark McClain, DreamHost

import atexit
import fcntl
import os
import signal
import sys

from neutron.openstack.common import log as logging

LOG = logging.getLogger(__name__)


class Pidfile(object):
    def __init__(self, pidfile, procname, uuid=None):
        self.pidfile = pidfile
        self.procname = procname
        self.uuid = uuid
        try:
            self.fd = os.open(pidfile, os.O_CREAT | os.O_RDWR)
            fcntl.flock(self.fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except IOError:
            LOG.exception(_("Error while handling pidfile: %s"), pidfile)
            sys.exit(1)

    def __str__(self):
        return self.pidfile

    def unlock(self):
        # flock() raises IOError on failure, so surface that directly
        # instead of the original dead "if not not ..." check.
        try:
            fcntl.flock(self.fd, fcntl.LOCK_UN)
        except IOError:
            raise IOError(_('Unable to unlock pid file'))

    def write(self, pid):
        os.ftruncate(self.fd, 0)
        os.write(self.fd, "%d" % pid)
        os.fsync(self.fd)

    def read(self):
        try:
            pid = int(os.read(self.fd, 128))
            os.lseek(self.fd, 0, os.SEEK_SET)
            return pid
        except ValueError:
            return

    def is_running(self):
        pid = self.read()
        if not pid:
            return False

        cmdline = '/proc/%s/cmdline' % pid
        try:
            with open(cmdline, "r") as f:
                exec_out = f.readline()
            return self.procname in exec_out and (not self.uuid or
                                                  self.uuid in exec_out)
        except IOError:
            return False


class Daemon(object):
    """A generic daemon class.

    Usage: subclass the Daemon class and override the run() method
    """
    def __init__(self, pidfile, stdin='/dev/null', stdout='/dev/null',
                 stderr='/dev/null', procname='python', uuid=None):
        self.stdin = stdin
        self.stdout = stdout
        self.stderr = stderr
        self.procname = procname
        self.pidfile = Pidfile(pidfile, procname, uuid)

    def _fork(self):
        try:
            pid = os.fork()
            if pid > 0:
                sys.exit(0)
        except OSError:
            LOG.exception(_('Fork failed'))
            sys.exit(1)

    def daemonize(self):
        """Daemonize process by doing Stevens double fork."""
        # fork first time
        self._fork()

        # decouple from parent environment
        os.chdir("/")
        os.setsid()
        os.umask(0)

        # fork second time
        self._fork()

        # redirect standard file descriptors
        sys.stdout.flush()
        sys.stderr.flush()
        stdin = open(self.stdin, 'r')
        stdout = open(self.stdout, 'a+')
        stderr = open(self.stderr, 'a+', 0)
        os.dup2(stdin.fileno(), sys.stdin.fileno())
        os.dup2(stdout.fileno(), sys.stdout.fileno())
        os.dup2(stderr.fileno(), sys.stderr.fileno())

        # write pidfile
        atexit.register(self.delete_pid)
        signal.signal(signal.SIGTERM, self.handle_sigterm)
        self.pidfile.write(os.getpid())

    def delete_pid(self):
        os.remove(str(self.pidfile))

    def handle_sigterm(self, signum, frame):
        sys.exit(0)

    def start(self):
        """Start the daemon."""

        if self.pidfile.is_running():
            self.pidfile.unlock()
            message = _('Pidfile %s already exists. Daemon already running?')
            LOG.error(message, self.pidfile)
            sys.exit(1)

        # Start the daemon
        self.daemonize()
        self.run()

    def run(self):
        """Override this method when subclassing Daemon.

        start() will call this method after the process has daemonized.
        """
        pass
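
Note: as the Daemon docstring says, the class is used by subclassing and overriding run(); a minimal hedged sketch (do_work is hypothetical):

    # Illustrative sketch, not part of the original file.
    class MyAgentDaemon(Daemon):
        def run(self):
            while True:
                do_work()

    MyAgentDaemon('/var/run/my-agent.pid', procname='my-agent').start()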

@@ -1,921 +0,0 @@
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import abc
import collections
import os
import re
import shutil
import socket
import sys

import netaddr
from oslo.config import cfg
import six

from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils
from neutron.common import constants
from neutron.common import exceptions
from neutron.common import utils as commonutils
from neutron.openstack.common import importutils
from neutron.openstack.common import jsonutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import uuidutils

LOG = logging.getLogger(__name__)

OPTS = [
    cfg.StrOpt('dhcp_confs',
               default='$state_path/dhcp',
               help=_('Location to store DHCP server config files')),
    cfg.StrOpt('dhcp_domain',
               default='openstacklocal',
               help=_('Domain to use for building the hostnames')),
    cfg.StrOpt('dnsmasq_config_file',
               default='',
               help=_('Override the default dnsmasq settings with this file')),
    cfg.ListOpt('dnsmasq_dns_servers',
                help=_('Comma-separated list of the DNS servers which will be '
                       'used as forwarders.'),
                deprecated_name='dnsmasq_dns_server'),
    cfg.BoolOpt('dhcp_delete_namespaces', default=False,
                help=_("Delete namespace after removing a dhcp server.")),
    cfg.IntOpt(
        'dnsmasq_lease_max',
        default=(2 ** 24),
        help=_('Limit number of leases to prevent a denial-of-service.')),
]

IPV4 = 4
IPV6 = 6
UDP = 'udp'
TCP = 'tcp'
DNS_PORT = 53
DHCPV4_PORT = 67
DHCPV6_PORT = 547
METADATA_DEFAULT_PREFIX = 16
METADATA_DEFAULT_IP = '169.254.169.254'
METADATA_DEFAULT_CIDR = '%s/%d' % (METADATA_DEFAULT_IP,
                                   METADATA_DEFAULT_PREFIX)
METADATA_PORT = 80
WIN2k3_STATIC_DNS = 249
NS_PREFIX = 'qdhcp-'


class DictModel(dict):
    """Convert dict into an object that provides attribute access to values."""

    def __init__(self, *args, **kwargs):
        """Convert dict values to DictModel values."""
        super(DictModel, self).__init__(*args, **kwargs)

        def needs_upgrade(item):
            """Check if `item` is a dict and needs to be changed to DictModel.
            """
            return isinstance(item, dict) and not isinstance(item, DictModel)

        def upgrade(item):
            """Upgrade item if it needs to be upgraded."""
            if needs_upgrade(item):
                return DictModel(item)
            else:
                return item

        for key, value in self.iteritems():
            if isinstance(value, (list, tuple)):
                # Keep the same type but convert dicts to DictModels
                self[key] = type(value)(
                    (upgrade(item) for item in value)
                )
            elif needs_upgrade(value):
                # Change dict instance values to DictModel instance values
                self[key] = DictModel(value)

    def __getattr__(self, name):
        try:
            return self[name]
        except KeyError as e:
            raise AttributeError(e)

    def __setattr__(self, name, value):
        self[name] = value

    def __delattr__(self, name):
        del self[name]
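

# Editor's note (illustrative, not part of the original file): DictModel
# recursively exposes dict keys as attributes, e.g.:
#
#     port = DictModel({'id': 'p1',
#                       'fixed_ips': [{'ip_address': '10.0.0.3'}]})
#     port.fixed_ips[0].ip_address   # -> '10.0.0.3'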
|
||||
|
||||
|
||||
class NetModel(DictModel):
|
||||
|
||||
def __init__(self, use_namespaces, d):
|
||||
super(NetModel, self).__init__(d)
|
||||
|
||||
self._ns_name = (use_namespaces and
|
||||
"%s%s" % (NS_PREFIX, self.id) or None)
|
||||
|
||||
@property
|
||||
def namespace(self):
|
||||
return self._ns_name
|
||||
|
||||
|
||||
@six.add_metaclass(abc.ABCMeta)
|
||||
class DhcpBase(object):
|
||||
|
||||
def __init__(self, conf, network, root_helper='sudo',
|
||||
version=None, plugin=None):
|
||||
self.conf = conf
|
||||
self.network = network
|
||||
self.root_helper = root_helper
|
||||
self.device_manager = DeviceManager(self.conf,
|
||||
self.root_helper, plugin)
|
||||
self.version = version
|
||||
|
||||
@abc.abstractmethod
|
||||
def enable(self):
|
||||
"""Enables DHCP for this network."""
|
||||
|
||||
@abc.abstractmethod
|
||||
def disable(self, retain_port=False):
|
||||
"""Disable dhcp for this network."""
|
||||
|
||||
def restart(self):
|
||||
"""Restart the dhcp service for the network."""
|
||||
self.disable(retain_port=True)
|
||||
self.enable()
|
||||
|
||||
@abc.abstractproperty
|
||||
def active(self):
|
||||
"""Boolean representing the running state of the DHCP server."""
|
||||
|
||||
@abc.abstractmethod
|
||||
def reload_allocations(self):
|
||||
"""Force the DHCP server to reload the assignment database."""
|
||||
|
||||
@classmethod
|
||||
def existing_dhcp_networks(cls, conf, root_helper):
|
||||
"""Return a list of existing networks ids that we have configs for."""
|
||||
|
||||
raise NotImplementedError
|
||||
|
||||
@classmethod
|
||||
def check_version(cls):
|
||||
"""Execute version checks on DHCP server."""
|
||||
|
||||
raise NotImplementedError
|
||||
|
||||
|
||||
class DhcpLocalProcess(DhcpBase):
|
||||
PORTS = []
|
||||
|
||||
def _enable_dhcp(self):
|
||||
"""check if there is a subnet within the network with dhcp enabled."""
|
||||
for subnet in self.network.subnets:
|
||||
if subnet.enable_dhcp:
|
||||
return True
|
||||
return False
|
||||
|
||||
def enable(self):
|
||||
"""Enables DHCP for this network by spawning a local process."""
|
||||
interface_name = self.device_manager.setup(self.network)
|
||||
if self.active:
|
||||
self.restart()
|
||||
elif self._enable_dhcp():
|
||||
self.interface_name = interface_name
|
||||
self.spawn_process()
|
||||
|
||||
def disable(self, retain_port=False):
|
||||
"""Disable DHCP for this network by killing the local process."""
|
||||
pid = self.pid
|
||||
|
||||
if pid:
|
||||
if self.active:
|
||||
cmd = ['kill', '-9', pid]
|
||||
utils.execute(cmd, self.root_helper)
|
||||
else:
|
||||
LOG.debug(_('DHCP for %(net_id)s is stale, pid %(pid)d '
|
||||
'does not exist, performing cleanup'),
|
||||
{'net_id': self.network.id, 'pid': pid})
|
||||
if not retain_port:
|
||||
self.device_manager.destroy(self.network,
|
||||
self.interface_name)
|
||||
else:
|
||||
LOG.debug(_('No DHCP started for %s'), self.network.id)
|
||||
|
||||
self._remove_config_files()
|
||||
|
||||
if not retain_port:
|
||||
if self.conf.dhcp_delete_namespaces and self.network.namespace:
|
||||
ns_ip = ip_lib.IPWrapper(self.root_helper,
|
||||
self.network.namespace)
|
||||
try:
|
||||
ns_ip.netns.delete(self.network.namespace)
|
||||
except RuntimeError:
|
||||
msg = _('Failed trying to delete namespace: %s')
|
||||
LOG.exception(msg, self.network.namespace)
|
||||
|
||||
def _remove_config_files(self):
|
||||
confs_dir = os.path.abspath(os.path.normpath(self.conf.dhcp_confs))
|
||||
conf_dir = os.path.join(confs_dir, self.network.id)
|
||||
shutil.rmtree(conf_dir, ignore_errors=True)
|
||||
|
||||
def get_conf_file_name(self, kind, ensure_conf_dir=False):
|
||||
"""Returns the file name for a given kind of config file."""
|
||||
confs_dir = os.path.abspath(os.path.normpath(self.conf.dhcp_confs))
|
||||
conf_dir = os.path.join(confs_dir, self.network.id)
|
||||
if ensure_conf_dir:
|
||||
if not os.path.isdir(conf_dir):
|
||||
os.makedirs(conf_dir, 0o755)
|
||||
|
||||
return os.path.join(conf_dir, kind)
|
||||
|
||||
def _get_value_from_conf_file(self, kind, converter=None):
|
||||
"""A helper function to read a value from one of the state files."""
|
||||
file_name = self.get_conf_file_name(kind)
|
||||
msg = _('Error while reading %s')
|
||||
|
||||
try:
|
||||
with open(file_name, 'r') as f:
|
||||
try:
|
||||
return converter and converter(f.read()) or f.read()
|
||||
except ValueError:
|
||||
msg = _('Unable to convert value in %s')
|
||||
except IOError:
|
||||
msg = _('Unable to access %s')
|
||||
|
||||
LOG.debug(msg % file_name)
|
||||
return None
|
||||
|
||||
@property
|
||||
def pid(self):
|
||||
"""Last known pid for the DHCP process spawned for this network."""
|
||||
return self._get_value_from_conf_file('pid', int)
|
||||
|
||||
@property
|
||||
def active(self):
|
||||
pid = self.pid
|
||||
if pid is None:
|
||||
return False
|
||||
|
||||
cmdline = '/proc/%s/cmdline' % pid
|
||||
try:
|
||||
with open(cmdline, "r") as f:
|
||||
return self.network.id in f.readline()
|
||||
except IOError:
|
||||
return False
|
||||
|
||||
@property
|
||||
def interface_name(self):
|
||||
return self._get_value_from_conf_file('interface')
|
||||
|
||||
@interface_name.setter
|
||||
def interface_name(self, value):
|
||||
interface_file_path = self.get_conf_file_name('interface',
|
||||
ensure_conf_dir=True)
|
||||
utils.replace_file(interface_file_path, value)
|
||||
|
||||
@abc.abstractmethod
|
||||
def spawn_process(self):
|
||||
pass
|
||||
|
||||
|
||||
class Dnsmasq(DhcpLocalProcess):
|
||||
# The ports that need to be opened when security policies are active
|
||||
# on the Neutron port used for DHCP. These are provided as a convenience
|
||||
# for users of this class.
|
||||
PORTS = {IPV4: [(UDP, DNS_PORT), (TCP, DNS_PORT), (UDP, DHCPV4_PORT)],
|
||||
IPV6: [(UDP, DNS_PORT), (TCP, DNS_PORT), (UDP, DHCPV6_PORT)],
|
||||
}
|
||||
|
||||
_TAG_PREFIX = 'tag%d'
|
||||
|
||||
NEUTRON_NETWORK_ID_KEY = 'NEUTRON_NETWORK_ID'
|
||||
NEUTRON_RELAY_SOCKET_PATH_KEY = 'NEUTRON_RELAY_SOCKET_PATH'
|
||||
MINIMUM_VERSION = 2.59
|
||||
|
||||
    @classmethod
    def check_version(cls):
        ver = 0
        try:
            cmd = ['dnsmasq', '--version']
            out = utils.execute(cmd)
            ver = re.findall(r"\d+\.\d+", out)[0]
            is_valid_version = float(ver) >= cls.MINIMUM_VERSION
            if not is_valid_version:
                LOG.warning(_('FAILED VERSION REQUIREMENT FOR DNSMASQ. '
                              'DHCP AGENT MAY NOT RUN CORRECTLY! '
                              'Please ensure that its version is %s '
                              'or above!'), cls.MINIMUM_VERSION)
        except (OSError, RuntimeError, IndexError, ValueError):
            LOG.warning(_('Unable to determine dnsmasq version. '
                          'Please ensure that its version is %s '
                          'or above!'), cls.MINIMUM_VERSION)
        return float(ver)

    @classmethod
    def existing_dhcp_networks(cls, conf, root_helper):
        """Return a list of existing network ids that we have configs for."""

        confs_dir = os.path.abspath(os.path.normpath(conf.dhcp_confs))

        return [
            c for c in os.listdir(confs_dir)
            if uuidutils.is_uuid_like(c)
        ]

    def spawn_process(self):
        """Spawns a Dnsmasq process for the network."""
        env = {
            self.NEUTRON_NETWORK_ID_KEY: self.network.id,
        }

        cmd = [
            'dnsmasq',
            '--no-hosts',
            '--no-resolv',
            '--strict-order',
            '--bind-interfaces',
            '--interface=%s' % self.interface_name,
            '--except-interface=lo',
            '--pid-file=%s' % self.get_conf_file_name(
                'pid', ensure_conf_dir=True),
            '--dhcp-hostsfile=%s' % self._output_hosts_file(),
            '--addn-hosts=%s' % self._output_addn_hosts_file(),
            '--dhcp-optsfile=%s' % self._output_opts_file(),
            '--leasefile-ro',
        ]

        possible_leases = 0
        for i, subnet in enumerate(self.network.subnets):
            # if a subnet is specified to have dhcp disabled
            if not subnet.enable_dhcp:
                continue
            if subnet.ip_version == 4:
                mode = 'static'
            else:
                # Note(scollins) If the IPv6 attributes are not set, set it as
                # static to preserve previous behavior
                if (not getattr(subnet, 'ipv6_ra_mode', None) and
                        not getattr(subnet, 'ipv6_address_mode', None)):
                    mode = 'static'
                elif getattr(subnet, 'ipv6_ra_mode', None) is None:
                    # RA mode is not set - do not launch dnsmasq
                    continue
            if self.version >= self.MINIMUM_VERSION:
                set_tag = 'set:'
            else:
                set_tag = ''

            cidr = netaddr.IPNetwork(subnet.cidr)

            if self.conf.dhcp_lease_duration == -1:
                lease = 'infinite'
            else:
                lease = '%ss' % self.conf.dhcp_lease_duration

            cmd.append('--dhcp-range=%s%s,%s,%s,%s' %
                       (set_tag, self._TAG_PREFIX % i,
                        cidr.network, mode, lease))

            possible_leases += cidr.size

        # Cap the limit because creating lots of subnets can inflate
        # this possible lease cap.
        cmd.append('--dhcp-lease-max=%d' %
                   min(possible_leases, self.conf.dnsmasq_lease_max))

        cmd.append('--conf-file=%s' % self.conf.dnsmasq_config_file)
        if self.conf.dnsmasq_dns_servers:
            cmd.extend(
                '--server=%s' % server
                for server in self.conf.dnsmasq_dns_servers)

        if self.conf.dhcp_domain:
            cmd.append('--domain=%s' % self.conf.dhcp_domain)

        ip_wrapper = ip_lib.IPWrapper(self.root_helper,
                                      self.network.namespace)
        ip_wrapper.netns.execute(cmd, addl_env=env)
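
For illustration, a sketch of the command line this method typically assembles; the interface name, network id, state paths, and lease values below are hypothetical and depend on the dhcp_confs, dhcp_lease_duration, and dhcp_domain settings:

    dnsmasq --no-hosts --no-resolv --strict-order --bind-interfaces
        --interface=tap1234abcd-12 --except-interface=lo
        --pid-file=/var/lib/neutron/dhcp/<network-id>/pid
        --dhcp-hostsfile=/var/lib/neutron/dhcp/<network-id>/host
        --addn-hosts=/var/lib/neutron/dhcp/<network-id>/addn_hosts
        --dhcp-optsfile=/var/lib/neutron/dhcp/<network-id>/opts
        --leasefile-ro --dhcp-range=set:tag0,10.0.0.0,static,86400s
        --dhcp-lease-max=256 --conf-file= --domain=openstacklocal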
    def _release_lease(self, mac_address, ip):
        """Release a DHCP lease."""
        cmd = ['dhcp_release', self.interface_name, ip, mac_address]
        ip_wrapper = ip_lib.IPWrapper(self.root_helper,
                                      self.network.namespace)
        ip_wrapper.netns.execute(cmd)

    def reload_allocations(self):
        """Rebuild the dnsmasq config and signal the dnsmasq to reload."""

        # If all subnets turn off dhcp, kill the process.
        if not self._enable_dhcp():
            self.disable()
            LOG.debug(_('Killing dnsmasq for network since all subnets have '
                        'turned off DHCP: %s'), self.network.id)
            return

        self._release_unused_leases()
        self._output_hosts_file()
        self._output_addn_hosts_file()
        self._output_opts_file()
        if self.active:
            cmd = ['kill', '-HUP', self.pid]
            utils.execute(cmd, self.root_helper)
        else:
            LOG.debug(_('Pid %d is stale, relaunching dnsmasq'), self.pid)
        LOG.debug(_('Reloading allocations for network: %s'), self.network.id)
        self.device_manager.update(self.network, self.interface_name)

    def _iter_hosts(self):
        """Iterate over hosts.

        For each host on the network we yield a tuple containing:
        (
            port,  # a DictModel instance representing the port.
            alloc,  # a DictModel instance of the allocated ip and subnet.
            host_name,  # Host name.
            name,  # Host name and domain name in the format 'hostname.domain'.
        )
        """
        v6_nets = dict((subnet.id, subnet) for subnet in
                       self.network.subnets if subnet.ip_version == 6)
        for port in self.network.ports:
            for alloc in port.fixed_ips:
                # Note(scollins) Only create entries that are
                # associated with the subnet being managed by this
                # dhcp agent
                if alloc.subnet_id in v6_nets:
                    ra_mode = v6_nets[alloc.subnet_id].ipv6_ra_mode
                    addr_mode = v6_nets[alloc.subnet_id].ipv6_address_mode
                    if (ra_mode is None and addr_mode == constants.IPV6_SLAAC):
                        continue
                hostname = 'host-%s' % alloc.ip_address.replace(
                    '.', '-').replace(':', '-')
                fqdn = '%s.%s' % (hostname, self.conf.dhcp_domain)
                yield (port, alloc, hostname, fqdn)

    def _output_hosts_file(self):
        """Writes a dnsmasq compatible dhcp hosts file.

        The generated file is sent to the --dhcp-hostsfile option of dnsmasq,
        and lists the hosts on the network which should receive a dhcp lease.
        Each line in this file is in the form::

            'mac_address,FQDN,ip_address'

        IMPORTANT NOTE: a dnsmasq instance does not resolve hosts defined in
        this file if it did not give a lease to a host listed in it (e.g.:
        multiple dnsmasq instances on the same network if this network is on
        multiple network nodes). This file only defines the hosts which
        should receive a dhcp lease; host resolution itself is handled by
        the `_output_addn_hosts_file` method.
        """
        buf = six.StringIO()
        filename = self.get_conf_file_name('host')

        LOG.debug(_('Building host file: %s'), filename)
        for (port, alloc, hostname, name) in self._iter_hosts():
            set_tag = ''
            # (dzyu) Check if it is a legal IPv6 address; if so, wrap it
            # with '[]' so that dnsmasq can distinguish the MAC address
            # from the IPv6 address.
            ip_address = alloc.ip_address
            if netaddr.valid_ipv6(ip_address):
                ip_address = '[%s]' % ip_address

            LOG.debug(_('Adding %(mac)s : %(name)s : %(ip)s'),
                      {"mac": port.mac_address, "name": name,
                       "ip": ip_address})

            if getattr(port, 'extra_dhcp_opts', False):
                if self.version >= self.MINIMUM_VERSION:
                    set_tag = 'set:'

                buf.write('%s,%s,%s,%s%s\n' %
                          (port.mac_address, name, ip_address,
                           set_tag, port.id))
            else:
                buf.write('%s,%s,%s\n' %
                          (port.mac_address, name, ip_address))

        utils.replace_file(filename, buf.getvalue())
        LOG.debug(_('Done building host file %s'), filename)
        return filename
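
For illustration, two hypothetical entries in the resulting hosts file (MAC and IP values are made up; note the bracketed IPv6 address in the second line):

    fa:16:3e:5c:11:22,host-10-0-0-2.openstacklocal,10.0.0.2
    fa:16:3e:aa:bb:cc,host-fd00--2.openstacklocal,[fd00::2]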
    def _read_hosts_file_leases(self, filename):
        leases = set()
        if os.path.exists(filename):
            with open(filename) as f:
                for l in f.readlines():
                    host = l.strip().split(',')
                    leases.add((host[2], host[0]))
        return leases

    def _release_unused_leases(self):
        filename = self.get_conf_file_name('host')
        old_leases = self._read_hosts_file_leases(filename)

        new_leases = set()
        for port in self.network.ports:
            for alloc in port.fixed_ips:
                new_leases.add((alloc.ip_address, port.mac_address))

        for ip, mac in old_leases - new_leases:
            self._release_lease(mac, ip)

    def _output_addn_hosts_file(self):
        """Writes a dnsmasq compatible additional hosts file.

        The generated file is sent to the --addn-hosts option of dnsmasq,
        and lists the hosts on the network which should be resolved even if
        the dnsmasq instance did not give a lease to the host (see the
        `_output_hosts_file` method).
        Each line in this file is in the same form as a standard /etc/hosts
        file.
        """
        buf = six.StringIO()
        for (port, alloc, hostname, fqdn) in self._iter_hosts():
            # It is compulsory to write the `fqdn` before the `hostname` in
            # order to obtain it in PTR responses.
            buf.write('%s\t%s %s\n' % (alloc.ip_address, fqdn, hostname))
        addn_hosts = self.get_conf_file_name('addn_hosts')
        utils.replace_file(addn_hosts, buf.getvalue())
        return addn_hosts

    def _output_opts_file(self):
        """Write a dnsmasq compatible options file."""

        if self.conf.enable_isolated_metadata:
            subnet_to_interface_ip = self._make_subnet_interface_ip_map()

        options = []

        dhcp_ips = collections.defaultdict(list)
        subnet_idx_map = {}
        for i, subnet in enumerate(self.network.subnets):
            if not subnet.enable_dhcp:
                continue
            if subnet.dns_nameservers:
                options.append(
                    self._format_option(i, 'dns-server',
                                        ','.join(subnet.dns_nameservers)))
            else:
                # use the dnsmasq ip as nameserver only if there is no
                # dns-server submitted by the server
                subnet_idx_map[subnet.id] = i

            gateway = subnet.gateway_ip
            host_routes = []
            for hr in subnet.host_routes:
                if hr.destination == "0.0.0.0/0":
                    if not gateway:
                        gateway = hr.nexthop
                else:
                    host_routes.append("%s,%s" % (hr.destination, hr.nexthop))

            # Add host routes for isolated network segments

            if self._enable_metadata(subnet):
                subnet_dhcp_ip = subnet_to_interface_ip[subnet.id]
                host_routes.append(
                    '%s/32,%s' % (METADATA_DEFAULT_IP, subnet_dhcp_ip)
                )

            if host_routes:
                if gateway and subnet.ip_version == 4:
                    host_routes.append("%s,%s" % ("0.0.0.0/0", gateway))
                options.append(
                    self._format_option(i, 'classless-static-route',
                                        ','.join(host_routes)))
                options.append(
                    self._format_option(i, WIN2k3_STATIC_DNS,
                                        ','.join(host_routes)))

            if subnet.ip_version == 4:
                if gateway:
                    options.append(self._format_option(i, 'router', gateway))
                else:
                    options.append(self._format_option(i, 'router'))

        for port in self.network.ports:
            if getattr(port, 'extra_dhcp_opts', False):
                options.extend(
                    self._format_option(port.id, opt.opt_name, opt.opt_value)
                    for opt in port.extra_dhcp_opts)

            # provide all dnsmasq ips as dns-servers if there is more than
            # one dnsmasq for a subnet and there is no dns-server submitted
            # by the server
            if port.device_owner == constants.DEVICE_OWNER_DHCP:
                for ip in port.fixed_ips:
                    i = subnet_idx_map.get(ip.subnet_id)
                    if i is None:
                        continue
                    dhcp_ips[i].append(ip.ip_address)

        for i, ips in dhcp_ips.items():
            if len(ips) > 1:
                options.append(self._format_option(i,
                                                   'dns-server',
                                                   ','.join(ips)))

        name = self.get_conf_file_name('opts')
        utils.replace_file(name, '\n'.join(options))
        return name

    def _make_subnet_interface_ip_map(self):
        ip_dev = ip_lib.IPDevice(
            self.interface_name,
            self.root_helper,
            self.network.namespace
        )

        subnet_lookup = dict(
            (netaddr.IPNetwork(subnet.cidr), subnet.id)
            for subnet in self.network.subnets
        )

        retval = {}

        for addr in ip_dev.addr.list():
            ip_net = netaddr.IPNetwork(addr['cidr'])

            if ip_net in subnet_lookup:
                retval[subnet_lookup[ip_net]] = addr['cidr'].split('/')[0]

        return retval

    def _format_option(self, tag, option, *args):
        """Format DHCP option by option name or code."""
        if self.version >= self.MINIMUM_VERSION:
            set_tag = 'tag:'
        else:
            set_tag = ''

        option = str(option)

        if isinstance(tag, int):
            tag = self._TAG_PREFIX % tag

        if not option.isdigit():
            option = 'option:%s' % option

        return ','.join((set_tag + tag, '%s' % option) + args)

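A quick sketch of the strings this helper produces (the option values are hypothetical; assumes a dnsmasq new enough for the 'tag:' prefix):

    self._format_option(0, 'dns-server', '8.8.8.8')
    # -> 'tag:tag0,option:dns-server,8.8.8.8'
    self._format_option(0, 26, '1450')
    # -> 'tag:tag0,26,1450'
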
    def _enable_metadata(self, subnet):
        '''Determine if the metadata route will be pushed to hosts on subnet.

        If subnet has a Neutron router attached, we want the hosts to get
        metadata from the router's proxy via their default route instead.
        '''
        if self.conf.enable_isolated_metadata and subnet.ip_version == 4:
            if subnet.gateway_ip is None:
                return True
            else:
                for port in self.network.ports:
                    if port.device_owner == constants.DEVICE_OWNER_ROUTER_INTF:
                        for alloc in port.fixed_ips:
                            if alloc.subnet_id == subnet.id:
                                return False
                return True
        else:
            return False

    @classmethod
    def lease_update(cls):
        network_id = os.environ.get(cls.NEUTRON_NETWORK_ID_KEY)
        dhcp_relay_socket = os.environ.get(cls.NEUTRON_RELAY_SOCKET_PATH_KEY)

        action = sys.argv[1]
        if action not in ('add', 'del', 'old'):
            sys.exit()

        mac_address = sys.argv[2]
        ip_address = sys.argv[3]

        if action == 'del':
            lease_remaining = 0
        else:
            lease_remaining = int(os.environ.get('DNSMASQ_TIME_REMAINING', 0))

        data = dict(network_id=network_id, mac_address=mac_address,
                    ip_address=ip_address, lease_remaining=lease_remaining)

        if os.path.exists(dhcp_relay_socket):
            sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            sock.connect(dhcp_relay_socket)
            sock.send(jsonutils.dumps(data))
            sock.close()


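For context, this classmethod is meant to run as a dnsmasq dhcp-script hook: dnsmasq invokes the script as '<script> add|old|del <mac> <ip>' with DNSMASQ_TIME_REMAINING in the environment, and the method forwards the lease event over the relay socket. A hypothetical invocation (ids, paths, and the script name are made up):

    NEUTRON_NETWORK_ID=<network-uuid> \
    NEUTRON_RELAY_SOCKET_PATH=/var/lib/neutron/dhcp/lease_relay \
        <lease-update-script> add fa:16:3e:5c:11:22 10.0.0.2
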
class DeviceManager(object):

    def __init__(self, conf, root_helper, plugin):
        self.conf = conf
        self.root_helper = root_helper
        self.plugin = plugin
        if not conf.interface_driver:
            msg = _('An interface driver must be specified')
            LOG.error(msg)
            raise SystemExit(1)
        try:
            self.driver = importutils.import_object(
                conf.interface_driver, conf)
        except Exception as e:
            msg = (_("Error importing interface driver '%(driver)s': "
                     "%(inner)s") % {'driver': conf.interface_driver,
                                     'inner': e})
            LOG.error(msg)
            raise SystemExit(1)

    def get_interface_name(self, network, port):
        """Return interface(device) name for use by the DHCP process."""
        return self.driver.get_device_name(port)

    def get_device_id(self, network):
        """Return a unique DHCP device ID for this host on the network."""
        # There could be more than one dhcp server per network, so create
        # a device id that combines host and network ids
        return commonutils.get_dhcp_agent_device_id(network.id, self.conf.host)

    def _set_default_route(self, network, device_name):
        """Sets the default gateway for this dhcp namespace.

        This method is idempotent and will only adjust the route if adjusting
        it would change it from what it already is. This makes it safe to call
        and avoids unnecessary perturbation of the system.
        """
        device = ip_lib.IPDevice(device_name,
                                 self.root_helper,
                                 network.namespace)
        gateway = device.route.get_gateway()
        if gateway:
            gateway = gateway['gateway']

        for subnet in network.subnets:
            skip_subnet = (
                subnet.ip_version != 4
                or not subnet.enable_dhcp
                or subnet.gateway_ip is None)

            if skip_subnet:
                continue

            if gateway != subnet.gateway_ip:
                m = _('Setting gateway for dhcp netns on net %(n)s to %(ip)s')
                LOG.debug(m, {'n': network.id, 'ip': subnet.gateway_ip})

                device.route.add_gateway(subnet.gateway_ip)

            return

        # No subnets on the network have a valid gateway. Clean it up to
        # avoid confusion from seeing an invalid gateway here.
        if gateway is not None:
            msg = _('Removing gateway for dhcp netns on net %s')
            LOG.debug(msg, network.id)

            device.route.delete_gateway(gateway)

    def setup_dhcp_port(self, network):
        """Create/update DHCP port for the host if needed and return port."""

        device_id = self.get_device_id(network)
        subnets = {}
        dhcp_enabled_subnet_ids = []
        for subnet in network.subnets:
            if subnet.enable_dhcp:
                dhcp_enabled_subnet_ids.append(subnet.id)
                subnets[subnet.id] = subnet

        dhcp_port = None
        for port in network.ports:
            port_device_id = getattr(port, 'device_id', None)
            if port_device_id == device_id:
                port_fixed_ips = []
                for fixed_ip in port.fixed_ips:
                    port_fixed_ips.append({'subnet_id': fixed_ip.subnet_id,
                                           'ip_address': fixed_ip.ip_address})
                    if fixed_ip.subnet_id in dhcp_enabled_subnet_ids:
                        dhcp_enabled_subnet_ids.remove(fixed_ip.subnet_id)

                # If there are dhcp_enabled_subnet_ids here that means that
                # we need to add those to the port and call update.
                if dhcp_enabled_subnet_ids:
                    port_fixed_ips.extend(
                        [dict(subnet_id=s) for s in dhcp_enabled_subnet_ids])
                    dhcp_port = self.plugin.update_dhcp_port(
                        port.id, {'port': {'network_id': network.id,
                                           'fixed_ips': port_fixed_ips}})
                    if not dhcp_port:
                        raise exceptions.Conflict()
                else:
                    dhcp_port = port
                # break since we found port that matches device_id
                break

        # check for a reserved DHCP port
        if dhcp_port is None:
            LOG.debug(_('DHCP port %(device_id)s on network %(network_id)s'
                        ' does not yet exist. Checking for a reserved port.'),
                      {'device_id': device_id, 'network_id': network.id})
            for port in network.ports:
                port_device_id = getattr(port, 'device_id', None)
                if port_device_id == constants.DEVICE_ID_RESERVED_DHCP_PORT:
                    dhcp_port = self.plugin.update_dhcp_port(
                        port.id, {'port': {'network_id': network.id,
                                           'device_id': device_id}})
                    if dhcp_port:
                        break

        # DHCP port has not yet been created.
        if dhcp_port is None:
            LOG.debug(_('DHCP port %(device_id)s on network %(network_id)s'
                        ' does not yet exist.'), {'device_id': device_id,
                                                  'network_id': network.id})
            port_dict = dict(
                name='',
                admin_state_up=True,
                device_id=device_id,
                network_id=network.id,
                tenant_id=network.tenant_id,
                fixed_ips=[dict(subnet_id=s) for s in dhcp_enabled_subnet_ids])
            dhcp_port = self.plugin.create_dhcp_port({'port': port_dict})

        if not dhcp_port:
            raise exceptions.Conflict()

        # Convert subnet_id to subnet dict
        fixed_ips = [dict(subnet_id=fixed_ip.subnet_id,
                          ip_address=fixed_ip.ip_address,
                          subnet=subnets[fixed_ip.subnet_id])
                     for fixed_ip in dhcp_port.fixed_ips]

        ips = [DictModel(item) if isinstance(item, dict) else item
               for item in fixed_ips]
        dhcp_port.fixed_ips = ips

        return dhcp_port

    def setup(self, network):
        """Create and initialize a device for network's DHCP on this host."""
        port = self.setup_dhcp_port(network)
        interface_name = self.get_interface_name(network, port)

        if ip_lib.ensure_device_is_ready(interface_name,
                                         self.root_helper,
                                         network.namespace):
            LOG.debug(_('Reusing existing device: %s.'), interface_name)
        else:
            self.driver.plug(network.id,
                             port.id,
                             interface_name,
                             port.mac_address,
                             namespace=network.namespace)
        ip_cidrs = []
        for fixed_ip in port.fixed_ips:
            subnet = fixed_ip.subnet
            net = netaddr.IPNetwork(subnet.cidr)
            ip_cidr = '%s/%s' % (fixed_ip.ip_address, net.prefixlen)
            ip_cidrs.append(ip_cidr)

        if (self.conf.enable_isolated_metadata and
                self.conf.use_namespaces):
            ip_cidrs.append(METADATA_DEFAULT_CIDR)

        self.driver.init_l3(interface_name, ip_cidrs,
                            namespace=network.namespace)

        # ensure that the dhcp interface is first in the list
        if network.namespace is None:
            device = ip_lib.IPDevice(interface_name,
                                     self.root_helper)
            device.route.pullup_route(interface_name)

        if self.conf.use_namespaces:
            self._set_default_route(network, interface_name)

        return interface_name

    def update(self, network, device_name):
        """Update device settings for the network's DHCP on this host."""
        if self.conf.use_namespaces:
            self._set_default_route(network, device_name)

    def destroy(self, network, device_name):
        """Destroy the device used for the network's DHCP on this host."""
        self.driver.unplug(device_name, namespace=network.namespace)

        self.plugin.release_dhcp_port(network.id,
                                      self.get_device_id(network))
@@ -1,102 +0,0 @@
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mark McClain, DreamHost

import os

from oslo.config import cfg

from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils
from neutron.openstack.common import log as logging

LOG = logging.getLogger(__name__)

OPTS = [
    cfg.StrOpt('external_pids',
               default='$state_path/external/pids',
               help=_('Location to store child pid files')),
]

cfg.CONF.register_opts(OPTS)


class ProcessManager(object):
    """An external process manager for Neutron spawned processes.

    Note: The manager expects uuid to be in cmdline.
    """
    def __init__(self, conf, uuid, root_helper='sudo', namespace=None):
        self.conf = conf
        self.uuid = uuid
        self.root_helper = root_helper
        self.namespace = namespace

    def enable(self, cmd_callback):
        if not self.active:
            cmd = cmd_callback(self.get_pid_file_name(ensure_pids_dir=True))

            ip_wrapper = ip_lib.IPWrapper(self.root_helper, self.namespace)
            ip_wrapper.netns.execute(cmd)

    def disable(self):
        pid = self.pid

        if self.active:
            cmd = ['kill', '-9', pid]
            utils.execute(cmd, self.root_helper)
        elif pid:
            LOG.debug(_('Process for %(uuid)s pid %(pid)d is stale, ignoring '
                        'command'), {'uuid': self.uuid, 'pid': pid})
        else:
            LOG.debug(_('No process started for %s'), self.uuid)

    def get_pid_file_name(self, ensure_pids_dir=False):
        """Returns the file name for this process' pid file."""
        pids_dir = os.path.abspath(os.path.normpath(self.conf.external_pids))
        if ensure_pids_dir and not os.path.isdir(pids_dir):
            os.makedirs(pids_dir, 0o755)

        return os.path.join(pids_dir, self.uuid + '.pid')

    @property
    def pid(self):
        """Last known pid for this external process spawned for this uuid."""
        file_name = self.get_pid_file_name()
        msg = _('Error while reading %s')

        try:
            with open(file_name, 'r') as f:
                return int(f.read())
        except IOError:
            msg = _('Unable to access %s')
        except ValueError:
            msg = _('Unable to convert value in %s')

        LOG.debug(msg, file_name)
        return None

    @property
    def active(self):
        pid = self.pid
        if pid is None:
            return False

        cmdline = '/proc/%s/cmdline' % pid
        try:
            with open(cmdline, "r") as f:
                return self.uuid in f.readline()
        except IOError:
            return False
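
A minimal usage sketch for this manager (the uuid, namespace, and dnsmasq command are hypothetical; enable() builds the command through the callback so the pid file path can be injected):

    pm = ProcessManager(cfg.CONF, uuid='4f1d...', namespace='qdhcp-4f1d...')
    pm.enable(lambda pid_file: ['dnsmasq', '--pid-file=%s' % pid_file])
    if pm.active:
        pm.disable()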
@@ -1,448 +0,0 @@
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import abc

import netaddr
from oslo.config import cfg
import six

from neutron.agent.common import config
from neutron.agent.linux import ip_lib
from neutron.agent.linux import ovs_lib
from neutron.agent.linux import utils
from neutron.common import exceptions
from neutron.extensions import flavor
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging


LOG = logging.getLogger(__name__)

OPTS = [
    cfg.StrOpt('ovs_integration_bridge',
               default='br-int',
               help=_('Name of Open vSwitch bridge to use')),
    cfg.BoolOpt('ovs_use_veth',
                default=False,
                help=_('Uses veth for an interface or not')),
    cfg.IntOpt('network_device_mtu',
               help=_('MTU setting for device.')),
    cfg.StrOpt('meta_flavor_driver_mappings',
               help=_('Mapping between flavor and LinuxInterfaceDriver')),
    cfg.StrOpt('admin_user',
               help=_("Admin username")),
    cfg.StrOpt('admin_password',
               help=_("Admin password"),
               secret=True),
    cfg.StrOpt('admin_tenant_name',
               help=_("Admin tenant name")),
    cfg.StrOpt('auth_url',
               help=_("Authentication URL")),
    cfg.StrOpt('auth_strategy', default='keystone',
               help=_("The type of authentication to use")),
    cfg.StrOpt('auth_region',
               help=_("Authentication region")),
]


@six.add_metaclass(abc.ABCMeta)
class LinuxInterfaceDriver(object):

    # from linux IF_NAMESIZE
    DEV_NAME_LEN = 14
    DEV_NAME_PREFIX = 'tap'

    def __init__(self, conf):
        self.conf = conf
        self.root_helper = config.get_root_helper(conf)

    def init_l3(self, device_name, ip_cidrs, namespace=None,
                preserve_ips=[], gateway=None, extra_subnets=[]):
        """Set the L3 settings for the interface using data from the port.

        ip_cidrs: list of 'X.X.X.X/YY' strings
        preserve_ips: list of ip cidrs that should not be removed from device
        """
        device = ip_lib.IPDevice(device_name,
                                 self.root_helper,
                                 namespace=namespace)

        previous = {}
        for address in device.addr.list(scope='global', filters=['permanent']):
            previous[address['cidr']] = address['ip_version']

        # add new addresses
        for ip_cidr in ip_cidrs:

            net = netaddr.IPNetwork(ip_cidr)
            # Convert to compact IPv6 address because the return values of
            # "ip addr list" are compact.
            if net.version == 6:
                ip_cidr = str(net)
            if ip_cidr in previous:
                del previous[ip_cidr]
                continue

            device.addr.add(net.version, ip_cidr, str(net.broadcast))

        # clean up any old addresses
        for ip_cidr, ip_version in previous.items():
            if ip_cidr not in preserve_ips:
                device.addr.delete(ip_version, ip_cidr)

        if gateway:
            device.route.add_gateway(gateway)

        new_onlink_routes = set(s['cidr'] for s in extra_subnets)
        existing_onlink_routes = set(device.route.list_onlink_routes())
        for route in new_onlink_routes - existing_onlink_routes:
            device.route.add_onlink_route(route)
        for route in existing_onlink_routes - new_onlink_routes:
            device.route.delete_onlink_route(route)

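For illustration, how the DHCP agent's DeviceManager typically calls this method (the device name, cidr, and namespace below are hypothetical):

    driver.init_l3('tap1234abcd-12', ['10.0.0.2/24'],
                   namespace='qdhcp-<network-id>')
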
    def check_bridge_exists(self, bridge):
        if not ip_lib.device_exists(bridge):
            raise exceptions.BridgeDoesNotExist(bridge=bridge)

    def get_device_name(self, port):
        return (self.DEV_NAME_PREFIX + port.id)[:self.DEV_NAME_LEN]

    @abc.abstractmethod
    def plug(self, network_id, port_id, device_name, mac_address,
             bridge=None, namespace=None, prefix=None):
        """Plug in the interface."""

    @abc.abstractmethod
    def unplug(self, device_name, bridge=None, namespace=None, prefix=None):
        """Unplug the interface."""


class NullDriver(LinuxInterfaceDriver):
    def plug(self, network_id, port_id, device_name, mac_address,
             bridge=None, namespace=None, prefix=None):
        pass

    def unplug(self, device_name, bridge=None, namespace=None, prefix=None):
        pass


class OVSInterfaceDriver(LinuxInterfaceDriver):
    """Driver for creating an internal interface on an OVS bridge."""

    DEV_NAME_PREFIX = 'tap'

    def __init__(self, conf):
        super(OVSInterfaceDriver, self).__init__(conf)
        if self.conf.ovs_use_veth:
            self.DEV_NAME_PREFIX = 'ns-'

    def _get_tap_name(self, dev_name, prefix=None):
        if self.conf.ovs_use_veth:
            dev_name = dev_name.replace(prefix or self.DEV_NAME_PREFIX, 'tap')
        return dev_name

    def _ovs_add_port(self, bridge, device_name, port_id, mac_address,
                      internal=True):
        cmd = ['ovs-vsctl', '--', '--if-exists', 'del-port', device_name, '--',
               'add-port', bridge, device_name]
        if internal:
            cmd += ['--', 'set', 'Interface', device_name, 'type=internal']
        cmd += ['--', 'set', 'Interface', device_name,
                'external-ids:iface-id=%s' % port_id,
                '--', 'set', 'Interface', device_name,
                'external-ids:iface-status=active',
                '--', 'set', 'Interface', device_name,
                'external-ids:attached-mac=%s' % mac_address]
        utils.execute(cmd, self.root_helper)

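For illustration, the ovs-vsctl invocation this builds for an internal port (the device name, port id, and MAC below are hypothetical):

    ovs-vsctl -- --if-exists del-port tap1234abcd-12 \
        -- add-port br-int tap1234abcd-12 \
        -- set Interface tap1234abcd-12 type=internal \
        -- set Interface tap1234abcd-12 external-ids:iface-id=<port-id> \
        -- set Interface tap1234abcd-12 external-ids:iface-status=active \
        -- set Interface tap1234abcd-12 external-ids:attached-mac=fa:16:3e:5c:11:22
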
    def plug(self, network_id, port_id, device_name, mac_address,
             bridge=None, namespace=None, prefix=None):
        """Plug in the interface."""
        if not bridge:
            bridge = self.conf.ovs_integration_bridge

        if not ip_lib.device_exists(device_name,
                                    self.root_helper,
                                    namespace=namespace):

            self.check_bridge_exists(bridge)

            ip = ip_lib.IPWrapper(self.root_helper)
            tap_name = self._get_tap_name(device_name, prefix)

            if self.conf.ovs_use_veth:
                # Create ns_dev in a namespace if one is configured.
                root_dev, ns_dev = ip.add_veth(tap_name,
                                               device_name,
                                               namespace2=namespace)
            else:
                ns_dev = ip.device(device_name)

            internal = not self.conf.ovs_use_veth
            self._ovs_add_port(bridge, tap_name, port_id, mac_address,
                               internal=internal)

            ns_dev.link.set_address(mac_address)

            if self.conf.network_device_mtu:
                ns_dev.link.set_mtu(self.conf.network_device_mtu)
                if self.conf.ovs_use_veth:
                    root_dev.link.set_mtu(self.conf.network_device_mtu)

            # Add an interface created by ovs to the namespace.
            if not self.conf.ovs_use_veth and namespace:
                namespace_obj = ip.ensure_namespace(namespace)
                namespace_obj.add_device_to_namespace(ns_dev)

            ns_dev.link.set_up()
            if self.conf.ovs_use_veth:
                root_dev.link.set_up()
        else:
            LOG.info(_("Device %s already exists"), device_name)

    def unplug(self, device_name, bridge=None, namespace=None, prefix=None):
        """Unplug the interface."""
        if not bridge:
            bridge = self.conf.ovs_integration_bridge

        tap_name = self._get_tap_name(device_name, prefix)
        self.check_bridge_exists(bridge)
        ovs = ovs_lib.OVSBridge(bridge, self.root_helper)

        try:
            ovs.delete_port(tap_name)
            if self.conf.ovs_use_veth:
                device = ip_lib.IPDevice(device_name,
                                         self.root_helper,
                                         namespace)
                device.link.delete()
                LOG.debug(_("Unplugged interface '%s'"), device_name)
        except RuntimeError:
            LOG.error(_("Failed unplugging interface '%s'"),
                      device_name)


class MidonetInterfaceDriver(LinuxInterfaceDriver):

    def plug(self, network_id, port_id, device_name, mac_address,
             bridge=None, namespace=None, prefix=None):
        """This method is called by the Dhcp agent or by the L3 agent
        when a new network is created
        """
        if not ip_lib.device_exists(device_name,
                                    self.root_helper,
                                    namespace=namespace):
            ip = ip_lib.IPWrapper(self.root_helper)
            tap_name = device_name.replace(prefix or 'tap', 'tap')

            # Create ns_dev in a namespace if one is configured.
            root_dev, ns_dev = ip.add_veth(tap_name, device_name,
                                           namespace2=namespace)

            ns_dev.link.set_address(mac_address)

            # Add the veth endpoint to the namespace.
            namespace_obj = ip.ensure_namespace(namespace)
            namespace_obj.add_device_to_namespace(ns_dev)

            ns_dev.link.set_up()
            root_dev.link.set_up()

            cmd = ['mm-ctl', '--bind-port', port_id, device_name]
            utils.execute(cmd, self.root_helper)

        else:
            LOG.info(_("Device %s already exists"), device_name)

    def unplug(self, device_name, bridge=None, namespace=None, prefix=None):
        # the port will be deleted by the dhcp agent that will call the plugin
        device = ip_lib.IPDevice(device_name,
                                 self.root_helper,
                                 namespace)
        try:
            device.link.delete()
        except RuntimeError:
            LOG.error(_("Failed unplugging interface '%s'"), device_name)
        LOG.debug(_("Unplugged interface '%s'"), device_name)

        ip_lib.IPWrapper(
            self.root_helper, namespace).garbage_collect_namespace()


class IVSInterfaceDriver(LinuxInterfaceDriver):
    """Driver for creating an internal interface on an IVS bridge."""

    DEV_NAME_PREFIX = 'tap'

    def __init__(self, conf):
        super(IVSInterfaceDriver, self).__init__(conf)
        self.DEV_NAME_PREFIX = 'ns-'

    def _get_tap_name(self, dev_name, prefix=None):
        dev_name = dev_name.replace(prefix or self.DEV_NAME_PREFIX, 'tap')
        return dev_name

    def _ivs_add_port(self, device_name, port_id, mac_address):
        cmd = ['ivs-ctl', 'add-port', device_name]
        utils.execute(cmd, self.root_helper)

    def plug(self, network_id, port_id, device_name, mac_address,
             bridge=None, namespace=None, prefix=None):
        """Plug in the interface."""
        if not ip_lib.device_exists(device_name,
                                    self.root_helper,
                                    namespace=namespace):

            ip = ip_lib.IPWrapper(self.root_helper)
            tap_name = self._get_tap_name(device_name, prefix)

            root_dev, ns_dev = ip.add_veth(tap_name, device_name)

            self._ivs_add_port(tap_name, port_id, mac_address)

            ns_dev = ip.device(device_name)
            ns_dev.link.set_address(mac_address)

            if self.conf.network_device_mtu:
                ns_dev.link.set_mtu(self.conf.network_device_mtu)
                root_dev.link.set_mtu(self.conf.network_device_mtu)

            if namespace:
                namespace_obj = ip.ensure_namespace(namespace)
                namespace_obj.add_device_to_namespace(ns_dev)

            ns_dev.link.set_up()
            root_dev.link.set_up()
        else:
            LOG.info(_("Device %s already exists"), device_name)

    def unplug(self, device_name, bridge=None, namespace=None, prefix=None):
        """Unplug the interface."""
        tap_name = self._get_tap_name(device_name, prefix)
        try:
            cmd = ['ivs-ctl', 'del-port', tap_name]
            utils.execute(cmd, self.root_helper)
            device = ip_lib.IPDevice(device_name,
                                     self.root_helper,
                                     namespace)
            device.link.delete()
            LOG.debug(_("Unplugged interface '%s'"), device_name)
        except RuntimeError:
            LOG.error(_("Failed unplugging interface '%s'"),
                      device_name)


class BridgeInterfaceDriver(LinuxInterfaceDriver):
    """Driver for creating bridge interfaces."""

    DEV_NAME_PREFIX = 'ns-'

    def plug(self, network_id, port_id, device_name, mac_address,
             bridge=None, namespace=None, prefix=None):
        """Plug in the interface."""
        if not ip_lib.device_exists(device_name,
                                    self.root_helper,
                                    namespace=namespace):
            ip = ip_lib.IPWrapper(self.root_helper)

            # Enable agent to define the prefix
            if prefix:
                tap_name = device_name.replace(prefix, 'tap')
            else:
                tap_name = device_name.replace(self.DEV_NAME_PREFIX, 'tap')
            # Create ns_veth in a namespace if one is configured.
            root_veth, ns_veth = ip.add_veth(tap_name, device_name,
                                             namespace2=namespace)
            ns_veth.link.set_address(mac_address)

            if self.conf.network_device_mtu:
                root_veth.link.set_mtu(self.conf.network_device_mtu)
                ns_veth.link.set_mtu(self.conf.network_device_mtu)

            root_veth.link.set_up()
            ns_veth.link.set_up()

        else:
            LOG.info(_("Device %s already exists"), device_name)

    def unplug(self, device_name, bridge=None, namespace=None, prefix=None):
        """Unplug the interface."""
        device = ip_lib.IPDevice(device_name, self.root_helper, namespace)
        try:
            device.link.delete()
            LOG.debug(_("Unplugged interface '%s'"), device_name)
        except RuntimeError:
            LOG.error(_("Failed unplugging interface '%s'"),
                      device_name)


class MetaInterfaceDriver(LinuxInterfaceDriver):
    def __init__(self, conf):
        super(MetaInterfaceDriver, self).__init__(conf)
        from neutronclient.v2_0 import client
        self.neutron = client.Client(
            username=self.conf.admin_user,
            password=self.conf.admin_password,
            tenant_name=self.conf.admin_tenant_name,
            auth_url=self.conf.auth_url,
            auth_strategy=self.conf.auth_strategy,
            region_name=self.conf.auth_region
        )
        self.flavor_driver_map = {}
        for net_flavor, driver_name in [
                driver_set.split(':')
                for driver_set in
                self.conf.meta_flavor_driver_mappings.split(',')]:
            self.flavor_driver_map[net_flavor] = self._load_driver(driver_name)

    def _get_flavor_by_network_id(self, network_id):
        network = self.neutron.show_network(network_id)
        return network['network'][flavor.FLAVOR_NETWORK]

    def _get_driver_by_network_id(self, network_id):
        net_flavor = self._get_flavor_by_network_id(network_id)
        return self.flavor_driver_map[net_flavor]

    def _set_device_plugin_tag(self, network_id, device_name, namespace=None):
        plugin_tag = self._get_flavor_by_network_id(network_id)
        device = ip_lib.IPDevice(device_name, self.conf.root_helper, namespace)
        device.link.set_alias(plugin_tag)

    def _get_device_plugin_tag(self, device_name, namespace=None):
        device = ip_lib.IPDevice(device_name, self.conf.root_helper, namespace)
        return device.link.alias

    def get_device_name(self, port):
        driver = self._get_driver_by_network_id(port.network_id)
        return driver.get_device_name(port)

    def plug(self, network_id, port_id, device_name, mac_address,
             bridge=None, namespace=None, prefix=None):
        driver = self._get_driver_by_network_id(network_id)
        ret = driver.plug(network_id, port_id, device_name, mac_address,
                          bridge=bridge, namespace=namespace, prefix=prefix)
        self._set_device_plugin_tag(network_id, device_name, namespace)
        return ret

    def unplug(self, device_name, bridge=None, namespace=None, prefix=None):
        plugin_tag = self._get_device_plugin_tag(device_name, namespace)
        driver = self.flavor_driver_map[plugin_tag]
        return driver.unplug(device_name, bridge, namespace, prefix)

    def _load_driver(self, driver_provider):
        LOG.debug(_("Driver location: %s"), driver_provider)
        plugin_klass = importutils.import_class(driver_provider)
        return plugin_klass(self.conf)
@@ -1,381 +0,0 @@
# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import netaddr
from oslo.config import cfg

from neutron.agent import firewall
from neutron.agent.linux import iptables_manager
from neutron.common import constants
from neutron.openstack.common import log as logging


LOG = logging.getLogger(__name__)
SG_CHAIN = 'sg-chain'
INGRESS_DIRECTION = 'ingress'
EGRESS_DIRECTION = 'egress'
SPOOF_FILTER = 'spoof-filter'
CHAIN_NAME_PREFIX = {INGRESS_DIRECTION: 'i',
                     EGRESS_DIRECTION: 'o',
                     SPOOF_FILTER: 's'}
LINUX_DEV_LEN = 14


class IptablesFirewallDriver(firewall.FirewallDriver):
    """Driver which enforces security groups through iptables rules."""
    IPTABLES_DIRECTION = {INGRESS_DIRECTION: 'physdev-out',
                          EGRESS_DIRECTION: 'physdev-in'}

    def __init__(self):
        self.iptables = iptables_manager.IptablesManager(
            root_helper=cfg.CONF.AGENT.root_helper,
            use_ipv6=True)
        # ports (keyed by device name) which have a security group
        self.filtered_ports = {}
        self._add_fallback_chain_v4v6()
        self._defer_apply = False
        self._pre_defer_filtered_ports = None

    @property
    def ports(self):
        return self.filtered_ports

    def prepare_port_filter(self, port):
        LOG.debug(_("Preparing device (%s) filter"), port['device'])
        self._remove_chains()
        self.filtered_ports[port['device']] = port
        # each security group has its own chains
        self._setup_chains()
        self.iptables.apply()

    def update_port_filter(self, port):
        LOG.debug(_("Updating device (%s) filter"), port['device'])
        if port['device'] not in self.filtered_ports:
            LOG.info(_('Attempted to update port filter which is not '
                       'filtered %s'), port['device'])
            return
        self._remove_chains()
        self.filtered_ports[port['device']] = port
        self._setup_chains()
        self.iptables.apply()

    def remove_port_filter(self, port):
        LOG.debug(_("Removing device (%s) filter"), port['device'])
        if not self.filtered_ports.get(port['device']):
            LOG.info(_('Attempted to remove port filter which is not '
                       'filtered %r'), port)
            return
        self._remove_chains()
        self.filtered_ports.pop(port['device'], None)
        self._setup_chains()
        self.iptables.apply()

    def _setup_chains(self):
        """Set up ingress and egress chains for a port."""
        if not self._defer_apply:
            self._setup_chains_apply(self.filtered_ports)

    def _setup_chains_apply(self, ports):
        self._add_chain_by_name_v4v6(SG_CHAIN)
        for port in ports.values():
            self._setup_chain(port, INGRESS_DIRECTION)
            self._setup_chain(port, EGRESS_DIRECTION)
        self.iptables.ipv4['filter'].add_rule(SG_CHAIN, '-j ACCEPT')
        self.iptables.ipv6['filter'].add_rule(SG_CHAIN, '-j ACCEPT')

    def _remove_chains(self):
        """Remove ingress and egress chains for a port."""
        if not self._defer_apply:
            self._remove_chains_apply(self.filtered_ports)

    def _remove_chains_apply(self, ports):
        for port in ports.values():
            self._remove_chain(port, INGRESS_DIRECTION)
            self._remove_chain(port, EGRESS_DIRECTION)
            self._remove_chain(port, SPOOF_FILTER)
        self._remove_chain_by_name_v4v6(SG_CHAIN)

    def _setup_chain(self, port, DIRECTION):
        self._add_chain(port, DIRECTION)
        self._add_rule_by_security_group(port, DIRECTION)

    def _remove_chain(self, port, DIRECTION):
        chain_name = self._port_chain_name(port, DIRECTION)
        self._remove_chain_by_name_v4v6(chain_name)

    def _add_fallback_chain_v4v6(self):
        self.iptables.ipv4['filter'].add_chain('sg-fallback')
        self.iptables.ipv4['filter'].add_rule('sg-fallback', '-j DROP')
        self.iptables.ipv6['filter'].add_chain('sg-fallback')
        self.iptables.ipv6['filter'].add_rule('sg-fallback', '-j DROP')

    def _add_chain_by_name_v4v6(self, chain_name):
        self.iptables.ipv6['filter'].add_chain(chain_name)
        self.iptables.ipv4['filter'].add_chain(chain_name)

    def _remove_chain_by_name_v4v6(self, chain_name):
        self.iptables.ipv4['filter'].ensure_remove_chain(chain_name)
        self.iptables.ipv6['filter'].ensure_remove_chain(chain_name)

    def _add_rule_to_chain_v4v6(self, chain_name, ipv4_rules, ipv6_rules):
        for rule in ipv4_rules:
            self.iptables.ipv4['filter'].add_rule(chain_name, rule)

        for rule in ipv6_rules:
            self.iptables.ipv6['filter'].add_rule(chain_name, rule)

    def _get_device_name(self, port):
        return port['device']

    def _add_chain(self, port, direction):
        chain_name = self._port_chain_name(port, direction)
        self._add_chain_by_name_v4v6(chain_name)

        # Note(nati) jump to the security group chain (SG_CHAIN)
        # This is needed because a packet may match rules on two ports
        # when both ports are on the same host.
        # We accept the packet at the end of SG_CHAIN.

        # jump to the security group chain
        device = self._get_device_name(port)
        jump_rule = ['-m physdev --%s %s --physdev-is-bridged '
                     '-j $%s' % (self.IPTABLES_DIRECTION[direction],
                                 device,
                                 SG_CHAIN)]
        self._add_rule_to_chain_v4v6('FORWARD', jump_rule, jump_rule)

        # jump to the chain based on the device
        jump_rule = ['-m physdev --%s %s --physdev-is-bridged '
                     '-j $%s' % (self.IPTABLES_DIRECTION[direction],
                                 device,
                                 chain_name)]
        self._add_rule_to_chain_v4v6(SG_CHAIN, jump_rule, jump_rule)

        if direction == EGRESS_DIRECTION:
            self._add_rule_to_chain_v4v6('INPUT', jump_rule, jump_rule)

    def _split_sgr_by_ethertype(self, security_group_rules):
        ipv4_sg_rules = []
        ipv6_sg_rules = []
        for rule in security_group_rules:
            if rule.get('ethertype') == constants.IPv4:
                ipv4_sg_rules.append(rule)
            elif rule.get('ethertype') == constants.IPv6:
                if rule.get('protocol') == 'icmp':
                    rule['protocol'] = 'icmpv6'
                ipv6_sg_rules.append(rule)
        return ipv4_sg_rules, ipv6_sg_rules

    def _select_sgr_by_direction(self, port, direction):
        return [rule
                for rule in port.get('security_group_rules', [])
                if rule['direction'] == direction]

    def _setup_spoof_filter_chain(self, port, table, mac_ip_pairs, rules):
        if mac_ip_pairs:
            chain_name = self._port_chain_name(port, SPOOF_FILTER)
            table.add_chain(chain_name)
            for mac, ip in mac_ip_pairs:
                if ip is None:
                    # If fixed_ips is [] this rule will be added to the end
                    # of the list after the allowed_address_pair rules.
                    table.add_rule(chain_name,
                                   '-m mac --mac-source %s -j RETURN'
                                   % mac)
                else:
                    table.add_rule(chain_name,
                                   '-m mac --mac-source %s -s %s -j RETURN'
                                   % (mac, ip))
            table.add_rule(chain_name, '-j DROP')
            rules.append('-j $%s' % chain_name)

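For illustration, the spoof-filter chain built for a port with one fixed IP would contain rules like the following (MAC and IP are hypothetical); traffic sourced from any other MAC/IP combination falls through to the DROP:

    -m mac --mac-source fa:16:3e:5c:11:22 -s 10.0.0.2 -j RETURN
    -j DROP
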
    def _build_ipv4v6_mac_ip_list(self, mac, ip_address, mac_ipv4_pairs,
                                  mac_ipv6_pairs):
        if netaddr.IPNetwork(ip_address).version == 4:
            mac_ipv4_pairs.append((mac, ip_address))
        else:
            mac_ipv6_pairs.append((mac, ip_address))

    def _spoofing_rule(self, port, ipv4_rules, ipv6_rules):
        # Note(nati) allow dhcp or RA packet
        ipv4_rules += ['-p udp -m udp --sport 68 --dport 67 -j RETURN']
        ipv6_rules += ['-p icmpv6 -j RETURN']
        ipv6_rules += ['-p udp -m udp --sport 546 --dport 547 -j RETURN']
        mac_ipv4_pairs = []
        mac_ipv6_pairs = []

        if isinstance(port.get('allowed_address_pairs'), list):
            for address_pair in port['allowed_address_pairs']:
                self._build_ipv4v6_mac_ip_list(address_pair['mac_address'],
                                               address_pair['ip_address'],
                                               mac_ipv4_pairs,
                                               mac_ipv6_pairs)

        for ip in port['fixed_ips']:
            self._build_ipv4v6_mac_ip_list(port['mac_address'], ip,
                                           mac_ipv4_pairs, mac_ipv6_pairs)
        if not port['fixed_ips']:
            mac_ipv4_pairs.append((port['mac_address'], None))
            mac_ipv6_pairs.append((port['mac_address'], None))

        self._setup_spoof_filter_chain(port, self.iptables.ipv4['filter'],
                                       mac_ipv4_pairs, ipv4_rules)
        self._setup_spoof_filter_chain(port, self.iptables.ipv6['filter'],
                                       mac_ipv6_pairs, ipv6_rules)

    def _drop_dhcp_rule(self, ipv4_rules, ipv6_rules):
        # Note(nati) Drop dhcp packet from VM
        ipv4_rules += ['-p udp -m udp --sport 67 --dport 68 -j DROP']
        ipv6_rules += ['-p udp -m udp --sport 547 --dport 546 -j DROP']

    def _accept_inbound_icmpv6(self):
        # Allow multicast listener, neighbor solicitation and
        # neighbor advertisement into the instance
        icmpv6_rules = []
        for icmp6_type in constants.ICMPV6_ALLOWED_TYPES:
            icmpv6_rules += ['-p icmpv6 --icmpv6-type %s -j RETURN' %
                             icmp6_type]
        return icmpv6_rules

    def _add_rule_by_security_group(self, port, direction):
        chain_name = self._port_chain_name(port, direction)
        # select rules for current direction
        security_group_rules = self._select_sgr_by_direction(port, direction)
        # split groups by ip version
        # for ipv4, the iptables command is used
        # for ipv6, the ip6tables command is used
        ipv4_sg_rules, ipv6_sg_rules = self._split_sgr_by_ethertype(
            security_group_rules)
        ipv4_iptables_rule = []
        ipv6_iptables_rule = []
        if direction == EGRESS_DIRECTION:
            self._spoofing_rule(port,
                                ipv4_iptables_rule,
                                ipv6_iptables_rule)
            self._drop_dhcp_rule(ipv4_iptables_rule, ipv6_iptables_rule)
        if direction == INGRESS_DIRECTION:
            ipv6_iptables_rule += self._accept_inbound_icmpv6()
        ipv4_iptables_rule += self._convert_sgr_to_iptables_rules(
            ipv4_sg_rules)
        ipv6_iptables_rule += self._convert_sgr_to_iptables_rules(
            ipv6_sg_rules)
        self._add_rule_to_chain_v4v6(chain_name,
                                     ipv4_iptables_rule,
                                     ipv6_iptables_rule)

    def _convert_sgr_to_iptables_rules(self, security_group_rules):
        iptables_rules = []
        self._drop_invalid_packets(iptables_rules)
        self._allow_established(iptables_rules)
        for rule in security_group_rules:
            # These arguments MUST be in the format iptables-save will
            # display them: source/dest, protocol, sport, dport, target
            # Otherwise the iptables_manager code won't be able to find
            # them to preserve their [packet:byte] counts.
            args = self._ip_prefix_arg('s',
                                       rule.get('source_ip_prefix'))
            args += self._ip_prefix_arg('d',
                                        rule.get('dest_ip_prefix'))
            args += self._protocol_arg(rule.get('protocol'))
            args += self._port_arg('sport',
                                   rule.get('protocol'),
                                   rule.get('source_port_range_min'),
                                   rule.get('source_port_range_max'))
            args += self._port_arg('dport',
                                   rule.get('protocol'),
                                   rule.get('port_range_min'),
                                   rule.get('port_range_max'))
            args += ['-j RETURN']
            iptables_rules += [' '.join(args)]

        iptables_rules += ['-j $sg-fallback']

        return iptables_rules

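As a sketch, a security group rule allowing ingress tcp/22 from 0.0.0.0/0 converts to the following rule list (the state rules come first, the fallback jump last):

    ['-m state --state INVALID -j DROP',
     '-m state --state RELATED,ESTABLISHED -j RETURN',
     '-s 0.0.0.0/0 -p tcp -m tcp --dport 22 -j RETURN',
     '-j $sg-fallback']
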
    def _drop_invalid_packets(self, iptables_rules):
        # Always drop invalid packets
        iptables_rules += ['-m state --state INVALID -j DROP']
        return iptables_rules

    def _allow_established(self, iptables_rules):
        # Allow established connections
        iptables_rules += ['-m state --state RELATED,ESTABLISHED -j RETURN']
        return iptables_rules

    def _protocol_arg(self, protocol):
        if not protocol:
            return []

        iptables_rule = ['-p', protocol]
        # iptables always adds '-m protocol' for udp and tcp
        if protocol in ['udp', 'tcp']:
            iptables_rule += ['-m', protocol]
        return iptables_rule

    def _port_arg(self, direction, protocol, port_range_min, port_range_max):
        if (protocol not in ['udp', 'tcp', 'icmp', 'icmpv6']
                or not port_range_min):
            return []

        if protocol in ['icmp', 'icmpv6']:
            # Note(xuhanp): port_range_min/port_range_max represent
            # icmp type/code when protocol is icmp or icmpv6
            # icmp code can be 0 so we cannot use "if port_range_max" here
            if port_range_max is not None:
                return ['--%s-type' % protocol,
                        '%s/%s' % (port_range_min, port_range_max)]
            return ['--%s-type' % protocol, '%s' % port_range_min]
        elif port_range_min == port_range_max:
            return ['--%s' % direction, '%s' % (port_range_min,)]
        else:
            return ['-m', 'multiport',
                    '--%ss' % direction,
                    '%s:%s' % (port_range_min, port_range_max)]

    def _ip_prefix_arg(self, direction, ip_prefix):
        # NOTE(nati): source_group_id is converted to a list of
        # source ip prefixes on the server side
        if ip_prefix:
            return ['-%s' % direction, ip_prefix]
        return []

    def _port_chain_name(self, port, direction):
        return iptables_manager.get_chain_name(
            '%s%s' % (CHAIN_NAME_PREFIX[direction], port['device'][3:]))

    def filter_defer_apply_on(self):
        if not self._defer_apply:
            self.iptables.defer_apply_on()
            self._pre_defer_filtered_ports = dict(self.filtered_ports)
            self._defer_apply = True

    def filter_defer_apply_off(self):
        if self._defer_apply:
            self._defer_apply = False
            self._remove_chains_apply(self._pre_defer_filtered_ports)
            self._pre_defer_filtered_ports = None
            self._setup_chains_apply(self.filtered_ports)
            self.iptables.defer_apply_off()


class OVSHybridIptablesFirewallDriver(IptablesFirewallDriver):
    OVS_HYBRID_TAP_PREFIX = 'tap'

    def _port_chain_name(self, port, direction):
        return iptables_manager.get_chain_name(
            '%s%s' % (CHAIN_NAME_PREFIX[direction], port['device']))

    def _get_device_name(self, port):
        return (self.OVS_HYBRID_TAP_PREFIX + port['device'])[:LINUX_DEV_LEN]
@ -1,666 +0,0 @@
# Copyright 2012 Locaweb.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @author: Juliano Martinez, Locaweb.
# based on
# https://github.com/openstack/nova/blob/master/nova/network/linux_net.py

"""Implements iptables rules using linux utilities."""

import inspect
import os
import re

from neutron.agent.linux import utils as linux_utils
from neutron.common import utils
from neutron.openstack.common import excutils
from neutron.openstack.common import lockutils
from neutron.openstack.common import log as logging

LOG = logging.getLogger(__name__)


# NOTE(vish): Iptables supports chain names of up to 28 characters, and we
#             add up to 12 characters to binary_name, which is used as a
#             prefix, so we limit it to 16 characters.
#             (max_chain_name_length - len('-POSTROUTING') == 16)
def get_binary_name():
    """Grab the name of the binary we're running in."""
    return os.path.basename(inspect.stack()[-1][1])[:16]

binary_name = get_binary_name()

# The length of a wrapped chain name must be less than or equal to 11
# characters:
# <max length of iptables chain name> - (<binary_name> + '-') = 28 - (16 + 1)
MAX_CHAIN_LEN_WRAP = 11
MAX_CHAIN_LEN_NOWRAP = 28

# Number of iptables rules to print before and after a rule that causes a
# failure during iptables-restore
IPTABLES_ERROR_LINES_OF_CONTEXT = 5


def get_chain_name(chain_name, wrap=True):
    if wrap:
        return chain_name[:MAX_CHAIN_LEN_WRAP]
    else:
        return chain_name[:MAX_CHAIN_LEN_NOWRAP]
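# Editor's example (hypothetical names, not part of the original file):
# truncation keeps the final '<binary_name>-<chain>' string within
# iptables' 28-character chain-name limit:
#
#   get_chain_name('INGRESS-longchainname')   # -> 'INGRESS-lon' (11 chars)
#   get_chain_name('PREROUTING', wrap=False)  # -> 'PREROUTING' (<= 28 chars)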

class IptablesRule(object):
    """An iptables rule.

    You shouldn't need to use this class directly, it's only used by
    IptablesManager.

    """

    def __init__(self, chain, rule, wrap=True, top=False,
                 binary_name=binary_name, tag=None):
        self.chain = get_chain_name(chain, wrap)
        self.rule = rule
        self.wrap = wrap
        self.top = top
        self.wrap_name = binary_name[:16]
        self.tag = tag

    def __eq__(self, other):
        return ((self.chain == other.chain) and
                (self.rule == other.rule) and
                (self.top == other.top) and
                (self.wrap == other.wrap))

    def __ne__(self, other):
        return not self == other

    def __str__(self):
        if self.wrap:
            chain = '%s-%s' % (self.wrap_name, self.chain)
        else:
            chain = self.chain
        return '-A %s %s' % (chain, self.rule)
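# Editor's example (hypothetical, not part of the original file): how a
# wrapped rule renders for iptables-restore, assuming a binary_name of
# 'neutron-l3-agen':
#
#   r = IptablesRule('FORWARD', '-j ACCEPT', binary_name='neutron-l3-agen')
#   str(r)  # -> '-A neutron-l3-agen-FORWARD -j ACCEPT'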

class IptablesTable(object):
    """An iptables table."""

    def __init__(self, binary_name=binary_name):
        self.rules = []
        self.remove_rules = []
        self.chains = set()
        self.unwrapped_chains = set()
        self.remove_chains = set()
        self.wrap_name = binary_name[:16]

    def add_chain(self, name, wrap=True):
        """Add a named chain to the table.

        The chain name is wrapped to be unique for the component creating
        it, so different components of Nova can safely create identically
        named chains without interfering with one another.

        At the moment, its wrapped name is <binary name>-<chain name>,
        so if nova-compute creates a chain named 'OUTPUT', it'll actually
        end up named 'nova-compute-OUTPUT'.

        """
        name = get_chain_name(name, wrap)
        if wrap:
            self.chains.add(name)
        else:
            self.unwrapped_chains.add(name)
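    # Editor's example (hypothetical, not part of the original file):
    #
    #   table = IptablesTable(binary_name='neutron-l3-agen')
    #   table.add_chain('router-filter')        # stored as 'router-filt'
    #   table.add_chain('FORWARD', wrap=False)  # kept verbatim, unwrapped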
    def _select_chain_set(self, wrap):
        if wrap:
            return self.chains
        else:
            return self.unwrapped_chains

    def ensure_remove_chain(self, name, wrap=True):
        """Ensure the chain is removed.

        This removal "cascades". All rules in the chain are removed, as
        are all rules in other chains that jump to it.
        """
        name = get_chain_name(name, wrap)
        chain_set = self._select_chain_set(wrap)
        if name not in chain_set:
            return

        self.remove_chain(name, wrap)

    def remove_chain(self, name, wrap=True):
        """Remove a named chain.

        This removal "cascades". All rules in the chain are removed, as
        are all rules in other chains that jump to it.

        If the chain is not found, this is merely logged.

        """
        name = get_chain_name(name, wrap)
        chain_set = self._select_chain_set(wrap)

        if name not in chain_set:
            LOG.warn(_('Attempted to remove chain %s which does not exist'),
                     name)
            return

        chain_set.remove(name)

        if not wrap:
            # non-wrapped chains and rules need to be dealt with specially,
            # so we keep a list of them to be iterated over in apply()
            self.remove_chains.add(name)

            # first, add rules to remove that have a matching chain name
            self.remove_rules += [r for r in self.rules if r.chain == name]

        # next, remove rules from list that have a matching chain name
        self.rules = [r for r in self.rules if r.chain != name]

        if not wrap:
            jump_snippet = '-j %s' % name
            # next, add rules to remove that have a matching jump chain
            self.remove_rules += [r for r in self.rules
                                  if jump_snippet in r.rule]
        else:
            jump_snippet = '-j %s-%s' % (self.wrap_name, name)

        # finally, remove rules from list that have a matching jump chain
        self.rules = [r for r in self.rules
                      if jump_snippet not in r.rule]

    def add_rule(self, chain, rule, wrap=True, top=False, tag=None):
        """Add a rule to the table.

        This is just like what you'd feed to iptables, just without
        the '-A <chain name>' bit at the start.

        However, if you need to jump to one of your wrapped chains,
        prepend its name with a '$' which will ensure the wrapping
        is applied correctly.

        """
        chain = get_chain_name(chain, wrap)
        if wrap and chain not in self.chains:
            raise LookupError(_('Unknown chain: %r') % chain)

        if '$' in rule:
            rule = ' '.join(
                self._wrap_target_chain(e, wrap) for e in rule.split(' '))

        self.rules.append(IptablesRule(chain, rule, wrap, top, self.wrap_name,
                                       tag))
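    # Editor's example (hypothetical, not part of the original file): a '$'
    # target is rewritten to the wrapped chain name before being stored:
    #
    #   table.add_chain('sg-fallback')
    #   table.add_rule('sg-fallback', '-j DROP')
    #   table.add_rule('FORWARD', '-j $sg-fallback', wrap=False)
    #   # stored rule jumps to 'neutron-l3-agen-sg-fallback' (with the
    #   # example binary_name used above)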
    def _wrap_target_chain(self, s, wrap):
        if s.startswith('$'):
            s = ('%s-%s' % (self.wrap_name, get_chain_name(s[1:], wrap)))

        return s

    def remove_rule(self, chain, rule, wrap=True, top=False):
        """Remove a rule from a chain.

        Note: The rule must be exactly identical to the one that was added.
        You cannot switch arguments around like you can with the iptables
        CLI tool.

        """
        chain = get_chain_name(chain, wrap)
        try:
            if '$' in rule:
                rule = ' '.join(
                    self._wrap_target_chain(e, wrap) for e in rule.split(' '))

            self.rules.remove(IptablesRule(chain, rule, wrap, top,
                                           self.wrap_name))
            if not wrap:
                self.remove_rules.append(IptablesRule(chain, rule, wrap, top,
                                                      self.wrap_name))
        except ValueError:
            LOG.warn(_('Tried to remove rule that was not there:'
                       ' %(chain)r %(rule)r %(wrap)r %(top)r'),
                     {'chain': chain, 'rule': rule,
                      'top': top, 'wrap': wrap})

    def empty_chain(self, chain, wrap=True):
        """Remove all rules from a chain."""
        chain = get_chain_name(chain, wrap)
        chained_rules = [rule for rule in self.rules
                         if rule.chain == chain and rule.wrap == wrap]
        for rule in chained_rules:
            self.rules.remove(rule)

    def clear_rules_by_tag(self, tag):
        if not tag:
            return
        rules = [rule for rule in self.rules if rule.tag == tag]
        for rule in rules:
            self.rules.remove(rule)


class IptablesManager(object):
    """Wrapper for iptables.

    See IptablesTable for some usage docs.

    A number of chains are set up to begin with.

    First, neutron-filter-top. It's added at the top of FORWARD and OUTPUT.
    Its name is not wrapped, so it's shared between the various neutron
    workers. It's intended for rules that need to live at the top of the
    FORWARD and OUTPUT chains. It's in both the ipv4 and ipv6 set of tables.

    For ipv4 and ipv6, the built-in INPUT, OUTPUT, and FORWARD filter chains
    are wrapped, meaning that the "real" INPUT chain has a rule that jumps to
    the wrapped INPUT chain, etc. Additionally, there's a wrapped chain named
    "local" which is jumped to from neutron-filter-top.

    For ipv4, the built-in PREROUTING, OUTPUT, and POSTROUTING nat chains are
    wrapped in the same way as the built-in filter chains. Additionally,
    there's a snat chain that is applied after the POSTROUTING chain.

    """

    def __init__(self, _execute=None, state_less=False,
                 root_helper=None, use_ipv6=False, namespace=None,
                 binary_name=binary_name):
        if _execute:
            self.execute = _execute
        else:
            self.execute = linux_utils.execute

        self.use_ipv6 = use_ipv6
        self.root_helper = root_helper
        self.namespace = namespace
        self.iptables_apply_deferred = False
        self.wrap_name = binary_name[:16]

        self.ipv4 = {'filter': IptablesTable(binary_name=self.wrap_name)}
        self.ipv6 = {'filter': IptablesTable(binary_name=self.wrap_name)}

        # Add a neutron-filter-top chain. It's intended to be shared
        # among the various neutron components. It sits at the very top
        # of FORWARD and OUTPUT.
        for tables in [self.ipv4, self.ipv6]:
            tables['filter'].add_chain('neutron-filter-top', wrap=False)
            tables['filter'].add_rule('FORWARD', '-j neutron-filter-top',
                                      wrap=False, top=True)
            tables['filter'].add_rule('OUTPUT', '-j neutron-filter-top',
                                      wrap=False, top=True)

            tables['filter'].add_chain('local')
            tables['filter'].add_rule('neutron-filter-top', '-j $local',
                                      wrap=False)

        # Wrap the built-in chains
        builtin_chains = {4: {'filter': ['INPUT', 'OUTPUT', 'FORWARD']},
                          6: {'filter': ['INPUT', 'OUTPUT', 'FORWARD']}}

        if not state_less:
            self.ipv4.update(
                {'nat': IptablesTable(binary_name=self.wrap_name)})
            builtin_chains[4].update({'nat': ['PREROUTING',
                                              'OUTPUT', 'POSTROUTING']})

        for ip_version in builtin_chains:
            if ip_version == 4:
                tables = self.ipv4
            elif ip_version == 6:
                tables = self.ipv6

            for table, chains in builtin_chains[ip_version].iteritems():
                for chain in chains:
                    tables[table].add_chain(chain)
                    tables[table].add_rule(chain, '-j $%s' %
                                           (chain), wrap=False)

        if not state_less:
            # Add a neutron-postrouting-bottom chain. It's intended to be
            # shared among the various neutron components. We set it as the
            # last chain of the POSTROUTING chain.
            self.ipv4['nat'].add_chain('neutron-postrouting-bottom',
                                       wrap=False)
            self.ipv4['nat'].add_rule('POSTROUTING',
                                      '-j neutron-postrouting-bottom',
                                      wrap=False)

            # We add a snat chain to the shared neutron-postrouting-bottom
            # chain so that it's applied last.
            self.ipv4['nat'].add_chain('snat')
            self.ipv4['nat'].add_rule('neutron-postrouting-bottom',
                                      '-j $snat', wrap=False)

            # And then we add a float-snat chain and jump to it first thing
            # in the snat chain.
            self.ipv4['nat'].add_chain('float-snat')
            self.ipv4['nat'].add_rule('snat', '-j $float-snat')
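    # Editor's example (a sketch, not part of the original file; assumes a
    # root helper such as 'sudo' is available on the host):
    #
    #   manager = IptablesManager(root_helper='sudo', use_ipv6=False)
    #   manager.ipv4['filter'].add_chain('sg-chain')
    #   manager.ipv4['filter'].add_rule('sg-chain', '-j ACCEPT')
    #   manager.apply()  # renders all tables through iptables-restore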
    def defer_apply_on(self):
        self.iptables_apply_deferred = True

    def defer_apply_off(self):
        self.iptables_apply_deferred = False
        self._apply()

    def apply(self):
        if self.iptables_apply_deferred:
            return

        self._apply()

    def _apply(self):
        lock_name = 'iptables'
        if self.namespace:
            lock_name += '-' + self.namespace

        try:
            with lockutils.lock(lock_name, utils.SYNCHRONIZED_PREFIX, True):
                LOG.debug(_('Got semaphore / lock "%s"'), lock_name)
                return self._apply_synchronized()
        finally:
            LOG.debug(_('Semaphore / lock released "%s"'), lock_name)

    def _apply_synchronized(self):
        """Apply the current in-memory set of iptables rules.

        This will blow away any rules left over from previous runs of the
        same component, and replace them with our current set of rules.
        This happens atomically, thanks to iptables-restore.

        """
        s = [('iptables', self.ipv4)]
        if self.use_ipv6:
            s += [('ip6tables', self.ipv6)]

        for cmd, tables in s:
            args = ['%s-save' % (cmd,), '-c']
            if self.namespace:
                args = ['ip', 'netns', 'exec', self.namespace] + args
            all_tables = self.execute(args, root_helper=self.root_helper)
            all_lines = all_tables.split('\n')
            for table_name, table in tables.iteritems():
                start, end = self._find_table(all_lines, table_name)
                all_lines[start:end] = self._modify_rules(
                    all_lines[start:end], table, table_name)

            args = ['%s-restore' % (cmd,), '-c']
            if self.namespace:
                args = ['ip', 'netns', 'exec', self.namespace] + args
            try:
                self.execute(args, process_input='\n'.join(all_lines),
                             root_helper=self.root_helper)
            except RuntimeError as r_error:
                with excutils.save_and_reraise_exception():
                    try:
                        line_no = int(re.search(
                            'iptables-restore: line ([0-9]+?) failed',
                            str(r_error)).group(1))
                        context = IPTABLES_ERROR_LINES_OF_CONTEXT
                        log_start = max(0, line_no - context)
                        log_end = line_no + context
                    except AttributeError:
                        # line error wasn't found, print all lines instead
                        log_start = 0
                        log_end = len(all_lines)
                    log_lines = ('%7d. %s' % (idx, l)
                                 for idx, l in enumerate(
                                     all_lines[log_start:log_end],
                                     log_start + 1)
                                 )
                    LOG.error(_("IPTablesManager.apply failed to apply the "
                                "following set of iptables rules:\n%s"),
                              '\n'.join(log_lines))
        LOG.debug(_("IPTablesManager.apply completed with success"))

    def _find_table(self, lines, table_name):
        if len(lines) < 3:
            # length is only < 3 when a fake iptables backend is in use
            return (0, 0)
        try:
            start = lines.index('*%s' % table_name) - 1
        except ValueError:
            # Couldn't find table_name
            LOG.debug(_('Unable to find table %s'), table_name)
            return (0, 0)
        end = lines[start:].index('COMMIT') + start + 2
        return (start, end)

    def _find_rules_index(self, lines):
        seen_chains = False
        rules_index = 0
        for rules_index, rule in enumerate(lines):
            if not seen_chains:
                if rule.startswith(':'):
                    seen_chains = True
            else:
                if not rule.startswith(':'):
                    break

        if not seen_chains:
            rules_index = 2

        return rules_index

    def _find_last_entry(self, filter_list, match_str):
        # find a matching entry, starting from the bottom
        for s in reversed(filter_list):
            s = s.strip()
            if match_str in s:
                return s

    def _modify_rules(self, current_lines, table, table_name):
        unwrapped_chains = table.unwrapped_chains
        chains = table.chains
        remove_chains = table.remove_chains
        rules = table.rules
        remove_rules = table.remove_rules

        if not current_lines:
            fake_table = ['# Generated by iptables_manager',
                          '*' + table_name, 'COMMIT',
                          '# Completed by iptables_manager']
            current_lines = fake_table

        # Fill old_filter with any chains or rules we might have added;
        # they could have a [packet:byte] count we want to preserve.
        # Fill new_filter with any chains or rules without our name in them.
        old_filter, new_filter = [], []
        for line in current_lines:
            (old_filter if self.wrap_name in line else
             new_filter).append(line.strip())

        rules_index = self._find_rules_index(new_filter)

        all_chains = [':%s' % name for name in unwrapped_chains]
        all_chains += [':%s-%s' % (self.wrap_name, name) for name in chains]

        # Iterate through all the chains, trying to find an existing
        # match.
        our_chains = []
        for chain in all_chains:
            chain_str = str(chain).strip()

            old = self._find_last_entry(old_filter, chain_str)
            if not old:
                dup = self._find_last_entry(new_filter, chain_str)
            new_filter = [s for s in new_filter if chain_str not in s.strip()]

            # if no old or duplicate entry exists, use the original chain
            if old or dup:
                chain_str = str(old or dup)
            else:
                # add on the [packet:byte] count
                chain_str += ' - [0:0]'

            our_chains += [chain_str]

        # Iterate through all the rules, trying to find an existing
        # match.
        our_rules = []
        bot_rules = []
        for rule in rules:
            rule_str = str(rule).strip()
            # Further down, we weed out duplicates from the bottom of the
            # list, so here we remove the dupes ahead of time.

            old = self._find_last_entry(old_filter, rule_str)
            if not old:
                dup = self._find_last_entry(new_filter, rule_str)
            new_filter = [s for s in new_filter if rule_str not in s.strip()]

            # if no old or duplicate entry exists, use the original rule
            if old or dup:
                rule_str = str(old or dup)
                # back up one index so we write the array correctly
                if not old:
                    rules_index -= 1
            else:
                # add on the [packet:byte] count
                rule_str = '[0:0] ' + rule_str

            if rule.top:
                # rule.top == True means we want this rule to be at the top.
                our_rules += [rule_str]
            else:
                bot_rules += [rule_str]

        our_rules += bot_rules

        new_filter[rules_index:rules_index] = our_rules
        new_filter[rules_index:rules_index] = our_chains

        def _strip_packets_bytes(line):
            # strip any [packet:byte] counts at the start or end of lines
            if line.startswith(':'):
                # it's a chain, for example, ":neutron-billing - [0:0]"
                line = line.split(':')[1]
                line = line.split(' - [', 1)[0]
            elif line.startswith('['):
                # it's a rule, for example, "[0:0] -A neutron-billing..."
                line = line.split('] ', 1)[1]
            line = line.strip()
            return line

        seen_chains = set()

        def _weed_out_duplicate_chains(line):
            # ignore [packet:byte] counts at the end of lines
            if line.startswith(':'):
                line = _strip_packets_bytes(line)
                if line in seen_chains:
                    return False
                else:
                    seen_chains.add(line)

            # Leave it alone
            return True

        seen_rules = set()

        def _weed_out_duplicate_rules(line):
            if line.startswith('['):
                line = _strip_packets_bytes(line)
                if line in seen_rules:
                    return False
                else:
                    seen_rules.add(line)

            # Leave it alone
            return True

        def _weed_out_removes(line):
            # We need to find exact matches here
            if line.startswith(':'):
                line = _strip_packets_bytes(line)
                for chain in remove_chains:
                    if chain == line:
                        remove_chains.remove(chain)
                        return False
            elif line.startswith('['):
                line = _strip_packets_bytes(line)
                for rule in remove_rules:
                    rule_str = _strip_packets_bytes(str(rule))
                    if rule_str == line:
                        remove_rules.remove(rule)
                        return False

            # Leave it alone
            return True

        # We filter duplicates. Go through the chains and rules, letting
        # the *last* occurrence take precedence since it could have a
        # non-zero [packet:byte] count we want to preserve. We also filter
        # out anything in the "remove" list.
        new_filter.reverse()
        new_filter = [line for line in new_filter
                      if _weed_out_duplicate_chains(line) and
                      _weed_out_duplicate_rules(line) and
                      _weed_out_removes(line)]
        new_filter.reverse()

        # flush lists, just in case we didn't find something
        remove_chains.clear()
        for rule in remove_rules:
            remove_rules.remove(rule)

        return new_filter

    def _get_traffic_counters_cmd_tables(self, chain, wrap=True):
        name = get_chain_name(chain, wrap)

        cmd_tables = [('iptables', key) for key, table in self.ipv4.items()
                      if name in table._select_chain_set(wrap)]

        cmd_tables += [('ip6tables', key) for key, table in self.ipv6.items()
                       if name in table._select_chain_set(wrap)]

        return cmd_tables

    def get_traffic_counters(self, chain, wrap=True, zero=False):
        """Return the sum of the traffic counters of all rules of a chain."""
        cmd_tables = self._get_traffic_counters_cmd_tables(chain, wrap)
        if not cmd_tables:
            LOG.warn(_('Attempted to get traffic counters of chain %s which '
                       'does not exist'), chain)
            return

        name = get_chain_name(chain, wrap)
        acc = {'pkts': 0, 'bytes': 0}

        for cmd, table in cmd_tables:
            args = [cmd, '-t', table, '-L', name, '-n', '-v', '-x']
            if zero:
                args.append('-Z')
            if self.namespace:
                args = ['ip', 'netns', 'exec', self.namespace] + args
            current_table = (self.execute(args,
                                          root_helper=self.root_helper))
            current_lines = current_table.split('\n')

            for line in current_lines[2:]:
                if not line:
                    break
                data = line.split()
                if (len(data) < 2 or
                        not data[0].isdigit() or
                        not data[1].isdigit()):
                    break

                acc['pkts'] += int(data[0])
                acc['bytes'] += int(data[1])

        return acc
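# Editor's example (a sketch, not part of the original file; assumes the
# 'sg-chain' chain from the earlier example exists in the filter table):
#
#   counters = manager.get_traffic_counters('sg-chain')
#   # -> {'pkts': 1234, 'bytes': 567890}   (illustrative numbers only)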
@ -1,564 +0,0 @@
# Copyright 2011 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo.config import cfg

from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils
from neutron.common import exceptions
from neutron.common import utils as common_utils
from neutron.openstack.common import excutils
from neutron.openstack.common import jsonutils
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants as p_const
# TODO(JLH): Should we remove the explicit include of the ovs plugin here?
from neutron.plugins.openvswitch.common import constants

# Default timeout for ovs-vsctl commands
DEFAULT_OVS_VSCTL_TIMEOUT = 10
OPTS = [
    cfg.IntOpt('ovs_vsctl_timeout',
               default=DEFAULT_OVS_VSCTL_TIMEOUT,
               help=_('Timeout in seconds for ovs-vsctl commands')),
]
cfg.CONF.register_opts(OPTS)

LOG = logging.getLogger(__name__)


class VifPort:
    def __init__(self, port_name, ofport, vif_id, vif_mac, switch):
        self.port_name = port_name
        self.ofport = ofport
        self.vif_id = vif_id
        self.vif_mac = vif_mac
        self.switch = switch

    def __str__(self):
        return ("iface-id=" + self.vif_id + ", vif_mac=" +
                self.vif_mac + ", port_name=" + self.port_name +
                ", ofport=" + str(self.ofport) + ", bridge_name=" +
                self.switch.br_name)


class BaseOVS(object):

    def __init__(self, root_helper):
        self.root_helper = root_helper
        self.vsctl_timeout = cfg.CONF.ovs_vsctl_timeout

    def run_vsctl(self, args, check_error=False):
        full_args = ["ovs-vsctl", "--timeout=%d" % self.vsctl_timeout] + args
        try:
            return utils.execute(full_args, root_helper=self.root_helper)
        except Exception as e:
            with excutils.save_and_reraise_exception() as ctxt:
                LOG.error(_("Unable to execute %(cmd)s. "
                            "Exception: %(exception)s"),
                          {'cmd': full_args, 'exception': e})
                if not check_error:
                    ctxt.reraise = False

    def add_bridge(self, bridge_name):
        self.run_vsctl(["--", "--may-exist", "add-br", bridge_name])
        return OVSBridge(bridge_name, self.root_helper)

    def delete_bridge(self, bridge_name):
        self.run_vsctl(["--", "--if-exists", "del-br", bridge_name])

    def bridge_exists(self, bridge_name):
        try:
            self.run_vsctl(['br-exists', bridge_name], check_error=True)
        except RuntimeError as e:
            with excutils.save_and_reraise_exception() as ctxt:
                if 'Exit code: 2\n' in str(e):
                    ctxt.reraise = False
                    return False
        return True

    def get_bridge_name_for_port_name(self, port_name):
        try:
            return self.run_vsctl(['port-to-br', port_name], check_error=True)
        except RuntimeError as e:
            with excutils.save_and_reraise_exception() as ctxt:
                if 'Exit code: 1\n' in str(e):
                    ctxt.reraise = False

    def port_exists(self, port_name):
        return bool(self.get_bridge_name_for_port_name(port_name))


class OVSBridge(BaseOVS):
    def __init__(self, br_name, root_helper):
        super(OVSBridge, self).__init__(root_helper)
        self.br_name = br_name
        self.defer_apply_flows = False
        self.deferred_flows = {'add': '', 'mod': '', 'del': ''}

    def set_controller(self, controller_names):
        vsctl_command = ['--', 'set-controller', self.br_name]
        vsctl_command.extend(controller_names)
        self.run_vsctl(vsctl_command, check_error=True)

    def del_controller(self):
        self.run_vsctl(['--', 'del-controller', self.br_name],
                       check_error=True)

    def get_controller(self):
        res = self.run_vsctl(['--', 'get-controller', self.br_name],
                             check_error=True)
        if res:
            return res.strip().split('\n')
        return res

    def set_secure_mode(self):
        self.run_vsctl(['--', 'set-fail-mode', self.br_name, 'secure'],
                       check_error=True)

    def set_protocols(self, protocols):
        self.run_vsctl(['--', 'set', 'bridge', self.br_name,
                        "protocols=%s" % protocols],
                       check_error=True)

    def create(self):
        self.add_bridge(self.br_name)

    def destroy(self):
        self.delete_bridge(self.br_name)

    def reset_bridge(self):
        self.destroy()
        self.create()

    def add_port(self, port_name):
        self.run_vsctl(["--", "--may-exist", "add-port", self.br_name,
                        port_name])
        return self.get_port_ofport(port_name)

    def delete_port(self, port_name):
        self.run_vsctl(["--", "--if-exists", "del-port", self.br_name,
                        port_name])

    def set_db_attribute(self, table_name, record, column, value):
        args = ["set", table_name, record, "%s=%s" % (column, value)]
        self.run_vsctl(args)

    def clear_db_attribute(self, table_name, record, column):
        args = ["clear", table_name, record, column]
        self.run_vsctl(args)

    def run_ofctl(self, cmd, args, process_input=None):
        full_args = ["ovs-ofctl", cmd, self.br_name] + args
        try:
            return utils.execute(full_args, root_helper=self.root_helper,
                                 process_input=process_input)
        except Exception as e:
            LOG.error(_("Unable to execute %(cmd)s. Exception: %(exception)s"),
                      {'cmd': full_args, 'exception': e})

    def count_flows(self):
        flow_list = self.run_ofctl("dump-flows", []).split("\n")[1:]
        return len(flow_list) - 1

    def remove_all_flows(self):
        self.run_ofctl("del-flows", [])

    def get_port_ofport(self, port_name):
        ofport = self.db_get_val("Interface", port_name, "ofport")
        # This can return a non-integer string, like '[]', so ensure a
        # common failure case
        try:
            int(ofport)
            return ofport
        except ValueError:
            return constants.INVALID_OFPORT

    def get_datapath_id(self):
        return self.db_get_val('Bridge',
                               self.br_name, 'datapath_id').strip('"')

    def add_flow(self, **kwargs):
        flow_str = _build_flow_expr_str(kwargs, 'add')
        if self.defer_apply_flows:
            self.deferred_flows['add'] += flow_str + '\n'
        else:
            self.run_ofctl("add-flow", [flow_str])

    def mod_flow(self, **kwargs):
        flow_str = _build_flow_expr_str(kwargs, 'mod')
        if self.defer_apply_flows:
            self.deferred_flows['mod'] += flow_str + '\n'
        else:
            self.run_ofctl("mod-flows", [flow_str])

    def delete_flows(self, **kwargs):
        flow_expr_str = _build_flow_expr_str(kwargs, 'del')
        if self.defer_apply_flows:
            self.deferred_flows['del'] += flow_expr_str + '\n'
        else:
            self.run_ofctl("del-flows", [flow_expr_str])

    def dump_flows_for_table(self, table):
        retval = None
        flow_str = "table=%s" % table
        flows = self.run_ofctl("dump-flows", [flow_str])
        if flows:
            retval = '\n'.join(item for item in flows.splitlines()
                               if 'NXST' not in item)
        return retval

    def defer_apply_on(self):
        LOG.debug(_('defer_apply_on'))
        self.defer_apply_flows = True

    def defer_apply_off(self):
        LOG.debug(_('defer_apply_off'))
        # Note(ethuleau): stash flows and disable deferred mode. Then apply
        # flows from the stashed reference to be sure not to purge flows
        # that were added between two ofctl commands.
        stashed_deferred_flows, self.deferred_flows = (
            self.deferred_flows, {'add': '', 'mod': '', 'del': ''}
        )
        self.defer_apply_flows = False
        for action, flows in stashed_deferred_flows.items():
            if flows:
                LOG.debug(_('Applying following deferred flows '
                            'to bridge %s'), self.br_name)
                for line in flows.splitlines():
                    LOG.debug(_('%(action)s: %(flow)s'),
                              {'action': action, 'flow': line})
                self.run_ofctl('%s-flows' % action, ['-'], flows)
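    # Editor's example (a sketch, not part of the original file; assumes an
    # existing bridge named 'br-int' and a root helper of 'sudo'):
    #
    #   br = OVSBridge('br-int', root_helper='sudo')
    #   br.defer_apply_on()
    #   br.add_flow(priority=2, in_port=1, actions='normal')
    #   br.add_flow(priority=1, actions='drop')
    #   br.defer_apply_off()  # both flows pushed in one ovs-ofctl call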
    def add_tunnel_port(self, port_name, remote_ip, local_ip,
                        tunnel_type=p_const.TYPE_GRE,
                        vxlan_udp_port=constants.VXLAN_UDP_PORT,
                        dont_fragment=True):
        vsctl_command = ["--", "--may-exist", "add-port", self.br_name,
                         port_name]
        vsctl_command.extend(["--", "set", "Interface", port_name,
                              "type=%s" % tunnel_type])
        if tunnel_type == p_const.TYPE_VXLAN:
            # Only set the VXLAN UDP port if it's not the default
            if vxlan_udp_port != constants.VXLAN_UDP_PORT:
                vsctl_command.append("options:dst_port=%s" % vxlan_udp_port)
        vsctl_command.append(("options:df_default=%s" %
                             bool(dont_fragment)).lower())
        vsctl_command.extend(["options:remote_ip=%s" % remote_ip,
                              "options:local_ip=%s" % local_ip,
                              "options:in_key=flow",
                              "options:out_key=flow"])
        self.run_vsctl(vsctl_command)
        ofport = self.get_port_ofport(port_name)
        if (tunnel_type == p_const.TYPE_VXLAN and
                ofport == constants.INVALID_OFPORT):
            LOG.error(_('Unable to create VXLAN tunnel port. Please ensure '
                        'that an openvswitch version that supports VXLAN is '
                        'installed.'))
        return ofport

    def add_patch_port(self, local_name, remote_name):
        self.run_vsctl(["add-port", self.br_name, local_name,
                        "--", "set", "Interface", local_name,
                        "type=patch", "options:peer=%s" % remote_name])
        return self.get_port_ofport(local_name)

    def db_get_map(self, table, record, column, check_error=False):
        output = self.run_vsctl(["get", table, record, column], check_error)
        if output:
            output_str = output.rstrip("\n\r")
            return self.db_str_to_map(output_str)
        return {}

    def db_get_val(self, table, record, column, check_error=False):
        output = self.run_vsctl(["get", table, record, column], check_error)
        if output:
            return output.rstrip("\n\r")

    def db_str_to_map(self, full_str):
        elements = full_str.strip("{}").split(", ")
        ret = {}
        for e in elements:
            if e.find("=") == -1:
                continue
            arr = e.split("=")
            ret[arr[0]] = arr[1].strip("\"")
        return ret
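    # Editor's example (hypothetical output, not part of the original file):
    # ovs-vsctl prints OVSDB maps as '{key="value", ...}' strings, which
    # db_str_to_map turns into a plain dict:
    #
    #   br.db_str_to_map('{collisions="0", rx_bytes="12345"}')
    #   # -> {'collisions': '0', 'rx_bytes': '12345'}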
    def get_port_name_list(self):
        res = self.run_vsctl(["list-ports", self.br_name], check_error=True)
        if res:
            return res.strip().split("\n")
        return []

    def get_port_stats(self, port_name):
        return self.db_get_map("Interface", port_name, "statistics")

    def get_xapi_iface_id(self, xs_vif_uuid):
        args = ["xe", "vif-param-get", "param-name=other-config",
                "param-key=nicira-iface-id", "uuid=%s" % xs_vif_uuid]
        try:
            return utils.execute(args, root_helper=self.root_helper).strip()
        except Exception as e:
            with excutils.save_and_reraise_exception():
                LOG.error(_("Unable to execute %(cmd)s. "
                            "Exception: %(exception)s"),
                          {'cmd': args, 'exception': e})

    # returns a VIF object for each VIF port
    def get_vif_ports(self):
        edge_ports = []
        port_names = self.get_port_name_list()
        for name in port_names:
            external_ids = self.db_get_map("Interface", name, "external_ids",
                                           check_error=True)
            ofport = self.db_get_val("Interface", name, "ofport",
                                     check_error=True)
            if "iface-id" in external_ids and "attached-mac" in external_ids:
                p = VifPort(name, ofport, external_ids["iface-id"],
                            external_ids["attached-mac"], self)
                edge_ports.append(p)
            elif ("xs-vif-uuid" in external_ids and
                  "attached-mac" in external_ids):
                # if this is a xenserver and iface-id is not automatically
                # synced to OVS from XAPI, we grab it from XAPI directly
                iface_id = self.get_xapi_iface_id(external_ids["xs-vif-uuid"])
                p = VifPort(name, ofport, iface_id,
                            external_ids["attached-mac"], self)
                edge_ports.append(p)

        return edge_ports

    def get_vif_port_set(self):
        port_names = self.get_port_name_list()
        edge_ports = set()
        args = ['--format=json', '--', '--columns=name,external_ids,ofport',
                'list', 'Interface']
        result = self.run_vsctl(args, check_error=True)
        if not result:
            return edge_ports
        for row in jsonutils.loads(result)['data']:
            name = row[0]
            if name not in port_names:
                continue
            external_ids = dict(row[1][1])
            # Do not consider VIFs which aren't yet ready.
            # This can happen when ofport values are either [] or
            # ["set", []], so we consider only integer values for ofport.
            ofport = row[2]
            try:
                int_ofport = int(ofport)
            except (ValueError, TypeError):
                LOG.warn(_("Found not yet ready openvswitch port: %s"), row)
            else:
                if int_ofport > 0:
                    if ("iface-id" in external_ids and
                            "attached-mac" in external_ids):
                        edge_ports.add(external_ids['iface-id'])
                    elif ("xs-vif-uuid" in external_ids and
                          "attached-mac" in external_ids):
                        # if this is a xenserver and iface-id is not
                        # automatically synced to OVS from XAPI, we grab it
                        # from XAPI directly
                        iface_id = self.get_xapi_iface_id(
                            external_ids["xs-vif-uuid"])
                        edge_ports.add(iface_id)
                else:
                    LOG.warn(_("Found failed openvswitch port: %s"), row)
        return edge_ports

    def get_port_tag_dict(self):
        """Get a dict of port names and associated vlan tags.

        e.g. the returned dict is of the following form::

            {u'int-br-eth2': [],
             u'patch-tun': [],
             u'qr-76d9e6b6-21': 1,
             u'tapce5318ff-78': 1,
             u'tape1400310-e6': 1}

        The TAG ID is only available in the "Port" table and is not available
        in the "Interface" table queried by the get_vif_port_set() method.

        """
        port_names = self.get_port_name_list()
        args = ['--format=json', '--', '--columns=name,tag', 'list', 'Port']
        result = self.run_vsctl(args, check_error=True)
        port_tag_dict = {}
        if not result:
            return port_tag_dict
        for name, tag in jsonutils.loads(result)['data']:
            if name not in port_names:
                continue
            # 'tag' can be [u'set', []] or an integer
            if isinstance(tag, list):
                tag = tag[1]
            port_tag_dict[name] = tag
        return port_tag_dict

    def get_vif_port_by_id(self, port_id):
        args = ['--format=json', '--', '--columns=external_ids,name,ofport',
                'find', 'Interface',
                'external_ids:iface-id="%s"' % port_id]
        result = self.run_vsctl(args)
        if not result:
            return
        json_result = jsonutils.loads(result)
        try:
            # Retrieve the indexes of the columns we're looking for
            headings = json_result['headings']
            ext_ids_idx = headings.index('external_ids')
            name_idx = headings.index('name')
            ofport_idx = headings.index('ofport')
            # If the data attribute is missing or empty, the line below will
            # raise an exception which will be captured in this block.
            # We won't deal with the possibility of ovs-vsctl returning
            # multiple rows, since the interface identifier is unique.
            data = json_result['data'][0]
            port_name = data[name_idx]
            switch = get_bridge_for_iface(self.root_helper, port_name)
            if switch != self.br_name:
                LOG.info(_("Port: %(port_name)s is on %(switch)s,"
                           " not on %(br_name)s"), {'port_name': port_name,
                                                    'switch': switch,
                                                    'br_name': self.br_name})
                return
            ofport = data[ofport_idx]
            # ofport must be an integer, otherwise return None
            if not isinstance(ofport, int) or ofport == -1:
                LOG.warn(_("ofport: %(ofport)s for VIF: %(vif)s is not a "
                           "positive integer"), {'ofport': ofport,
                                                 'vif': port_id})
                return
            # Find the VIF's mac address in external ids
            ext_id_dict = dict((item[0], item[1]) for item in
                               data[ext_ids_idx][1])
            vif_mac = ext_id_dict['attached-mac']
            return VifPort(port_name, ofport, port_id, vif_mac, self)
        except Exception as e:
            LOG.warn(_("Unable to parse interface details. Exception: %s"), e)
            return

    def delete_ports(self, all_ports=False):
        if all_ports:
            port_names = self.get_port_name_list()
        else:
            port_names = (port.port_name for port in self.get_vif_ports())

        for port_name in port_names:
            self.delete_port(port_name)

    def get_local_port_mac(self):
        """Retrieve the mac of the bridge's local port."""
        address = ip_lib.IPDevice(self.br_name, self.root_helper).link.address
        if address:
            return address
        else:
            msg = _('Unable to determine mac address for %s') % self.br_name
            raise Exception(msg)

    def __enter__(self):
        self.create()
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self.destroy()


def get_bridge_for_iface(root_helper, iface):
    args = ["ovs-vsctl", "--timeout=%d" % cfg.CONF.ovs_vsctl_timeout,
            "iface-to-br", iface]
    try:
        return utils.execute(args, root_helper=root_helper).strip()
    except Exception:
        LOG.exception(_("Interface %s not found."), iface)
        return None


def get_bridges(root_helper):
    args = ["ovs-vsctl", "--timeout=%d" % cfg.CONF.ovs_vsctl_timeout,
            "list-br"]
    try:
        return utils.execute(args, root_helper=root_helper).strip().split("\n")
    except Exception as e:
        with excutils.save_and_reraise_exception():
            LOG.exception(_("Unable to retrieve bridges. Exception: %s"), e)


def get_bridge_external_bridge_id(root_helper, bridge):
    args = ["ovs-vsctl", "--timeout=2", "br-get-external-id",
            bridge, "bridge-id"]
    try:
        return utils.execute(args, root_helper=root_helper).strip()
    except Exception:
        LOG.exception(_("Bridge %s not found."), bridge)
        return None


def _build_flow_expr_str(flow_dict, cmd):
    flow_expr_arr = []
    actions = None

    if cmd == 'add':
        flow_expr_arr.append("hard_timeout=%s" %
                             flow_dict.pop('hard_timeout', '0'))
        flow_expr_arr.append("idle_timeout=%s" %
                             flow_dict.pop('idle_timeout', '0'))
        flow_expr_arr.append("priority=%s" %
                             flow_dict.pop('priority', '1'))
    elif 'priority' in flow_dict:
        msg = _("Cannot match priority on flow deletion or modification")
        raise exceptions.InvalidInput(error_message=msg)

    if cmd != 'del':
        if "actions" not in flow_dict:
            msg = _("Must specify one or more actions on flow addition"
                    " or modification")
            raise exceptions.InvalidInput(error_message=msg)
        actions = "actions=%s" % flow_dict.pop('actions')

    for key, value in flow_dict.iteritems():
        if key == 'proto':
            flow_expr_arr.append(value)
        else:
            flow_expr_arr.append("%s=%s" % (key, str(value)))

    if actions:
        flow_expr_arr.append(actions)

    return ','.join(flow_expr_arr)
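# Editor's example (hypothetical values, not part of the original file):
# how a keyword-style flow dict is rendered into an ovs-ofctl expression
# (the match-field order may vary with dict iteration order):
#
#   _build_flow_expr_str({'in_port': 1, 'proto': 'ip',
#                         'actions': 'mod_vlan_vid:42,normal'}, 'add')
#   # -> 'hard_timeout=0,idle_timeout=0,priority=1,in_port=1,ip,'
#   #    'actions=mod_vlan_vid:42,normal'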

def ofctl_arg_supported(root_helper, cmd, args):
    """Verify if the ovs-ofctl binary supports a command with specific args.

    :param root_helper: utility to use when running shell commands.
    :param cmd: ovs-ofctl command to use for the test.
    :param args: arguments to test with the command.
    :returns: a boolean indicating whether the args are supported.
    """
    supported = True
    br_name = 'br-test-%s' % common_utils.get_random_string(6)
    test_br = OVSBridge(br_name, root_helper)
    test_br.reset_bridge()

    full_args = ["ovs-ofctl", cmd, test_br.br_name] + args
    try:
        utils.execute(full_args, root_helper=root_helper)
    except Exception:
        supported = False

    test_br.destroy()
    return supported
@ -1,105 +0,0 @@
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import eventlet

from neutron.agent.linux import async_process
from neutron.openstack.common import log as logging


LOG = logging.getLogger(__name__)


class OvsdbMonitor(async_process.AsyncProcess):
    """Manages an invocation of 'ovsdb-client monitor'."""

    def __init__(self, table_name, columns=None, format=None,
                 root_helper=None, respawn_interval=None):

        cmd = ['ovsdb-client', 'monitor', table_name]
        if columns:
            cmd.append(','.join(columns))
        if format:
            cmd.append('--format=%s' % format)
        super(OvsdbMonitor, self).__init__(cmd,
                                           root_helper=root_helper,
                                           respawn_interval=respawn_interval)

    def _read_stdout(self):
        data = self._process.stdout.readline()
        if not data:
            return
        self._stdout_lines.put(data)
        LOG.debug(_('Output received from ovsdb monitor: %s') % data)
        return data

    def _read_stderr(self):
        data = super(OvsdbMonitor, self)._read_stderr()
        if data:
            LOG.error(_('Error received from ovsdb monitor: %s') % data)
            # Do not return a value, to ensure that stderr output will
            # stop the monitor.


class SimpleInterfaceMonitor(OvsdbMonitor):
    """Monitors the Interface table of the local host's ovsdb for changes.

    The has_updates() method indicates whether changes to the ovsdb
    Interface table have been detected since the monitor started or
    since the previous access.
    """

    def __init__(self, root_helper=None, respawn_interval=None):
        super(SimpleInterfaceMonitor, self).__init__(
            'Interface',
            columns=['name', 'ofport'],
            format='json',
            root_helper=root_helper,
            respawn_interval=respawn_interval,
        )
        self.data_received = False

    @property
    def is_active(self):
        return (self.data_received and
                self._kill_event and
                not self._kill_event.ready())

    @property
    def has_updates(self):
        """Indicate whether the ovsdb Interface table has been updated.

        True will be returned if the monitor process is not active.
        This 'failing open' minimizes the risk of falsely indicating
        the absence of updates at the expense of potential false
        positives.
        """
        return bool(list(self.iter_stdout())) or not self.is_active

    def start(self, block=False, timeout=5):
        super(SimpleInterfaceMonitor, self).start()
        if block:
            eventlet.timeout.Timeout(timeout)
            while not self.is_active:
                eventlet.sleep()

    def _kill(self, *args, **kwargs):
        self.data_received = False
        super(SimpleInterfaceMonitor, self)._kill(*args, **kwargs)

    def _read_stdout(self):
        data = super(SimpleInterfaceMonitor, self)._read_stdout()
        if data and not self.data_received:
            self.data_received = True
        return data
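# Editor's example (a sketch, not part of the original file; assumes a root
# helper of 'sudo' and a running ovsdb server):
#
#   monitor = SimpleInterfaceMonitor(root_helper='sudo')
#   monitor.start(block=True)    # wait until the first output arrives
#   if monitor.has_updates:
#       pass  # an Interface row changed; trigger a polling cycle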
@ -1,112 +0,0 @@
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import contextlib

import eventlet

from neutron.agent.linux import ovsdb_monitor
from neutron.plugins.openvswitch.common import constants


@contextlib.contextmanager
def get_polling_manager(minimize_polling=False,
                        root_helper=None,
                        ovsdb_monitor_respawn_interval=(
                            constants.DEFAULT_OVSDBMON_RESPAWN)):
    if minimize_polling:
        pm = InterfacePollingMinimizer(
            root_helper=root_helper,
            ovsdb_monitor_respawn_interval=ovsdb_monitor_respawn_interval)
        pm.start()
    else:
        pm = AlwaysPoll()
    try:
        yield pm
    finally:
        if minimize_polling:
            pm.stop()
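# Editor's example (a sketch, not part of the original file): an agent's
# main loop consumes the manager as a context manager, e.g.:
#
#   with get_polling_manager(minimize_polling=True,
#                            root_helper='sudo') as pm:
#       while True:
#           if pm.is_polling_required:
#               pass  # scan ports / sync state here
#               pm.polling_completed()
#           eventlet.sleep(2)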

class BasePollingManager(object):

    def __init__(self):
        self._force_polling = False
        self._polling_completed = True

    def force_polling(self):
        self._force_polling = True

    def polling_completed(self):
        self._polling_completed = True

    def _is_polling_required(self):
        raise NotImplementedError()

    @property
    def is_polling_required(self):
        # Always consume the updates to minimize polling.
        polling_required = self._is_polling_required()

        # Polling is required regardless of whether updates have been
        # detected.
        if self._force_polling:
            self._force_polling = False
            polling_required = True

        # Polling is required if not yet done for previously detected
        # updates.
        if not self._polling_completed:
            polling_required = True

        if polling_required:
            # Track whether polling has been completed to ensure that
            # polling can be required until the caller indicates via a
            # call to polling_completed() that polling has been
            # successfully performed.
            self._polling_completed = False

        return polling_required


class AlwaysPoll(BasePollingManager):

    @property
    def is_polling_required(self):
        return True


class InterfacePollingMinimizer(BasePollingManager):
    """Monitors ovsdb to determine when polling is required."""

    def __init__(self, root_helper=None,
                 ovsdb_monitor_respawn_interval=(
                     constants.DEFAULT_OVSDBMON_RESPAWN)):

        super(InterfacePollingMinimizer, self).__init__()
        self._monitor = ovsdb_monitor.SimpleInterfaceMonitor(
            root_helper=root_helper,
            respawn_interval=ovsdb_monitor_respawn_interval)

    def start(self):
        self._monitor.start()

    def stop(self):
        self._monitor.stop()

    def _is_polling_required(self):
        # Maximize the chances of update detection having a chance to
        # collect output.
        eventlet.sleep()
        return self._monitor.has_updates
@ -1,128 +0,0 @@
# Copyright 2012 Locaweb.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Juliano Martinez, Locaweb.

import fcntl
import os
import shlex
import socket
import struct
import tempfile

from eventlet.green import subprocess
from eventlet import greenthread

from neutron.common import constants
from neutron.common import utils
from neutron.openstack.common import excutils
from neutron.openstack.common import log as logging


LOG = logging.getLogger(__name__)


def create_process(cmd, root_helper=None, addl_env=None):
    """Create a process object for the given command.

    The return value will be a tuple of the process object and the
    list of command arguments used to create it.
    """
    if root_helper:
        cmd = shlex.split(root_helper) + cmd
    cmd = map(str, cmd)

    LOG.debug(_("Running command: %s"), cmd)
    env = os.environ.copy()
    if addl_env:
        env.update(addl_env)

    obj = utils.subprocess_popen(cmd, shell=False,
                                 stdin=subprocess.PIPE,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 env=env)

    return obj, cmd


def execute(cmd, root_helper=None, process_input=None, addl_env=None,
            check_exit_code=True, return_stderr=False):
    try:
        obj, cmd = create_process(cmd, root_helper=root_helper,
                                  addl_env=addl_env)
        _stdout, _stderr = (process_input and
                            obj.communicate(process_input) or
                            obj.communicate())
        obj.stdin.close()
        m = _("\nCommand: %(cmd)s\nExit code: %(code)s\nStdout: %(stdout)r\n"
              "Stderr: %(stderr)r") % {'cmd': cmd, 'code': obj.returncode,
                                       'stdout': _stdout, 'stderr': _stderr}
        if obj.returncode:
            LOG.error(m)
            if check_exit_code:
                raise RuntimeError(m)
        else:
            LOG.debug(m)
    finally:
        # NOTE(termie): this appears to be necessary to let the subprocess
        #               call clean something up in between calls; without
        #               it, two execute calls in a row hang the second one
        greenthread.sleep(0)

    return return_stderr and (_stdout, _stderr) or _stdout


def get_interface_mac(interface):
    MAC_START = 18
    MAC_END = 24
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    info = fcntl.ioctl(s.fileno(), 0x8927,
                       struct.pack('256s',
                                   interface[:constants.DEVICE_NAME_MAX_LEN]))
    return ''.join(['%02x:' % ord(char)
                    for char in info[MAC_START:MAC_END]])[:-1]


def replace_file(file_name, data):
    """Replace the contents of file_name with data in a safe manner.

    First write to a temp file and then rename. Since POSIX renames are
    atomic, the file is unlikely to be corrupted by competing writes.

    We create the tempfile on the same device to ensure that it can be
    renamed.
    """

    base_dir = os.path.dirname(os.path.abspath(file_name))
    tmp_file = tempfile.NamedTemporaryFile('w+', dir=base_dir, delete=False)
    tmp_file.write(data)
    tmp_file.close()
    os.chmod(tmp_file.name, 0o644)
    os.rename(tmp_file.name, file_name)
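# Editor's example (a sketch, not part of the original file; the path is
# hypothetical): atomically rewriting a dnsmasq hosts file so readers never
# observe a partially written file:
#
#   replace_file('/var/lib/neutron/dhcp/net-id/host',
#                '00:11:22:33:44:55,host-10-0-0-2,10.0.0.2\n')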

def find_child_pids(pid):
    """Retrieve a list of the pids of child processes of the given pid."""

    try:
        raw_pids = execute(['ps', '--ppid', pid, '-o', 'pid='])
    except RuntimeError as e:
        # Unexpected errors are the responsibility of the caller
        with excutils.save_and_reraise_exception() as ctxt:
            # Exception has already been logged by execute
            no_children_found = 'Exit code: 1' in str(e)
            if no_children_found:
                ctxt.reraise = False
                return []
    return [x.strip() for x in raw_pids.split('\n') if x.strip()]
@ -1,15 +0,0 @@
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mark McClain, DreamHost
@@ -1,390 +0,0 @@
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mark McClain, DreamHost

import hashlib
import hmac
import os
import socket
import sys

import eventlet
eventlet.monkey_patch()

import httplib2
from neutronclient.v2_0 import client
from oslo.config import cfg
import six.moves.urllib.parse as urlparse
import webob

from neutron.agent.common import config as agent_conf
from neutron.agent import rpc as agent_rpc
from neutron.common import config
from neutron.common import constants as n_const
from neutron.common import topics
from neutron.common import utils
from neutron import context
from neutron.openstack.common.cache import cache
from neutron.openstack.common import excutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import loopingcall
from neutron.openstack.common import service
from neutron import wsgi

LOG = logging.getLogger(__name__)


class MetadataProxyHandler(object):
    OPTS = [
        cfg.StrOpt('admin_user',
                   help=_("Admin user")),
        cfg.StrOpt('admin_password',
                   help=_("Admin password"),
                   secret=True),
        cfg.StrOpt('admin_tenant_name',
                   help=_("Admin tenant name")),
        cfg.StrOpt('auth_url',
                   help=_("Authentication URL")),
        cfg.StrOpt('auth_strategy', default='keystone',
                   help=_("The type of authentication to use")),
        cfg.StrOpt('auth_region',
                   help=_("Authentication region")),
        cfg.BoolOpt('auth_insecure',
                    default=False,
                    help=_("Turn off verification of the certificate for"
                           " ssl")),
        cfg.StrOpt('auth_ca_cert',
                   help=_("Certificate Authority public key (CA cert) "
                          "file for ssl")),
        cfg.StrOpt('endpoint_type',
                   default='adminURL',
                   help=_("Network service endpoint type to pull from "
                          "the keystone catalog")),
        cfg.StrOpt('nova_metadata_ip', default='127.0.0.1',
                   help=_("IP address used by Nova metadata server.")),
        cfg.IntOpt('nova_metadata_port',
                   default=8775,
                   help=_("TCP Port used by Nova metadata server.")),
        cfg.StrOpt('metadata_proxy_shared_secret',
                   default='',
                   help=_('Shared secret to sign instance-id request'),
                   secret=True),
        cfg.StrOpt('nova_metadata_protocol',
                   default='http',
                   choices=['http', 'https'],
                   help=_("Protocol to access nova metadata, http or https")),
        cfg.BoolOpt('nova_metadata_insecure', default=False,
                    help=_("Allow to perform insecure SSL (https) requests to "
                           "nova metadata")),
        cfg.StrOpt('nova_client_cert',
                   default='',
                   help=_("Client certificate for nova metadata api server.")),
        cfg.StrOpt('nova_client_priv_key',
                   default='',
                   help=_("Private key of client certificate."))
    ]

    def __init__(self, conf):
        self.conf = conf
        self.auth_info = {}
        if self.conf.cache_url:
            self._cache = cache.get_cache(self.conf.cache_url)
        else:
            self._cache = False

    def _get_neutron_client(self):
        qclient = client.Client(
            username=self.conf.admin_user,
            password=self.conf.admin_password,
            tenant_name=self.conf.admin_tenant_name,
            auth_url=self.conf.auth_url,
            auth_strategy=self.conf.auth_strategy,
            region_name=self.conf.auth_region,
            token=self.auth_info.get('auth_token'),
            insecure=self.conf.auth_insecure,
            ca_cert=self.conf.auth_ca_cert,
            endpoint_url=self.auth_info.get('endpoint_url'),
            endpoint_type=self.conf.endpoint_type
        )
        return qclient

    @webob.dec.wsgify(RequestClass=webob.Request)
    def __call__(self, req):
        try:
            LOG.debug(_("Request: %s"), req)

            instance_id, tenant_id = self._get_instance_and_tenant_id(req)
            if instance_id:
                return self._proxy_request(instance_id, tenant_id, req)
            else:
                return webob.exc.HTTPNotFound()

        except Exception:
            LOG.exception(_("Unexpected error."))
            msg = _('An unknown error has occurred. '
                    'Please try your request again.')
            return webob.exc.HTTPInternalServerError(explanation=unicode(msg))

    @utils.cache_method_results
    def _get_router_networks(self, router_id):
        """Find all networks connected to given router."""
        qclient = self._get_neutron_client()

        internal_ports = qclient.list_ports(
            device_id=router_id,
            device_owner=n_const.DEVICE_OWNER_ROUTER_INTF)['ports']
        return tuple(p['network_id'] for p in internal_ports)

    @utils.cache_method_results
    def _get_ports_for_remote_address(self, remote_address, networks):
        """Get a list of ports that have the given ip address and are part
        of the given networks.

        :param networks: list of networks in which the ip address will be
                         searched for

        """
        qclient = self._get_neutron_client()

        return qclient.list_ports(
            network_id=networks,
            fixed_ips=['ip_address=%s' % remote_address])['ports']

    def _get_ports(self, remote_address, network_id=None, router_id=None):
        """Search for all ports that contain the passed ip address and
        belong to the given network.

        If no network is passed, ports are searched on all networks connected
        to the given router. Either one of network_id or router_id must be
        passed.

        """
        if network_id:
            networks = (network_id,)
        elif router_id:
            networks = self._get_router_networks(router_id)
        else:
            raise TypeError(_("Either one of parameter network_id or "
                              "router_id must be passed to _get_ports "
                              "method."))

        return self._get_ports_for_remote_address(remote_address, networks)

    def _get_instance_and_tenant_id(self, req):
        qclient = self._get_neutron_client()

        remote_address = req.headers.get('X-Forwarded-For')
        network_id = req.headers.get('X-Neutron-Network-ID')
        router_id = req.headers.get('X-Neutron-Router-ID')

        ports = self._get_ports(remote_address, network_id, router_id)

        self.auth_info = qclient.get_auth_info()
        if len(ports) == 1:
            return ports[0]['device_id'], ports[0]['tenant_id']
        return None, None

    def _proxy_request(self, instance_id, tenant_id, req):
        headers = {
            'X-Forwarded-For': req.headers.get('X-Forwarded-For'),
            'X-Instance-ID': instance_id,
            'X-Tenant-ID': tenant_id,
            'X-Instance-ID-Signature': self._sign_instance_id(instance_id)
        }

        nova_ip_port = '%s:%s' % (self.conf.nova_metadata_ip,
                                  self.conf.nova_metadata_port)
        url = urlparse.urlunsplit((
            self.conf.nova_metadata_protocol,
            nova_ip_port,
            req.path_info,
            req.query_string,
            ''))

        h = httplib2.Http(ca_certs=self.conf.auth_ca_cert,
                          disable_ssl_certificate_validation=
                          self.conf.nova_metadata_insecure)
        if self.conf.nova_client_cert and self.conf.nova_client_priv_key:
            h.add_certificate(self.conf.nova_client_priv_key,
                              self.conf.nova_client_cert,
                              nova_ip_port)
        resp, content = h.request(url, method=req.method, headers=headers,
                                  body=req.body)

        if resp.status == 200:
            LOG.debug(str(resp))
            req.response.content_type = resp['content-type']
            req.response.body = content
            return req.response
        elif resp.status == 403:
            msg = _(
                'The remote metadata server responded with Forbidden. This '
                'response usually occurs when shared secrets do not match.'
            )
            LOG.warn(msg)
            return webob.exc.HTTPForbidden()
        elif resp.status == 404:
            return webob.exc.HTTPNotFound()
        elif resp.status == 409:
            return webob.exc.HTTPConflict()
        elif resp.status == 500:
            msg = _(
                'Remote metadata server experienced an internal server error.'
            )
            LOG.warn(msg)
            return webob.exc.HTTPInternalServerError(explanation=unicode(msg))
        else:
            raise Exception(_('Unexpected response code: %s') % resp.status)

    def _sign_instance_id(self, instance_id):
        return hmac.new(self.conf.metadata_proxy_shared_secret,
                        instance_id,
                        hashlib.sha256).hexdigest()


class UnixDomainHttpProtocol(eventlet.wsgi.HttpProtocol):
    def __init__(self, request, client_address, server):
        if client_address == '':
            client_address = ('<local>', 0)
        # base class is old-style, so super does not work properly
        eventlet.wsgi.HttpProtocol.__init__(self, request, client_address,
                                            server)


class WorkerService(wsgi.WorkerService):
    def start(self):
        self._server = self._service.pool.spawn(self._service._run,
                                                self._application,
                                                self._service._socket)


class UnixDomainWSGIServer(wsgi.Server):
    def __init__(self, name):
        self._socket = None
        self._launcher = None
        self._server = None
        super(UnixDomainWSGIServer, self).__init__(name)

    def start(self, application, file_socket, workers, backlog):
        self._socket = eventlet.listen(file_socket,
                                       family=socket.AF_UNIX,
                                       backlog=backlog)
        if workers < 1:
            # For the case where only one process is required.
            self._server = self.pool.spawn_n(self._run, application,
                                             self._socket)
        else:
            # Minimize the cost of checking for child exit by extending the
            # wait interval past the default of 0.01s.
            self._launcher = service.ProcessLauncher(wait_interval=1.0)
            self._server = WorkerService(self, application)
            self._launcher.launch_service(self._server, workers=workers)

    def _run(self, application, socket):
        """Start a WSGI service in a new green thread."""
        logger = logging.getLogger('eventlet.wsgi.server')
        eventlet.wsgi.server(socket,
                             application,
                             custom_pool=self.pool,
                             protocol=UnixDomainHttpProtocol,
                             log=logging.WritableLogger(logger))


class UnixDomainMetadataProxy(object):
    OPTS = [
        cfg.StrOpt('metadata_proxy_socket',
                   default='$state_path/metadata_proxy',
                   help=_('Location for Metadata Proxy UNIX domain socket')),
        cfg.IntOpt('metadata_workers',
                   default=utils.cpu_count() // 2,
                   help=_('Number of separate worker processes for metadata '
                          'server')),
        cfg.IntOpt('metadata_backlog',
                   default=4096,
                   help=_('Number of backlog requests to configure the '
                          'metadata server socket with'))
    ]

    def __init__(self, conf):
        self.conf = conf

        dirname = os.path.dirname(cfg.CONF.metadata_proxy_socket)
        if os.path.isdir(dirname):
            try:
                os.unlink(cfg.CONF.metadata_proxy_socket)
            except OSError:
                with excutils.save_and_reraise_exception() as ctxt:
                    if not os.path.exists(cfg.CONF.metadata_proxy_socket):
                        ctxt.reraise = False
        else:
            os.makedirs(dirname, 0o755)

        self._init_state_reporting()

    def _init_state_reporting(self):
        self.context = context.get_admin_context_without_session()
        self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN)
        self.agent_state = {
            'binary': 'neutron-metadata-agent',
            'host': cfg.CONF.host,
            'topic': 'N/A',
            'configurations': {
                'metadata_proxy_socket': cfg.CONF.metadata_proxy_socket,
                'nova_metadata_ip': cfg.CONF.nova_metadata_ip,
                'nova_metadata_port': cfg.CONF.nova_metadata_port,
            },
            'start_flag': True,
            'agent_type': n_const.AGENT_TYPE_METADATA}
        report_interval = cfg.CONF.AGENT.report_interval
        if report_interval:
            self.heartbeat = loopingcall.FixedIntervalLoopingCall(
                self._report_state)
            self.heartbeat.start(interval=report_interval)

    def _report_state(self):
        try:
            self.state_rpc.report_state(
                self.context,
                self.agent_state,
                use_call=self.agent_state.get('start_flag'))
        except AttributeError:
            # This means the server does not support report_state
            LOG.warn(_('Neutron server does not support state report.'
                       ' State report for this agent will be disabled.'))
            self.heartbeat.stop()
            return
        except Exception:
            LOG.exception(_("Failed reporting state!"))
            return
        self.agent_state.pop('start_flag', None)

    def run(self):
        server = UnixDomainWSGIServer('neutron-metadata-agent')
        server.start(MetadataProxyHandler(self.conf),
                     self.conf.metadata_proxy_socket,
                     workers=self.conf.metadata_workers,
                     backlog=self.conf.metadata_backlog)
        server.wait()


def main():
    cfg.CONF.register_opts(UnixDomainMetadataProxy.OPTS)
    cfg.CONF.register_opts(MetadataProxyHandler.OPTS)
    cache.register_oslo_configs(cfg.CONF)
    cfg.CONF.set_default(name='cache_url', default='memory://?default_ttl=5')
    agent_conf.register_agent_state_opts_helper(cfg.CONF)
    config.init(sys.argv[1:])
    config.setup_logging(cfg.CONF)
    utils.log_opt_values(LOG)
    proxy = UnixDomainMetadataProxy(cfg.CONF)
    proxy.run()
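For reference, a sketch of how the X-Instance-ID-Signature header produced by _sign_instance_id() above can be verified on the receiving side; the secret, instance id, and received signature are example values:

import hashlib
import hmac

shared_secret = 'example-secret'                            # example value
instance_id = 'a1b2c3d4-0000-0000-0000-000000000000'        # example value
received_sig = request_headers['X-Instance-ID-Signature']   # from the request

expected = hmac.new(shared_secret, instance_id, hashlib.sha256).hexdigest()
# Python 2 era code; a constant-time comparison (hmac.compare_digest in
# newer interpreters) would be preferable where available.
if expected != received_sig:
    raise ValueError('instance id signature mismatch')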
@@ -1,182 +0,0 @@
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mark McClain, DreamHost

import httplib
import socket

import eventlet
eventlet.monkey_patch()

import httplib2
from oslo.config import cfg
import six.moves.urllib.parse as urlparse
import webob

from neutron.agent.linux import daemon
from neutron.common import config
from neutron.common import utils
from neutron.openstack.common import log as logging
from neutron import wsgi

LOG = logging.getLogger(__name__)


class UnixDomainHTTPConnection(httplib.HTTPConnection):
    """Connection class for HTTP over UNIX domain socket."""
    def __init__(self, host, port=None, strict=None, timeout=None,
                 proxy_info=None):
        httplib.HTTPConnection.__init__(self, host, port, strict)
        self.timeout = timeout

    def connect(self):
        self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        if self.timeout:
            self.sock.settimeout(self.timeout)
        self.sock.connect(cfg.CONF.metadata_proxy_socket)


class NetworkMetadataProxyHandler(object):
    """Proxy AF_INET metadata request through Unix Domain socket.

    The Unix domain socket allows the proxy to access resources that are
    not accessible within the isolated tenant context.
    """

    def __init__(self, network_id=None, router_id=None):
        self.network_id = network_id
        self.router_id = router_id

        if network_id is None and router_id is None:
            msg = _('network_id and router_id are None. One must be '
                    'provided.')
            raise ValueError(msg)

    @webob.dec.wsgify(RequestClass=webob.Request)
    def __call__(self, req):
        LOG.debug(_("Request: %s"), req)
        try:
            return self._proxy_request(req.remote_addr,
                                       req.method,
                                       req.path_info,
                                       req.query_string,
                                       req.body)
        except Exception:
            LOG.exception(_("Unexpected error."))
            msg = _('An unknown error has occurred. '
                    'Please try your request again.')
            return webob.exc.HTTPInternalServerError(explanation=unicode(msg))

    def _proxy_request(self, remote_address, method, path_info,
                       query_string, body):
        headers = {
            'X-Forwarded-For': remote_address,
        }

        if self.router_id:
            headers['X-Neutron-Router-ID'] = self.router_id
        else:
            headers['X-Neutron-Network-ID'] = self.network_id

        url = urlparse.urlunsplit((
            'http',
            '169.254.169.254',  # a dummy value to make the request proper
            path_info,
            query_string,
            ''))

        h = httplib2.Http()
        resp, content = h.request(
            url,
            method=method,
            headers=headers,
            body=body,
            connection_type=UnixDomainHTTPConnection)

        if resp.status == 200:
            LOG.debug(resp)
            LOG.debug(content)
            response = webob.Response()
            response.status = resp.status
            response.headers['Content-Type'] = resp['content-type']
            response.body = content
            return response
        elif resp.status == 404:
            return webob.exc.HTTPNotFound()
        elif resp.status == 409:
            return webob.exc.HTTPConflict()
        elif resp.status == 500:
            msg = _(
                'Remote metadata server experienced an internal server error.'
            )
            LOG.debug(msg)
            return webob.exc.HTTPInternalServerError(explanation=unicode(msg))
        else:
            raise Exception(_('Unexpected response code: %s') % resp.status)


class ProxyDaemon(daemon.Daemon):
    def __init__(self, pidfile, port, network_id=None, router_id=None):
        uuid = network_id or router_id
        super(ProxyDaemon, self).__init__(pidfile, uuid=uuid)
        self.network_id = network_id
        self.router_id = router_id
        self.port = port

    def run(self):
        handler = NetworkMetadataProxyHandler(
            self.network_id,
            self.router_id)
        proxy = wsgi.Server('neutron-network-metadata-proxy')
        proxy.start(handler, self.port)
        proxy.wait()


def main():
    opts = [
        cfg.StrOpt('network_id',
                   help=_('Network that will have instance metadata '
                          'proxied.')),
        cfg.StrOpt('router_id',
                   help=_('Router that will have connected instances\' '
                          'metadata proxied.')),
        cfg.StrOpt('pid_file',
                   help=_('Location of pid file of this process.')),
        cfg.BoolOpt('daemonize',
                    default=True,
                    help=_('Run as daemon.')),
        cfg.IntOpt('metadata_port',
                   default=9697,
                   help=_("TCP Port to listen for metadata server "
                          "requests.")),
        cfg.StrOpt('metadata_proxy_socket',
                   default='$state_path/metadata_proxy',
                   help=_('Location of Metadata Proxy UNIX domain '
                          'socket'))
    ]

    cfg.CONF.register_cli_opts(opts)
    # Don't get the default configuration file
    cfg.CONF(project='neutron', default_config_files=[])
    config.setup_logging(cfg.CONF)
    utils.log_opt_values(LOG)
    proxy = ProxyDaemon(cfg.CONF.pid_file,
                        cfg.CONF.metadata_port,
                        network_id=cfg.CONF.network_id,
                        router_id=cfg.CONF.router_id)

    if cfg.CONF.daemonize:
        proxy.start()
    else:
        proxy.run()
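A sketch of issuing a request over the UNIX domain socket the way _proxy_request() above does, assuming cfg.CONF.metadata_proxy_socket already points at the metadata agent's socket; the header values are examples:

import httplib2

# connection_type redirects httplib2 onto the UNIX domain socket; the
# 169.254.169.254 host in the URL is the usual dummy value.
h = httplib2.Http()
resp, content = h.request(
    'http://169.254.169.254/latest/meta-data/',
    method='GET',
    headers={'X-Forwarded-For': '10.0.0.3',                  # example address
             'X-Neutron-Network-ID': 'example-network-id'},  # example id
    connection_type=UnixDomainHTTPConnection)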
@@ -1,174 +0,0 @@
# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import re

import eventlet
eventlet.monkey_patch()

from oslo.config import cfg

from neutron.agent.common import config as agent_config
from neutron.agent import dhcp_agent
from neutron.agent import l3_agent
from neutron.agent.linux import dhcp
from neutron.agent.linux import interface
from neutron.agent.linux import ip_lib
from neutron.agent.linux import ovs_lib
from neutron.api.v2 import attributes
from neutron.common import config
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging


LOG = logging.getLogger(__name__)
NS_MANGLING_PATTERN = ('(%s|%s)' % (dhcp.NS_PREFIX, l3_agent.NS_PREFIX) +
                       attributes.UUID_PATTERN)


class FakeDhcpPlugin(object):
    """Fake RPC plugin to bypass any RPC calls."""
    def __getattribute__(self, name):
        def fake_method(*args):
            pass
        return fake_method


def setup_conf():
    """Setup the cfg for the clean up utility.

    Use separate setup_conf for the utility because there are many options
    from the main config that do not apply during clean-up.
    """

    cli_opts = [
        cfg.BoolOpt('force',
                    default=False,
                    help=_('Delete the namespace by removing all devices.')),
    ]

    conf = cfg.CONF
    conf.register_cli_opts(cli_opts)
    agent_config.register_interface_driver_opts_helper(conf)
    agent_config.register_use_namespaces_opts_helper(conf)
    agent_config.register_root_helper(conf)
    conf.register_opts(dhcp.OPTS)
    conf.register_opts(dhcp_agent.DhcpAgent.OPTS)
    conf.register_opts(interface.OPTS)
    return conf


def kill_dhcp(conf, namespace):
    """Disable DHCP for a network if DHCP is still active."""
    root_helper = agent_config.get_root_helper(conf)
    network_id = namespace.replace(dhcp.NS_PREFIX, '')

    dhcp_driver = importutils.import_object(
        conf.dhcp_driver,
        conf=conf,
        network=dhcp.NetModel(conf.use_namespaces, {'id': network_id}),
        root_helper=root_helper,
        plugin=FakeDhcpPlugin())

    if dhcp_driver.active:
        dhcp_driver.disable()


def eligible_for_deletion(conf, namespace, force=False):
    """Determine whether a namespace is eligible for deletion.

    Eligibility is determined by having only the lo device or if force
    is passed as a parameter.
    """

    # filter out namespaces without UUID as the name
    if not re.match(NS_MANGLING_PATTERN, namespace):
        return False

    root_helper = agent_config.get_root_helper(conf)
    ip = ip_lib.IPWrapper(root_helper, namespace)
    return force or ip.namespace_is_empty()


def unplug_device(conf, device):
    try:
        device.link.delete()
    except RuntimeError:
        root_helper = agent_config.get_root_helper(conf)
        # Maybe the device is an OVS port, so try to delete it that way
        bridge_name = ovs_lib.get_bridge_for_iface(root_helper, device.name)
        if bridge_name:
            bridge = ovs_lib.OVSBridge(bridge_name, root_helper)
            bridge.delete_port(device.name)
        else:
            LOG.debug(_('Unable to find bridge for device: %s'), device.name)


def destroy_namespace(conf, namespace, force=False):
    """Destroy a given namespace.

    If force is True, then dhcp (if it exists) will be disabled and all
    devices will be forcibly removed.
    """

    try:
        root_helper = agent_config.get_root_helper(conf)
        ip = ip_lib.IPWrapper(root_helper, namespace)

        if force:
            kill_dhcp(conf, namespace)
            # NOTE: The dhcp driver will remove the namespace if it is empty,
            # so a second check is required here.
            if ip.netns.exists(namespace):
                for device in ip.get_devices(exclude_loopback=True):
                    unplug_device(conf, device)

        ip.garbage_collect_namespace()
    except Exception:
        LOG.exception(_('Error: unable to destroy namespace: %s'), namespace)


def main():
    """Main method for cleaning up network namespaces.

    This method will make two passes checking for namespaces to delete. The
    process will identify candidates, sleep, and call garbage collect. The
    garbage collection will re-verify that the namespace meets the criteria
    for deletion (i.e. it is empty). The period of sleep and the 2nd pass
    allow time for the namespace state to settle, so that the check prior to
    deletion will re-confirm the namespace is empty.

    The utility is designed to clean-up after the forced or unexpected
    termination of Neutron agents.

    The --force flag should only be used as part of the cleanup of a devstack
    installation as it will blindly purge namespaces and their devices. This
    option also kills any lingering DHCP instances.
    """
    conf = setup_conf()
    conf()
    config.setup_logging(conf)

    root_helper = agent_config.get_root_helper(conf)
    # Identify namespaces that are candidates for deletion.
    candidates = [ns for ns in
                  ip_lib.IPWrapper.get_namespaces(root_helper)
                  if eligible_for_deletion(conf, ns, conf.force)]

    if candidates:
        eventlet.sleep(2)

        for namespace in candidates:
            destroy_namespace(conf, namespace, conf.force)
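The eligibility check above keys off NS_MANGLING_PATTERN; a small sketch of what it accepts, assuming the stock 'qdhcp-'/'qrouter-' prefixes and the usual hex UUID form of attributes.UUID_PATTERN:

import re

# Approximation of NS_MANGLING_PATTERN for illustration only: an
# agent-managed namespace is named <prefix><uuid>.
pattern = ('(qdhcp-|qrouter-)'
           '[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}')

assert re.match(pattern, 'qdhcp-6f47e2c0-8f1b-4b3a-9c6e-2a5d8c1b0e44')
assert not re.match(pattern, 'my-manual-namespace')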
@@ -1,110 +0,0 @@
# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo.config import cfg

from neutron.agent.common import config as agent_config
from neutron.agent import l3_agent
from neutron.agent.linux import interface
from neutron.agent.linux import ip_lib
from neutron.agent.linux import ovs_lib
from neutron.common import config
from neutron.openstack.common import log as logging


LOG = logging.getLogger(__name__)


def setup_conf():
    """Setup the cfg for the clean up utility.

    Use separate setup_conf for the utility because there are many options
    from the main config that do not apply during clean-up.
    """
    opts = [
        cfg.BoolOpt('ovs_all_ports',
                    default=False,
                    help=_('True to delete all ports on all the OpenvSwitch '
                           'bridges. False to delete ports created by '
                           'Neutron on integration and external network '
                           'bridges.'))
    ]

    conf = cfg.CONF
    conf.register_cli_opts(opts)
    conf.register_opts(l3_agent.L3NATAgent.OPTS)
    conf.register_opts(interface.OPTS)
    agent_config.register_interface_driver_opts_helper(conf)
    agent_config.register_use_namespaces_opts_helper(conf)
    agent_config.register_root_helper(conf)
    return conf


def collect_neutron_ports(bridges, root_helper):
    """Collect ports created by Neutron from OVS."""
    ports = []
    for bridge in bridges:
        ovs = ovs_lib.OVSBridge(bridge, root_helper)
        ports += [port.port_name for port in ovs.get_vif_ports()]
    return ports


def delete_neutron_ports(ports, root_helper):
    """Delete non-internal ports created by Neutron.

    Non-internal OVS ports need to be removed manually.
    """
    for port in ports:
        if ip_lib.device_exists(port):
            device = ip_lib.IPDevice(port, root_helper)
            device.link.delete()
            LOG.info(_("Delete %s"), port)


def main():
    """Main method for cleaning up OVS bridges.

    The utility cleans up the integration bridges used by Neutron.
    """

    conf = setup_conf()
    conf()
    config.setup_logging(conf)

    configuration_bridges = set([conf.ovs_integration_bridge,
                                 conf.external_network_bridge])
    ovs_bridges = set(ovs_lib.get_bridges(conf.AGENT.root_helper))
    available_configuration_bridges = configuration_bridges & ovs_bridges

    if conf.ovs_all_ports:
        bridges = ovs_bridges
    else:
        bridges = available_configuration_bridges

    # Collect existing ports created by Neutron on configuration bridges.
    # After deleting ports from OVS bridges, we cannot determine which
    # ports were created by Neutron, so port information is collected now.
    ports = collect_neutron_ports(available_configuration_bridges,
                                  conf.AGENT.root_helper)

    for bridge in bridges:
        LOG.info(_("Cleaning %s"), bridge)
        ovs = ovs_lib.OVSBridge(bridge, conf.AGENT.root_helper)
        ovs.delete_ports(all_ports=conf.ovs_all_ports)

    # Remove remaining ports created by Neutron (usually veth pair)
    delete_neutron_ports(ports, conf.AGENT.root_helper)

    LOG.info(_("OVS cleanup completed successfully"))
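The bridge selection in main() above is plain set arithmetic; a sketch with example bridge names:

# Example values only: bridges named in the config vs. bridges that
# actually exist on the host.
configuration_bridges = set(['br-int', 'br-ex'])
ovs_bridges = set(['br-int', 'br-tun'])

# Only configured bridges that really exist are cleaned by default.
available = configuration_bridges & ovs_bridges
assert available == set(['br-int'])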
@@ -1,134 +0,0 @@
# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import itertools
from oslo import messaging

from neutron.common import rpc as n_rpc
from neutron.common import topics

from neutron.openstack.common import log as logging
from neutron.openstack.common import timeutils


LOG = logging.getLogger(__name__)


def create_consumers(endpoints, prefix, topic_details):
    """Create agent RPC consumers.

    :param endpoints: The list of endpoints to process the incoming messages.
    :param prefix: Common prefix for the plugin/agent message queues.
    :param topic_details: A list of topics. Each topic has a name, an
                          operation, and an optional host param keying the
                          subscription to topic.host for plugin calls.

    :returns: A common Connection.
    """

    connection = n_rpc.create_connection(new=True)
    for details in topic_details:
        topic, operation, node_name = itertools.islice(
            itertools.chain(details, [None]), 3)

        topic_name = topics.get_topic_name(prefix, topic, operation)
        connection.create_consumer(topic_name, endpoints, fanout=True)
        if node_name:
            node_topic_name = '%s.%s' % (topic_name, node_name)
            connection.create_consumer(node_topic_name,
                                       endpoints,
                                       fanout=False)
    connection.consume_in_threads()
    return connection
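A sketch of how create_consumers() composes queue names, assuming topics.get_topic_name joins prefix, topic, and operation with dashes; the prefix, host, and endpoint list below are example values:

endpoints = []  # RPC endpoint objects would go here
topic_details = [
    ['port', 'update'],                    # fanout consumer only
    ['tunnel', 'update', 'agent-host-1'],  # plus a host-scoped consumer
]
# With prefix 'q-agent-notifier' this subscribes to
# 'q-agent-notifier-port-update', 'q-agent-notifier-tunnel-update' and
# 'q-agent-notifier-tunnel-update.agent-host-1'.
connection = create_consumers(endpoints, 'q-agent-notifier', topic_details)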
class PluginReportStateAPI(n_rpc.RpcProxy):
    BASE_RPC_API_VERSION = '1.0'

    def __init__(self, topic):
        super(PluginReportStateAPI, self).__init__(
            topic=topic, default_version=self.BASE_RPC_API_VERSION)

    def report_state(self, context, agent_state, use_call=False):
        msg = self.make_msg('report_state',
                            agent_state={'agent_state':
                                         agent_state},
                            time=timeutils.strtime())
        if use_call:
            return self.call(context, msg, topic=self.topic)
        else:
            return self.cast(context, msg, topic=self.topic)


class PluginApi(n_rpc.RpcProxy):
    '''Agent side of the rpc API.

    API version history:
        1.0 - Initial version.
        1.3 - get_device_details rpc signature upgrade to obtain 'host' and
              return value to include fixed_ips and device_owner for
              the device port
    '''

    BASE_RPC_API_VERSION = '1.3'

    def __init__(self, topic):
        super(PluginApi, self).__init__(
            topic=topic, default_version=self.BASE_RPC_API_VERSION)

    def get_device_details(self, context, device, agent_id, host=None):
        return self.call(context,
                         self.make_msg('get_device_details', device=device,
                                       agent_id=agent_id,
                                       host=host),
                         topic=self.topic)

    def get_devices_details_list(self, context, devices, agent_id, host=None):
        res = []
        try:
            res = self.call(context,
                            self.make_msg('get_devices_details_list',
                                          devices=devices,
                                          agent_id=agent_id,
                                          host=host),
                            topic=self.topic,
                            version=self.BASE_RPC_API_VERSION)
        except messaging.UnsupportedVersion:
            res = [
                self.call(context,
                          self.make_msg('get_device_details', device=device,
                                        agent_id=agent_id, host=host),
                          topic=self.topic)
                for device in devices
            ]
        return res

    def update_device_down(self, context, device, agent_id, host=None):
        return self.call(context,
                         self.make_msg('update_device_down', device=device,
                                       agent_id=agent_id, host=host),
                         topic=self.topic)

    def update_device_up(self, context, device, agent_id, host=None):
        return self.call(context,
                         self.make_msg('update_device_up', device=device,
                                       agent_id=agent_id, host=host),
                         topic=self.topic)

    def tunnel_sync(self, context, tunnel_ip, tunnel_type=None):
        return self.call(context,
                         self.make_msg('tunnel_sync', tunnel_ip=tunnel_ip,
                                       tunnel_type=tunnel_type),
                         topic=self.topic)
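get_devices_details_list() above shows the usual RPC downgrade pattern: try the newer batched call and, on messaging.UnsupportedVersion, fall back to per-device calls. A condensed sketch of the same idea; fetch_all and its arguments are illustrative names, not part of the original file:

from oslo import messaging

def fetch_all(rpc, context, devices, agent_id):
    # Illustrative helper: batched call first, per-item fallback for
    # servers that predate the batched method.
    try:
        return rpc.get_devices_details_list(context, devices, agent_id)
    except messaging.UnsupportedVersion:
        return [rpc.get_device_details(context, d, agent_id)
                for d in devices]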
@@ -1,301 +0,0 @@
# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from oslo.config import cfg

from neutron.common import topics
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging

LOG = logging.getLogger(__name__)
SG_RPC_VERSION = "1.1"

security_group_opts = [
    cfg.StrOpt(
        'firewall_driver',
        help=_('Driver for security groups firewall in the L2 agent')),
    cfg.BoolOpt(
        'enable_security_group',
        default=True,
        help=_(
            'Controls whether the neutron security group API is enabled '
            'in the server. It should be false when using no security '
            'groups or using the nova security group API.'))
]
cfg.CONF.register_opts(security_group_opts, 'SECURITYGROUP')


# This is a backward-compatibility check for Havana
def _is_valid_driver_combination():
    return ((cfg.CONF.SECURITYGROUP.enable_security_group and
             (cfg.CONF.SECURITYGROUP.firewall_driver and
              cfg.CONF.SECURITYGROUP.firewall_driver !=
              'neutron.agent.firewall.NoopFirewallDriver')) or
            (not cfg.CONF.SECURITYGROUP.enable_security_group and
             (cfg.CONF.SECURITYGROUP.firewall_driver ==
              'neutron.agent.firewall.NoopFirewallDriver' or
              cfg.CONF.SECURITYGROUP.firewall_driver is None)
             ))
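_is_valid_driver_combination() accepts exactly two configurations: security groups enabled with a real driver, or disabled with no driver (or the noop driver). A quick sketch exercising both accepted combinations; the override values are examples and the iptables driver path is the stock one from this era:

from oslo.config import cfg

cfg.CONF.set_override('enable_security_group', True, 'SECURITYGROUP')
cfg.CONF.set_override('firewall_driver',
                      'neutron.agent.linux.iptables_firewall.'
                      'IptablesFirewallDriver', 'SECURITYGROUP')
assert _is_valid_driver_combination()

cfg.CONF.set_override('enable_security_group', False, 'SECURITYGROUP')
cfg.CONF.set_override('firewall_driver',
                      'neutron.agent.firewall.NoopFirewallDriver',
                      'SECURITYGROUP')
assert _is_valid_driver_combination()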
|
||||
|
||||
def is_firewall_enabled():
|
||||
if not _is_valid_driver_combination():
|
||||
LOG.warn(_("Driver configuration doesn't match with "
|
||||
"enable_security_group"))
|
||||
|
||||
return cfg.CONF.SECURITYGROUP.enable_security_group
|
||||
|
||||
|
||||
def _disable_extension(extension, aliases):
|
||||
if extension in aliases:
|
||||
aliases.remove(extension)
|
||||
|
||||
|
||||
def disable_security_group_extension_by_config(aliases):
|
||||
if not is_firewall_enabled():
|
||||
LOG.info(_('Disabled security-group extension.'))
|
||||
_disable_extension('security-group', aliases)
|
||||
LOG.info(_('Disabled allowed-address-pairs extension.'))
|
||||
_disable_extension('allowed-address-pairs', aliases)
|
||||
|
||||
|
||||
class SecurityGroupServerRpcApiMixin(object):
|
||||
"""A mix-in that enable SecurityGroup support in plugin rpc."""
|
||||
def security_group_rules_for_devices(self, context, devices):
|
||||
LOG.debug(_("Get security group rules "
|
||||
"for devices via rpc %r"), devices)
|
||||
return self.call(context,
|
||||
self.make_msg('security_group_rules_for_devices',
|
||||
devices=devices),
|
||||
version=SG_RPC_VERSION,
|
||||
topic=self.topic)
|
||||
|
||||
|
||||
class SecurityGroupAgentRpcCallbackMixin(object):
|
||||
"""A mix-in that enable SecurityGroup agent
|
||||
support in agent implementations.
|
||||
"""
|
||||
#mix-in object should be have sg_agent
|
||||
sg_agent = None
|
||||
|
||||
def _security_groups_agent_not_set(self):
|
||||
LOG.warning(_("Security group agent binding currently not set. "
|
||||
"This should be set by the end of the init "
|
||||
"process."))
|
||||
|
||||
def security_groups_rule_updated(self, context, **kwargs):
|
||||
"""Callback for security group rule update.
|
||||
|
||||
:param security_groups: list of updated security_groups
|
||||
"""
|
||||
security_groups = kwargs.get('security_groups', [])
|
||||
LOG.debug(
|
||||
_("Security group rule updated on remote: %s"), security_groups)
|
||||
if not self.sg_agent:
|
||||
return self._security_groups_agent_not_set()
|
||||
self.sg_agent.security_groups_rule_updated(security_groups)
|
||||
|
||||
def security_groups_member_updated(self, context, **kwargs):
|
||||
"""Callback for security group member update.
|
||||
|
||||
:param security_groups: list of updated security_groups
|
||||
"""
|
||||
security_groups = kwargs.get('security_groups', [])
|
||||
LOG.debug(
|
||||
_("Security group member updated on remote: %s"), security_groups)
|
||||
if not self.sg_agent:
|
||||
return self._security_groups_agent_not_set()
|
||||
self.sg_agent.security_groups_member_updated(security_groups)
|
||||
|
||||
def security_groups_provider_updated(self, context, **kwargs):
|
||||
"""Callback for security group provider update."""
|
||||
LOG.debug(_("Provider rule updated"))
|
||||
if not self.sg_agent:
|
||||
return self._security_groups_agent_not_set()
|
||||
self.sg_agent.security_groups_provider_updated()
|
||||
|
||||
|
||||
class SecurityGroupAgentRpcMixin(object):
|
||||
"""A mix-in that enable SecurityGroup agent
|
||||
support in agent implementations.
|
||||
"""
|
||||
|
||||
def init_firewall(self, defer_refresh_firewall=False):
|
||||
firewall_driver = cfg.CONF.SECURITYGROUP.firewall_driver
|
||||
LOG.debug(_("Init firewall settings (driver=%s)"), firewall_driver)
|
||||
if not _is_valid_driver_combination():
|
||||
LOG.warn(_("Driver configuration doesn't match "
|
||||
"with enable_security_group"))
|
||||
if not firewall_driver:
|
||||
firewall_driver = 'neutron.agent.firewall.NoopFirewallDriver'
|
||||
self.firewall = importutils.import_object(firewall_driver)
|
||||
# The following flag will be set to true if port filter must not be
|
||||
# applied as soon as a rule or membership notification is received
|
||||
self.defer_refresh_firewall = defer_refresh_firewall
|
||||
# Stores devices for which firewall should be refreshed when
|
||||
# deferred refresh is enabled.
|
||||
self.devices_to_refilter = set()
|
||||
# Flag raised when a global refresh is needed
|
||||
self.global_refresh_firewall = False
|
||||
|
||||
def prepare_devices_filter(self, device_ids):
|
||||
if not device_ids:
|
||||
return
|
||||
LOG.info(_("Preparing filters for devices %s"), device_ids)
|
||||
devices = self.plugin_rpc.security_group_rules_for_devices(
|
||||
self.context, list(device_ids))
|
||||
with self.firewall.defer_apply():
|
||||
for device in devices.values():
|
||||
self.firewall.prepare_port_filter(device)
|
||||
|
||||
def security_groups_rule_updated(self, security_groups):
|
||||
LOG.info(_("Security group "
|
||||
"rule updated %r"), security_groups)
|
||||
self._security_group_updated(
|
||||
security_groups,
|
||||
'security_groups')
|
||||
|
||||
def security_groups_member_updated(self, security_groups):
|
||||
LOG.info(_("Security group "
|
||||
"member updated %r"), security_groups)
|
||||
self._security_group_updated(
|
||||
security_groups,
|
||||
'security_group_source_groups')
|
||||
|
||||
def _security_group_updated(self, security_groups, attribute):
|
||||
devices = []
|
||||
sec_grp_set = set(security_groups)
|
||||
for device in self.firewall.ports.values():
|
||||
if sec_grp_set & set(device.get(attribute, [])):
|
||||
devices.append(device['device'])
|
||||
if devices:
|
||||
if self.defer_refresh_firewall:
|
||||
LOG.debug(_("Adding %s devices to the list of devices "
|
||||
"for which firewall needs to be refreshed"),
|
||||
devices)
|
||||
self.devices_to_refilter |= set(devices)
|
||||
else:
|
||||
self.refresh_firewall(devices)
|
||||
|
||||
def security_groups_provider_updated(self):
|
||||
LOG.info(_("Provider rule updated"))
|
||||
if self.defer_refresh_firewall:
|
||||
# NOTE(salv-orlando): A 'global refresh' might not be
|
||||
# necessary if the subnet for which the provider rules
|
||||
# were updated is known
|
||||
self.global_refresh_firewall = True
|
||||
else:
|
||||
self.refresh_firewall()
|
||||
|
||||
def remove_devices_filter(self, device_ids):
|
||||
if not device_ids:
|
||||
return
|
||||
LOG.info(_("Remove device filter for %r"), device_ids)
|
||||
with self.firewall.defer_apply():
|
||||
for device_id in device_ids:
|
||||
device = self.firewall.ports.get(device_id)
|
||||
if not device:
|
||||
continue
|
||||
self.firewall.remove_port_filter(device)
|
||||
|
||||
def refresh_firewall(self, device_ids=None):
|
||||
LOG.info(_("Refresh firewall rules"))
|
||||
if not device_ids:
|
||||
device_ids = self.firewall.ports.keys()
|
||||
if not device_ids:
|
||||
LOG.info(_("No ports here to refresh firewall"))
|
||||
return
|
||||
devices = self.plugin_rpc.security_group_rules_for_devices(
|
||||
self.context, device_ids)
|
||||
with self.firewall.defer_apply():
|
||||
for device in devices.values():
|
||||
LOG.debug(_("Update port filter for %s"), device['device'])
|
||||
self.firewall.update_port_filter(device)
|
||||
|
||||
def firewall_refresh_needed(self):
|
||||
return self.global_refresh_firewall or self.devices_to_refilter
|
||||
|
||||
def setup_port_filters(self, new_devices, updated_devices):
|
||||
"""Configure port filters for devices.
|
||||
|
||||
This routine applies filters for new devices and refreshes firewall
|
||||
rules when devices have been updated, or when there are changes in
|
||||
security group membership or rules.
|
||||
|
||||
:param new_devices: set containing identifiers for new devices
|
||||
:param updated_devices: set containining identifiers for
|
||||
updated devices
|
||||
"""
|
||||
if new_devices:
|
||||
LOG.debug(_("Preparing device filters for %d new devices"),
|
||||
len(new_devices))
|
||||
self.prepare_devices_filter(new_devices)
|
||||
# These data structures are cleared here in order to avoid
|
||||
# losing updates occurring during firewall refresh
|
||||
devices_to_refilter = self.devices_to_refilter
|
||||
global_refresh_firewall = self.global_refresh_firewall
|
||||
self.devices_to_refilter = set()
|
||||
self.global_refresh_firewall = False
|
||||
# TODO(salv-orlando): Avoid if possible ever performing the global
|
||||
# refresh providing a precise list of devices for which firewall
|
||||
# should be refreshed
|
||||
if global_refresh_firewall:
|
||||
LOG.debug(_("Refreshing firewall for all filtered devices"))
|
||||
self.refresh_firewall()
|
||||
else:
|
||||
# If a device is both in new and updated devices
|
||||
# avoid reprocessing it
|
||||
updated_devices = ((updated_devices | devices_to_refilter) -
|
||||
new_devices)
|
||||
if updated_devices:
|
||||
LOG.debug(_("Refreshing firewall for %d devices"),
|
||||
len(updated_devices))
|
||||
self.refresh_firewall(updated_devices)
|
||||
|
||||
|
||||
class SecurityGroupAgentRpcApiMixin(object):
|
||||
|
||||
def _get_security_group_topic(self):
|
||||
return topics.get_topic_name(self.topic,
|
||||
topics.SECURITY_GROUP,
|
||||
topics.UPDATE)
|
||||
|
||||
def security_groups_rule_updated(self, context, security_groups):
|
||||
"""Notify rule updated security groups."""
|
||||
if not security_groups:
|
||||
return
|
||||
self.fanout_cast(context,
|
||||
self.make_msg('security_groups_rule_updated',
|
||||
security_groups=security_groups),
|
||||
version=SG_RPC_VERSION,
|
||||
topic=self._get_security_group_topic())
|
||||
|
||||
def security_groups_member_updated(self, context, security_groups):
|
||||
"""Notify member updated security groups."""
|
||||
if not security_groups:
|
||||
return
|
||||
self.fanout_cast(context,
|
||||
self.make_msg('security_groups_member_updated',
|
||||
security_groups=security_groups),
|
||||
version=SG_RPC_VERSION,
|
||||
topic=self._get_security_group_topic())
|
||||
|
||||
def security_groups_provider_updated(self, context):
|
||||
"""Notify provider updated security groups."""
|
||||
self.fanout_cast(context,
|
||||
self.make_msg('security_groups_provider_updated'),
|
||||
version=SG_RPC_VERSION,
|
||||
topic=self._get_security_group_topic())
|
|
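The set arithmetic in setup_port_filters() above is the crux of deferred refresh; a sketch with example device ids:

# Example ids only.
new_devices = set(['dev-1'])
updated_devices = set(['dev-1', 'dev-2'])
devices_to_refilter = set(['dev-3'])

# New devices have just been fully prepared, so they are excluded from
# the refresh pass even if an update for them arrived in the meantime.
to_refresh = (updated_devices | devices_to_refilter) - new_devices
assert to_refresh == set(['dev-2', 'dev-3'])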
@@ -1,327 +0,0 @@
# Copyright 2011 Citrix System.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import urllib

from oslo.config import cfg
from webob import exc

from neutron.common import constants
from neutron.common import exceptions
from neutron.openstack.common import log as logging


LOG = logging.getLogger(__name__)


def get_filters(request, attr_info, skips=[]):
    """Extracts the filters from the request string.

    Returns a dict of lists for the filters:
    check=a&check=b&name=Bob&
    becomes:
    {'check': [u'a', u'b'], 'name': [u'Bob']}
    """
    res = {}
    for key, values in request.GET.dict_of_lists().iteritems():
        if key in skips:
            continue
        values = [v for v in values if v]
        key_attr_info = attr_info.get(key, {})
        if 'convert_list_to' in key_attr_info:
            values = key_attr_info['convert_list_to'](values)
        elif 'convert_to' in key_attr_info:
            convert_to = key_attr_info['convert_to']
            values = [convert_to(v) for v in values]
        if values:
            res[key] = values
    return res


def get_previous_link(request, items, id_key):
    params = request.GET.copy()
    params.pop('marker', None)
    if items:
        marker = items[0][id_key]
        params['marker'] = marker
    params['page_reverse'] = True
    return "%s?%s" % (request.path_url, urllib.urlencode(params))


def get_next_link(request, items, id_key):
    params = request.GET.copy()
    params.pop('marker', None)
    if items:
        marker = items[-1][id_key]
        params['marker'] = marker
    params.pop('page_reverse', None)
    return "%s?%s" % (request.path_url, urllib.urlencode(params))


def get_limit_and_marker(request):
    """Return marker, limit tuple from request.

    :param request: `wsgi.Request` possibly containing 'marker' and 'limit'
                    GET variables. 'marker' is the id of the last element
                    the client has seen, and 'limit' is the maximum number
                    of items to return. If limit == 0, no pagination is
                    needed and None is returned.
    """
    max_limit = _get_pagination_max_limit()
    limit = _get_limit_param(request, max_limit)
    if max_limit > 0:
        limit = min(max_limit, limit) or max_limit
    if not limit:
        return None, None
    marker = request.GET.get('marker', None)
    return limit, marker


def _get_pagination_max_limit():
    max_limit = -1
    if (cfg.CONF.pagination_max_limit.lower() !=
            constants.PAGINATION_INFINITE):
        try:
            max_limit = int(cfg.CONF.pagination_max_limit)
            if max_limit == 0:
                raise ValueError()
        except ValueError:
            LOG.warn(_("Invalid value for pagination_max_limit: %s. It "
                       "should be an integer greater than 0"),
                     cfg.CONF.pagination_max_limit)
    return max_limit


def _get_limit_param(request, max_limit):
    """Extract integer limit from request or fail."""
    try:
        limit = int(request.GET.get('limit', 0))
        if limit >= 0:
            return limit
    except ValueError:
        pass
    msg = _("Limit must be an integer 0 or greater and not '%d'")
    raise exceptions.BadRequest(resource='limit', msg=msg)


def list_args(request, arg):
    """Extracts the list of arg from request."""
    return [v for v in request.GET.getall(arg) if v]


def get_sorts(request, attr_info):
    """Extract sort_key and sort_dir from request.

    Return as: [(key1, value1), (key2, value2)]
    """
    sort_keys = list_args(request, "sort_key")
    sort_dirs = list_args(request, "sort_dir")
    if len(sort_keys) != len(sort_dirs):
        msg = _("The number of sort_keys and sort_dirs must be the same")
        raise exc.HTTPBadRequest(explanation=msg)
    valid_dirs = [constants.SORT_DIRECTION_ASC, constants.SORT_DIRECTION_DESC]
    absent_keys = [x for x in sort_keys if x not in attr_info]
    if absent_keys:
        msg = _("%s is an invalid attribute for sort_keys") % absent_keys
        raise exc.HTTPBadRequest(explanation=msg)
    invalid_dirs = [x for x in sort_dirs if x not in valid_dirs]
    if invalid_dirs:
        msg = (_("%(invalid_dirs)s is an invalid value for sort_dirs; "
                 "valid values are '%(asc)s' and '%(desc)s'") %
               {'invalid_dirs': invalid_dirs,
                'asc': constants.SORT_DIRECTION_ASC,
                'desc': constants.SORT_DIRECTION_DESC})
        raise exc.HTTPBadRequest(explanation=msg)
    return zip(sort_keys,
               [x == constants.SORT_DIRECTION_ASC for x in sort_dirs])
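A sketch of get_sorts() in action, assuming constants.SORT_DIRECTION_ASC/DESC are the strings 'asc' and 'desc'; the webob request and attribute map are built just for the example:

import webob

req = webob.Request.blank('/v2.0/networks?sort_key=name&sort_dir=asc'
                          '&sort_key=status&sort_dir=desc')
attr_info = {'name': {}, 'status': {}}  # minimal attribute map

# Expected result: [('name', True), ('status', False)] - True means
# ascending, matching the zip() at the end of get_sorts().
print get_sorts(req, attr_info)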
|
||||
|
||||
def get_page_reverse(request):
|
||||
data = request.GET.get('page_reverse', 'False')
|
||||
return data.lower() == "true"
|
||||
|
||||
|
||||
def get_pagination_links(request, items, limit,
|
||||
marker, page_reverse, key="id"):
|
||||
key = key if key else 'id'
|
||||
links = []
|
||||
if not limit:
|
||||
return links
|
||||
if not (len(items) < limit and not page_reverse):
|
||||
links.append({"rel": "next",
|
||||
"href": get_next_link(request, items,
|
||||
key)})
|
||||
if not (len(items) < limit and page_reverse):
|
||||
links.append({"rel": "previous",
|
||||
"href": get_previous_link(request, items,
|
||||
key)})
|
||||
return links
|
||||
|
||||
|
||||
class PaginationHelper(object):
|
||||
|
||||
def __init__(self, request, primary_key='id'):
|
||||
self.request = request
|
||||
self.primary_key = primary_key
|
||||
|
||||
def update_fields(self, original_fields, fields_to_add):
|
||||
pass
|
||||
|
||||
def update_args(self, args):
|
||||
pass
|
||||
|
||||
def paginate(self, items):
|
||||
return items
|
||||
|
||||
def get_links(self, items):
|
||||
return {}
|
||||
|
||||
|
||||
class PaginationEmulatedHelper(PaginationHelper):
|
||||
|
||||
def __init__(self, request, primary_key='id'):
|
||||
super(PaginationEmulatedHelper, self).__init__(request, primary_key)
|
||||
self.limit, self.marker = get_limit_and_marker(request)
|
||||
self.page_reverse = get_page_reverse(request)
|
||||
|
||||
def update_fields(self, original_fields, fields_to_add):
|
||||
if not original_fields:
|
||||
return
|
||||
if self.primary_key not in original_fields:
|
||||
original_fields.append(self.primary_key)
|
||||
fields_to_add.append(self.primary_key)
|
||||
|
||||
def paginate(self, items):
|
||||
if not self.limit:
|
||||
return items
|
||||
i = -1
|
||||
if self.marker:
|
||||
for item in items:
|
||||
i = i + 1
|
||||
if item[self.primary_key] == self.marker:
|
||||
break
|
||||
if self.page_reverse:
|
||||
return items[i - self.limit:i]
|
||||
return items[i + 1:i + self.limit + 1]
|
||||
|
||||
def get_links(self, items):
|
||||
return get_pagination_links(
|
||||
self.request, items, self.limit, self.marker,
|
||||
self.page_reverse, self.primary_key)
|
||||
|
||||
|
||||
class PaginationNativeHelper(PaginationEmulatedHelper):
|
||||
|
||||
def update_args(self, args):
|
||||
if self.primary_key not in dict(args.get('sorts', [])).keys():
|
||||
args.setdefault('sorts', []).append((self.primary_key, True))
|
||||
args.update({'limit': self.limit, 'marker': self.marker,
|
||||
'page_reverse': self.page_reverse})
|
||||
|
||||
def paginate(self, items):
|
||||
return items
|
||||
|
||||
|
||||
class NoPaginationHelper(PaginationHelper):
|
||||
pass
|
||||
|
||||
|
||||
class SortingHelper(object):
|
||||
|
||||
def __init__(self, request, attr_info):
|
||||
pass
|
||||
|
||||
def update_args(self, args):
|
||||
pass
|
||||
|
||||
def update_fields(self, original_fields, fields_to_add):
|
||||
pass
|
||||
|
||||
def sort(self, items):
|
||||
return items
|
||||
|
||||
|
||||
class SortingEmulatedHelper(SortingHelper):
|
||||
|
||||
def __init__(self, request, attr_info):
|
||||
super(SortingEmulatedHelper, self).__init__(request, attr_info)
|
||||
self.sort_dict = get_sorts(request, attr_info)
|
||||
|
||||
def update_fields(self, original_fields, fields_to_add):
|
||||
if not original_fields:
|
||||
return
|
||||
for key in dict(self.sort_dict).keys():
|
||||
if key not in original_fields:
|
||||
original_fields.append(key)
|
||||
fields_to_add.append(key)
|
||||
|
||||
def sort(self, items):
|
||||
def cmp_func(obj1, obj2):
|
||||
for key, direction in self.sort_dict:
|
||||
ret = cmp(obj1[key], obj2[key])
|
||||
if ret:
|
||||
return ret * (1 if direction else -1)
|
||||
return 0
|
||||
return sorted(items, cmp=cmp_func)
|
||||
|
||||
|
||||
class SortingNativeHelper(SortingHelper):
|
||||
|
||||
def __init__(self, request, attr_info):
|
||||
self.sort_dict = get_sorts(request, attr_info)
|
||||
|
||||
def update_args(self, args):
|
||||
args['sorts'] = self.sort_dict
|
||||
|
||||
|
||||
class NoSortingHelper(SortingHelper):
|
||||
pass
|
||||
|
||||
|
||||
class NeutronController(object):
|
||||
"""Base controller class for Neutron API."""
|
||||
# _resource_name will be redefined in sub concrete controller
|
||||
_resource_name = None
|
||||
|
||||
def __init__(self, plugin):
|
||||
self._plugin = plugin
|
||||
super(NeutronController, self).__init__()
|
||||
|
||||
def _prepare_request_body(self, body, params):
|
||||
"""Verifies required parameters are in request body.
|
||||
|
||||
Sets default value for missing optional parameters.
|
||||
Body argument must be the deserialized body.
|
||||
"""
|
||||
try:
|
||||
if body is None:
|
||||
# Initialize empty resource for setting default value
|
||||
body = {self._resource_name: {}}
|
||||
data = body[self._resource_name]
|
||||
except KeyError:
|
||||
# raise if _resource_name is not in req body.
|
||||
raise exc.HTTPBadRequest(_("Unable to find '%s' in request body") %
|
||||
self._resource_name)
|
||||
for param in params:
|
||||
param_name = param['param-name']
|
||||
param_value = data.get(param_name)
|
||||
# If the parameter wasn't found and it was required, return 400
|
||||
if param_value is None and param['required']:
|
||||
msg = (_("Failed to parse request. "
|
||||
"Parameter '%s' not specified") % param_name)
|
||||
LOG.error(msg)
|
||||
raise exc.HTTPBadRequest(msg)
|
||||
data[param_name] = param_value or param.get('default-value')
|
||||
return body
|
|
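# ---------------------------------------------------------------------------
# Illustrative sketch (not part of this commit): the marker-based slicing
# that PaginationEmulatedHelper.paginate() performs, isolated as plain
# Python. The sample items, limit and marker values are hypothetical stand-ins
# for what get_limit_and_marker() would extract from a real request.
items = [{'id': x} for x in 'abcde']
limit, marker = 2, 'c'
i = -1
for item in items:
    i = i + 1
    if item['id'] == marker:
        break
print(items[i + 1:i + limit + 1])  # forward page -> [{'id': 'd'}, {'id': 'e'}]
print(items[i - limit:i])          # reverse page -> [{'id': 'a'}, {'id': 'b'}]
# ---------------------------------------------------------------------------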
@@ -1,684 +0,0 @@
# Copyright 2011 OpenStack Foundation.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import abc
import imp
import itertools
import os

from oslo.config import cfg
import routes
import six
import webob.dec
import webob.exc

from neutron.api.v2 import attributes
from neutron.common import exceptions
import neutron.extensions
from neutron import manager
from neutron.openstack.common import log as logging
from neutron import policy
from neutron import wsgi


LOG = logging.getLogger(__name__)


@six.add_metaclass(abc.ABCMeta)
class PluginInterface(object):

    @classmethod
    def __subclasshook__(cls, klass):
        """Checking plugin class.

        The __subclasshook__ method is a class method
        that will be called every time a class is tested
        using issubclass(klass, PluginInterface).
        In that case, it will check that every method
        marked with the abstractmethod decorator is
        provided by the plugin class.
        """

        if not cls.__abstractmethods__:
            return NotImplemented

        for method in cls.__abstractmethods__:
            if any(method in base.__dict__ for base in klass.__mro__):
                continue
            return NotImplemented
        return True


class ExtensionDescriptor(object):
    """Base class that defines the contract for extensions.

    Note that you don't have to derive from this class to have a valid
    extension; it is purely a convenience.
    """

    def get_name(self):
        """The name of the extension.

        e.g. 'Fox In Socks'
        """
        raise NotImplementedError()

    def get_alias(self):
        """The alias for the extension.

        e.g. 'FOXNSOX'
        """
        raise NotImplementedError()

    def get_description(self):
        """Friendly description for the extension.

        e.g. 'The Fox In Socks Extension'
        """
        raise NotImplementedError()

    def get_namespace(self):
        """The XML namespace for the extension.

        e.g. 'http://www.fox.in.socks/api/ext/pie/v1.0'
        """
        raise NotImplementedError()

    def get_updated(self):
        """The timestamp when the extension was last updated.

        e.g. '2011-01-22T13:25:27-06:00'
        """
        # NOTE(justinsb): Not sure what the purpose of this is, vs the XML NS
        raise NotImplementedError()

    def get_resources(self):
        """List of extensions.ResourceExtension extension objects.

        Resources define new nouns, and are accessible through URLs.
        """
        resources = []
        return resources

    def get_actions(self):
        """List of extensions.ActionExtension extension objects.

        Actions are verbs callable from the API.
        """
        actions = []
        return actions

    def get_request_extensions(self):
        """List of extensions.RequestExtension extension objects.

        Request extensions are used to handle custom request data.
        """
        request_exts = []
        return request_exts

    def get_extended_resources(self, version):
        """Retrieve extended resources or attributes for core resources.

        Extended attributes are implemented by a core plugin similarly
        to the attributes defined in the core, and can appear in
        request and response messages. Their names are scoped with the
        extension's prefix. The core API version is passed to this
        function, which must return a
        map[<resource_name>][<attribute_name>][<attribute_property>]
        specifying the extended resource attribute properties required
        by that API version.

        Extension can add resources and their attr definitions too.
        The returned map can be integrated into RESOURCE_ATTRIBUTE_MAP.
        """
        return {}

    def get_plugin_interface(self):
        """Returns an abstract class which defines contract for the plugin.

        The abstract class should inherit from extensions.PluginInterface.
        Methods in this abstract class should be decorated as abstractmethod
        """
        return None

    def update_attributes_map(self, extended_attributes,
                              extension_attrs_map=None):
        """Update attributes map for this extension.

        This is the default method for extending an extension's attributes
        map. An extension can use this method, supplying its own resource
        attribute map in the extension_attrs_map argument, to extend all
        its attributes that need to be extended.

        If an extension does not implement update_attributes_map, the method
        does nothing and just returns.
        """
        if not extension_attrs_map:
            return

        for resource, attrs in extension_attrs_map.iteritems():
            extended_attrs = extended_attributes.get(resource)
            if extended_attrs:
                attrs.update(extended_attrs)

    def get_alias_namespace_compatibility_map(self):
        """Returns mappings between extension aliases and XML namespaces.

        The mappings are XML namespaces that should, for backward
        compatibility reasons, be added to the XML serialization of extended
        attributes. This allows an established extended attribute to be
        provided by another extension than the original one while keeping
        its old alias in the name.
        :return: A dictionary of extension_aliases and namespace strings.
        """
        return {}


class ActionExtensionController(wsgi.Controller):

    def __init__(self, application):
        self.application = application
        self.action_handlers = {}

    def add_action(self, action_name, handler):
        self.action_handlers[action_name] = handler

    def action(self, request, id):
        input_dict = self._deserialize(request.body,
                                       request.get_content_type())
        for action_name, handler in self.action_handlers.iteritems():
            if action_name in input_dict:
                return handler(input_dict, request, id)
        # no action handler found (bump to downstream application)
        response = self.application
        return response


class RequestExtensionController(wsgi.Controller):

    def __init__(self, application):
        self.application = application
        self.handlers = []

    def add_handler(self, handler):
        self.handlers.append(handler)

    def process(self, request, *args, **kwargs):
        res = request.get_response(self.application)
        # currently request handlers are un-ordered
        for handler in self.handlers:
            response = handler(request, res)
        return response


class ExtensionController(wsgi.Controller):

    def __init__(self, extension_manager):
        self.extension_manager = extension_manager

    def _translate(self, ext):
        ext_data = {}
        ext_data['name'] = ext.get_name()
        ext_data['alias'] = ext.get_alias()
        ext_data['description'] = ext.get_description()
        ext_data['namespace'] = ext.get_namespace()
        ext_data['updated'] = ext.get_updated()
        ext_data['links'] = []  # TODO(dprince): implement extension links
        return ext_data

    def index(self, request):
        extensions = []
        for _alias, ext in self.extension_manager.extensions.iteritems():
            extensions.append(self._translate(ext))
        return dict(extensions=extensions)

    def show(self, request, id):
        # NOTE(dprince): the extensions alias is used as the 'id' for show
        ext = self.extension_manager.extensions.get(id, None)
        if not ext:
            raise webob.exc.HTTPNotFound(
                _("Extension with alias %s does not exist") % id)
        return dict(extension=self._translate(ext))

    def delete(self, request, id):
        msg = _('Resource not found.')
        raise webob.exc.HTTPNotFound(msg)

    def create(self, request):
        msg = _('Resource not found.')
        raise webob.exc.HTTPNotFound(msg)


class ExtensionMiddleware(wsgi.Middleware):
    """Extensions middleware for WSGI."""

    def __init__(self, application,
                 ext_mgr=None):
        self.ext_mgr = (ext_mgr
                        or ExtensionManager(get_extensions_path()))
        mapper = routes.Mapper()

        # extended resources
        for resource in self.ext_mgr.get_resources():
            path_prefix = resource.path_prefix
            if resource.parent:
                path_prefix = (resource.path_prefix +
                               "/%s/{%s_id}" %
                               (resource.parent["collection_name"],
                                resource.parent["member_name"]))

            LOG.debug(_('Extended resource: %s'),
                      resource.collection)
            for action, method in resource.collection_actions.iteritems():
                conditions = dict(method=[method])
                path = "/%s/%s" % (resource.collection, action)
                with mapper.submapper(controller=resource.controller,
                                      action=action,
                                      path_prefix=path_prefix,
                                      conditions=conditions) as submap:
                    submap.connect(path)
                    submap.connect("%s.:(format)" % path)

            mapper.resource(resource.collection, resource.collection,
                            controller=resource.controller,
                            member=resource.member_actions,
                            parent_resource=resource.parent,
                            path_prefix=path_prefix)

        # extended actions
        action_controllers = self._action_ext_controllers(application,
                                                          self.ext_mgr, mapper)
        for action in self.ext_mgr.get_actions():
            LOG.debug(_('Extended action: %s'), action.action_name)
            controller = action_controllers[action.collection]
            controller.add_action(action.action_name, action.handler)

        # extended requests
        req_controllers = self._request_ext_controllers(application,
                                                        self.ext_mgr, mapper)
        for request_ext in self.ext_mgr.get_request_extensions():
            LOG.debug(_('Extended request: %s'), request_ext.key)
            controller = req_controllers[request_ext.key]
            controller.add_handler(request_ext.handler)

        self._router = routes.middleware.RoutesMiddleware(self._dispatch,
                                                          mapper)
        super(ExtensionMiddleware, self).__init__(application)

    @classmethod
    def factory(cls, global_config, **local_config):
        """Paste factory."""
        def _factory(app):
            return cls(app, global_config, **local_config)
        return _factory

    def _action_ext_controllers(self, application, ext_mgr, mapper):
        """Return a dict of ActionExtensionController-s by collection."""
        action_controllers = {}
        for action in ext_mgr.get_actions():
            if action.collection not in action_controllers.keys():
                controller = ActionExtensionController(application)
                mapper.connect("/%s/:(id)/action.:(format)" %
                               action.collection,
                               action='action',
                               controller=controller,
                               conditions=dict(method=['POST']))
                mapper.connect("/%s/:(id)/action" % action.collection,
                               action='action',
                               controller=controller,
                               conditions=dict(method=['POST']))
                action_controllers[action.collection] = controller

        return action_controllers

    def _request_ext_controllers(self, application, ext_mgr, mapper):
        """Returns a dict of RequestExtensionController-s by collection."""
        request_ext_controllers = {}
        for req_ext in ext_mgr.get_request_extensions():
            if req_ext.key not in request_ext_controllers.keys():
                controller = RequestExtensionController(application)
                mapper.connect(req_ext.url_route + '.:(format)',
                               action='process',
                               controller=controller,
                               conditions=req_ext.conditions)

                mapper.connect(req_ext.url_route,
                               action='process',
                               controller=controller,
                               conditions=req_ext.conditions)
                request_ext_controllers[req_ext.key] = controller

        return request_ext_controllers

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        """Route the incoming request with router."""
        req.environ['extended.app'] = self.application
        return self._router

    @staticmethod
    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def _dispatch(req):
        """Dispatch the request.

        Returns the routed WSGI app's response or defers to the extended
        application.
        """
        match = req.environ['wsgiorg.routing_args'][1]
        if not match:
            return req.environ['extended.app']
        app = match['controller']
        return app


def plugin_aware_extension_middleware_factory(global_config, **local_config):
    """Paste factory."""
    def _factory(app):
        ext_mgr = PluginAwareExtensionManager.get_instance()
        return ExtensionMiddleware(app, ext_mgr=ext_mgr)
    return _factory


class ExtensionManager(object):
    """Load extensions from the configured extension path.

    See tests/unit/extensions/foxinsocks.py for an
    example extension implementation.
    """

    def __init__(self, path):
        LOG.info(_('Initializing extension manager.'))
        self.path = path
        self.extensions = {}
        self._load_all_extensions()
        policy.reset()

    def get_resources(self):
        """Returns a list of ResourceExtension objects."""
        resources = []
        resources.append(ResourceExtension('extensions',
                                           ExtensionController(self)))
        for ext in self.extensions.itervalues():
            try:
                resources.extend(ext.get_resources())
            except AttributeError:
                # NOTE(dprince): Extensions aren't required to have resource
                # extensions
                pass
        return resources

    def get_actions(self):
        """Returns a list of ActionExtension objects."""
        actions = []
        for ext in self.extensions.itervalues():
            try:
                actions.extend(ext.get_actions())
            except AttributeError:
                # NOTE(dprince): Extensions aren't required to have action
                # extensions
                pass
        return actions

    def get_request_extensions(self):
        """Returns a list of RequestExtension objects."""
        request_exts = []
        for ext in self.extensions.itervalues():
            try:
                request_exts.extend(ext.get_request_extensions())
            except AttributeError:
                # NOTE(dprince): Extensions aren't required to have request
                # extensions
                pass
        return request_exts

    def extend_resources(self, version, attr_map):
        """Extend resources with additional resources or attributes.

        :param attr_map: the existing mapping from resource name to
        attrs definition.

        After this function, we will extend the attr_map if an extension
        wants to extend this map.
        """
        update_exts = []
        processed_exts = set()
        exts_to_process = self.extensions.copy()
        # Iterate while there are unprocessed extensions, bailing out if
        # no progress is made in a whole iteration
        while exts_to_process:
            processed_ext_count = len(processed_exts)
            for ext_name, ext in exts_to_process.items():
                if not hasattr(ext, 'get_extended_resources'):
                    del exts_to_process[ext_name]
                    continue
                if hasattr(ext, 'update_attributes_map'):
                    update_exts.append(ext)
                if hasattr(ext, 'get_required_extensions'):
                    # Process extension only if all required extensions
                    # have been processed already
                    required_exts_set = set(ext.get_required_extensions())
                    if required_exts_set - processed_exts:
                        continue
                try:
                    extended_attrs = ext.get_extended_resources(version)
                    for resource, resource_attrs in extended_attrs.iteritems():
                        if attr_map.get(resource, None):
                            attr_map[resource].update(resource_attrs)
                        else:
                            attr_map[resource] = resource_attrs
                    if extended_attrs:
                        attributes.EXT_NSES[ext.get_alias()] = (
                            ext.get_namespace())
                except AttributeError:
                    LOG.exception(_("Error fetching extended attributes for "
                                    "extension '%s'"), ext.get_name())
                try:
                    comp_map = ext.get_alias_namespace_compatibility_map()
                    attributes.EXT_NSES_BC.update(comp_map)
                except AttributeError:
                    LOG.info(_("Extension '%s' provides no backward "
                               "compatibility map for extended attributes"),
                             ext.get_name())
                processed_exts.add(ext_name)
                del exts_to_process[ext_name]
            if len(processed_exts) == processed_ext_count:
                # Exit loop as no progress was made
                break
        if exts_to_process:
            # NOTE(salv-orlando): Consider whether this error should be fatal
            LOG.error(_("It was impossible to process the following "
                        "extensions: %s because of missing requirements."),
                      ','.join(exts_to_process.keys()))

        # Extending extensions' attributes map.
        for ext in update_exts:
            ext.update_attributes_map(attr_map)

    def _check_extension(self, extension):
        """Checks for required methods in extension objects."""
        try:
            LOG.debug(_('Ext name: %s'), extension.get_name())
            LOG.debug(_('Ext alias: %s'), extension.get_alias())
            LOG.debug(_('Ext description: %s'), extension.get_description())
            LOG.debug(_('Ext namespace: %s'), extension.get_namespace())
            LOG.debug(_('Ext updated: %s'), extension.get_updated())
        except AttributeError as ex:
            LOG.exception(_("Exception loading extension: %s"), unicode(ex))
            return False
        return True

    def _load_all_extensions(self):
        """Load extensions from the configured path.

        The extension name is constructed from the module_name. If your
        extension module is named widgets.py, the extension class within that
        module should be 'Widgets'.

        See tests/unit/extensions/foxinsocks.py for an example extension
        implementation.
        """
        for path in self.path.split(':'):
            if os.path.exists(path):
                self._load_all_extensions_from_path(path)
            else:
                LOG.error(_("Extension path '%s' doesn't exist!"), path)

    def _load_all_extensions_from_path(self, path):
        # Sorting the extension list makes the order in which they
        # are loaded predictable across a cluster of load-balanced
        # Neutron Servers
        for f in sorted(os.listdir(path)):
            try:
                LOG.debug(_('Loading extension file: %s'), f)
                mod_name, file_ext = os.path.splitext(os.path.split(f)[-1])
                ext_path = os.path.join(path, f)
                if file_ext.lower() == '.py' and not mod_name.startswith('_'):
                    mod = imp.load_source(mod_name, ext_path)
                    ext_name = mod_name[0].upper() + mod_name[1:]
                    new_ext_class = getattr(mod, ext_name, None)
                    if not new_ext_class:
                        LOG.warn(_('Did not find expected name '
                                   '"%(ext_name)s" in %(file)s'),
                                 {'ext_name': ext_name,
                                  'file': ext_path})
                        continue
                    new_ext = new_ext_class()
                    self.add_extension(new_ext)
            except Exception as exception:
                LOG.warn(_("Extension file %(f)s wasn't loaded due to "
                           "%(exception)s"), {'f': f, 'exception': exception})

    def add_extension(self, ext):
        # Do nothing if the extension doesn't check out
        if not self._check_extension(ext):
            return

        alias = ext.get_alias()
        LOG.info(_('Loaded extension: %s'), alias)

        if alias in self.extensions:
            raise exceptions.DuplicatedExtension(alias=alias)
        self.extensions[alias] = ext


class PluginAwareExtensionManager(ExtensionManager):

    _instance = None

    def __init__(self, path, plugins):
        self.plugins = plugins
        super(PluginAwareExtensionManager, self).__init__(path)
        self.check_if_plugin_extensions_loaded()

    def _check_extension(self, extension):
        """Check if an extension is supported by any plugin."""
        extension_is_valid = super(PluginAwareExtensionManager,
                                   self)._check_extension(extension)
        return (extension_is_valid and
                self._plugins_support(extension) and
                self._plugins_implement_interface(extension))

    def _plugins_support(self, extension):
        alias = extension.get_alias()
        supports_extension = any((hasattr(plugin,
                                          "supported_extension_aliases") and
                                  alias in plugin.supported_extension_aliases)
                                 for plugin in self.plugins.values())
        if not supports_extension:
            LOG.warn(_("Extension %s not supported by any of loaded plugins"),
                     alias)
        return supports_extension

    def _plugins_implement_interface(self, extension):
        if (not hasattr(extension, "get_plugin_interface") or
                extension.get_plugin_interface() is None):
            return True
        for plugin in self.plugins.values():
            if isinstance(plugin, extension.get_plugin_interface()):
                return True
        LOG.warn(_("Loaded plugins do not implement extension %s interface"),
                 extension.get_alias())
        return False

    @classmethod
    def get_instance(cls):
        if cls._instance is None:
            cls._instance = cls(get_extensions_path(),
                                manager.NeutronManager.get_service_plugins())
        return cls._instance

    def check_if_plugin_extensions_loaded(self):
        """Check if an extension supported by a plugin has been loaded."""
        plugin_extensions = set(itertools.chain.from_iterable([
            getattr(plugin, "supported_extension_aliases", [])
            for plugin in self.plugins.values()]))
        missing_aliases = plugin_extensions - set(self.extensions)
        if missing_aliases:
            raise exceptions.ExtensionsNotFound(
                extensions=list(missing_aliases))


class RequestExtension(object):
    """Extend requests and responses of core Neutron OpenStack API controllers.

    Provide a way to add data to responses and handle custom request data
    that is sent to core Neutron OpenStack API controllers.
    """

    def __init__(self, method, url_route, handler):
        self.url_route = url_route
        self.handler = handler
        self.conditions = dict(method=[method])
        self.key = "%s-%s" % (method, url_route)


class ActionExtension(object):
    """Add custom actions to core Neutron OpenStack API controllers."""

    def __init__(self, collection, action_name, handler):
        self.collection = collection
        self.action_name = action_name
        self.handler = handler


class ResourceExtension(object):
    """Add top level resources to the OpenStack API in Neutron."""

    def __init__(self, collection, controller, parent=None, path_prefix="",
                 collection_actions={}, member_actions={}, attr_map={}):
        self.collection = collection
        self.controller = controller
        self.parent = parent
        self.collection_actions = collection_actions
        self.member_actions = member_actions
        self.path_prefix = path_prefix
        self.attr_map = attr_map


# Returns the extension paths from a config entry and the __path__
# of neutron.extensions
def get_extensions_path():
    paths = ':'.join(neutron.extensions.__path__)
    if cfg.CONF.api_extensions_path:
        paths = ':'.join([cfg.CONF.api_extensions_path, paths])

    return paths


def append_api_extensions_path(paths):
    paths = [cfg.CONF.api_extensions_path] + paths
    cfg.CONF.set_override('api_extensions_path',
                          ':'.join([p for p in paths if p]))
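# ---------------------------------------------------------------------------
# Illustrative sketch (not part of this commit): the smallest class that
# satisfies the ExtensionDescriptor contract above. The 'Widgets' extension
# and its attribute map are hypothetical; a real extension module would live
# on api_extensions_path and be named after its class (widgets.py -> Widgets).
class Widgets(ExtensionDescriptor):

    def get_name(self):
        return "Widgets"

    def get_alias(self):
        return "widgets"

    def get_description(self):
        return "Example extension adding a 'widget' attribute to networks"

    def get_namespace(self):
        return "http://docs.openstack.org/ext/widgets/api/v1.0"

    def get_updated(self):
        return "2014-01-01T00:00:00-00:00"

    def get_extended_resources(self, version):
        # Returned map follows the documented
        # map[<resource_name>][<attribute_name>][<attribute_property>] shape
        # and gets merged into RESOURCE_ATTRIBUTE_MAP by extend_resources().
        if version == "2.0":
            return {'networks': {'widget': {'allow_post': True,
                                            'allow_put': True,
                                            'default': '',
                                            'is_visible': True}}}
        return {}
# ---------------------------------------------------------------------------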
@@ -1,177 +0,0 @@
# Copyright (c) 2013 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from neutron.common import constants
from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron.common import utils
from neutron import manager
from neutron.openstack.common import log as logging


LOG = logging.getLogger(__name__)


class DhcpAgentNotifyAPI(n_rpc.RpcProxy):
    """API for plugin to notify DHCP agent."""
    BASE_RPC_API_VERSION = '1.0'
    # It seems dhcp agent does not support bulk operation
    VALID_RESOURCES = ['network', 'subnet', 'port']
    VALID_METHOD_NAMES = ['network.create.end',
                          'network.update.end',
                          'network.delete.end',
                          'subnet.create.end',
                          'subnet.update.end',
                          'subnet.delete.end',
                          'port.create.end',
                          'port.update.end',
                          'port.delete.end']

    def __init__(self, topic=topics.DHCP_AGENT, plugin=None):
        super(DhcpAgentNotifyAPI, self).__init__(
            topic=topic, default_version=self.BASE_RPC_API_VERSION)
        self._plugin = plugin

    @property
    def plugin(self):
        if self._plugin is None:
            self._plugin = manager.NeutronManager.get_plugin()
        return self._plugin

    def _schedule_network(self, context, network, existing_agents):
        """Schedule the network to new agents.

        :return: all agents associated with the network
        """
        new_agents = self.plugin.schedule_network(context, network) or []
        if new_agents:
            for agent in new_agents:
                self._cast_message(
                    context, 'network_create_end',
                    {'network': {'id': network['id']}}, agent['host'])
        elif not existing_agents:
            LOG.warn(_('Unable to schedule network %s: no agents available; '
                       'will retry on subsequent port creation events.'),
                     network['id'])
        return new_agents + existing_agents

    def _get_enabled_agents(self, context, network, agents, method, payload):
        """Get the list of agents whose admin_state is UP."""
        network_id = network['id']
        enabled_agents = [x for x in agents if x.admin_state_up]
        active_agents = [x for x in agents if x.is_active]
        len_enabled_agents = len(enabled_agents)
        len_active_agents = len(active_agents)
        if len_active_agents < len_enabled_agents:
            LOG.warn(_("Only %(active)d of %(total)d DHCP agents associated "
                       "with network '%(net_id)s' are marked as active, so "
                       "notifications may be sent to inactive agents.")
                     % {'active': len_active_agents,
                        'total': len_enabled_agents,
                        'net_id': network_id})
        if not enabled_agents:
            num_ports = self.plugin.get_ports_count(
                context, {'network_id': [network_id]})
            notification_required = (
                num_ports > 0 and len(network['subnets']) >= 1)
            if notification_required:
                LOG.error(_("Will not send event %(method)s for network "
                            "%(net_id)s: no agent available. Payload: "
                            "%(payload)s")
                          % {'method': method,
                             'net_id': network_id,
                             'payload': payload})
        return enabled_agents

    def _notify_agents(self, context, method, payload, network_id):
        """Notify all the agents that are hosting the network."""
        # fanout is required as we do not know who is "listening"
        no_agents = not utils.is_extension_supported(
            self.plugin, constants.DHCP_AGENT_SCHEDULER_EXT_ALIAS)
        fanout_required = method == 'network_delete_end' or no_agents

        # we do nothing on network creation because we want to give the
        # admin the chance to associate an agent to the network manually
        cast_required = method != 'network_create_end'

        if fanout_required:
            self._fanout_message(context, method, payload)
        elif cast_required:
            admin_ctx = (context if context.is_admin else context.elevated())
            network = self.plugin.get_network(admin_ctx, network_id)
            agents = self.plugin.get_dhcp_agents_hosting_networks(
                context, [network_id])

            # schedule the network first, if needed
            schedule_required = method == 'port_create_end'
            if schedule_required:
                agents = self._schedule_network(admin_ctx, network, agents)

            enabled_agents = self._get_enabled_agents(
                context, network, agents, method, payload)
            for agent in enabled_agents:
                self._cast_message(
                    context, method, payload, agent.host, agent.topic)

    def _cast_message(self, context, method, payload, host,
                      topic=topics.DHCP_AGENT):
        """Cast the payload to the dhcp agent running on the host."""
        self.cast(
            context, self.make_msg(method,
                                   payload=payload),
            topic='%s.%s' % (topic, host))

    def _fanout_message(self, context, method, payload):
        """Fanout the payload to all dhcp agents."""
        self.fanout_cast(
            context, self.make_msg(method,
                                   payload=payload),
            topic=topics.DHCP_AGENT)

    def network_removed_from_agent(self, context, network_id, host):
        self._cast_message(context, 'network_delete_end',
                           {'network_id': network_id}, host)

    def network_added_to_agent(self, context, network_id, host):
        self._cast_message(context, 'network_create_end',
                           {'network': {'id': network_id}}, host)

    def agent_updated(self, context, admin_state_up, host):
        self._cast_message(context, 'agent_updated',
                           {'admin_state_up': admin_state_up}, host)

    def notify(self, context, data, method_name):
        # data is {'key' : 'value'} with only one key
        if method_name not in self.VALID_METHOD_NAMES:
            return
        obj_type = data.keys()[0]
        if obj_type not in self.VALID_RESOURCES:
            return
        obj_value = data[obj_type]
        network_id = None
        if obj_type == 'network' and 'id' in obj_value:
            network_id = obj_value['id']
        elif obj_type in ['port', 'subnet'] and 'network_id' in obj_value:
            network_id = obj_value['network_id']
        if not network_id:
            return
        method_name = method_name.replace(".", "_")
        if method_name.endswith("_delete_end"):
            if 'id' in obj_value:
                self._notify_agents(context, method_name,
                                    {obj_type + '_id': obj_value['id']},
                                    network_id)
        else:
            self._notify_agents(context, method_name, data, network_id)
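# ---------------------------------------------------------------------------
# Illustrative sketch (not part of this commit): plugin-side use of the
# notifier above. 'admin_context' and the uuids are hypothetical stand-ins
# for values supplied by Neutron's API layer.
notifier = DhcpAgentNotifyAPI()
payload = {'port': {'id': 'port-uuid', 'network_id': 'net-uuid'}}
# notify() rewrites 'port.create.end' to 'port_create_end'; for that method
# _notify_agents() also schedules the network onto a DHCP agent first.
notifier.notify(admin_context, payload, 'port.create.end')
# ---------------------------------------------------------------------------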
@@ -1,149 +0,0 @@
# Copyright (c) 2013 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from neutron.common import constants
from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron.common import utils
from neutron import manager
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants as service_constants


LOG = logging.getLogger(__name__)


class L3AgentNotifyAPI(n_rpc.RpcProxy):
    """API for plugin to notify L3 agent."""
    BASE_RPC_API_VERSION = '1.0'

    def __init__(self, topic=topics.L3_AGENT):
        super(L3AgentNotifyAPI, self).__init__(
            topic=topic, default_version=self.BASE_RPC_API_VERSION)

    def _notification_host(self, context, method, payload, host):
        """Notify the agent that is hosting the router."""
        LOG.debug(_('Notify agent at %(host)s the message '
                    '%(method)s'), {'host': host,
                                    'method': method})
        self.cast(
            context, self.make_msg(method,
                                   payload=payload),
            topic='%s.%s' % (topics.L3_AGENT, host))

    def _agent_notification(self, context, method, router_ids,
                            operation, data):
        """Notify changed routers to hosting l3 agents."""
        adminContext = context.is_admin and context or context.elevated()
        plugin = manager.NeutronManager.get_service_plugins().get(
            service_constants.L3_ROUTER_NAT)
        for router_id in router_ids:
            l3_agents = plugin.get_l3_agents_hosting_routers(
                adminContext, [router_id],
                admin_state_up=True,
                active=True)
            for l3_agent in l3_agents:
                LOG.debug(_('Notify agent at %(topic)s.%(host)s the message '
                            '%(method)s'),
                          {'topic': l3_agent.topic,
                           'host': l3_agent.host,
                           'method': method})
                self.cast(
                    context, self.make_msg(method,
                                           routers=[router_id]),
                    topic='%s.%s' % (l3_agent.topic, l3_agent.host),
                    version='1.1')

    def _agent_notification_arp(self, context, method, router_id,
                                operation, data):
        """Notify arp details to l3 agents hosting router."""
        if not router_id:
            return
        adminContext = (context.is_admin and
                        context or context.elevated())
        plugin = manager.NeutronManager.get_service_plugins().get(
            service_constants.L3_ROUTER_NAT)
        l3_agents = (plugin.
                     get_l3_agents_hosting_routers(adminContext,
                                                   [router_id],
                                                   admin_state_up=True,
                                                   active=True))
        for l3_agent in l3_agents:
            LOG.debug(_('Notify agent at %(topic)s.%(host)s the message '
                        '%(method)s'),
                      {'topic': l3_agent.topic,
                       'host': l3_agent.host,
                       'method': method})
            dvr_arptable = {'router_id': router_id,
                            'arp_table': data}
            self.cast(
                context, self.make_msg(method,
                                       payload=dvr_arptable),
                topic='%s.%s' % (l3_agent.topic, l3_agent.host),
                version='1.1')

    def _notification(self, context, method, router_ids, operation, data):
        """Notify all the agents that are hosting the routers."""
        plugin = manager.NeutronManager.get_service_plugins().get(
            service_constants.L3_ROUTER_NAT)
        if not plugin:
            LOG.error(_('No plugin for L3 routing registered. Cannot notify '
                        'agents with the message %s'), method)
            return
        if utils.is_extension_supported(
                plugin, constants.L3_AGENT_SCHEDULER_EXT_ALIAS):
            adminContext = (context.is_admin and
                            context or context.elevated())
            plugin.schedule_routers(adminContext, router_ids, hints=data)
            self._agent_notification(
                context, method, router_ids, operation, data)
        else:
            self.fanout_cast(
                context, self.make_msg(method,
                                       routers=router_ids),
                topic=topics.L3_AGENT)

    def _notification_fanout(self, context, method, router_id):
        """Fanout the deleted router to all L3 agents."""
        LOG.debug(_('Fanout notify agent at %(topic)s the message '
                    '%(method)s on router %(router_id)s'),
                  {'topic': topics.L3_AGENT,
                   'method': method,
                   'router_id': router_id})
        self.fanout_cast(
            context, self.make_msg(method,
                                   router_id=router_id),
            topic=topics.L3_AGENT)

    def agent_updated(self, context, admin_state_up, host):
        self._notification_host(context, 'agent_updated',
                                {'admin_state_up': admin_state_up},
                                host)

    def router_deleted(self, context, router_id):
        self._notification_fanout(context, 'router_deleted', router_id)

    def routers_updated(self, context, router_ids, operation=None, data=None):
        if router_ids:
            self._notification(context, 'routers_updated', router_ids,
                               operation, data)

    def router_removed_from_agent(self, context, router_id, host):
        self._notification_host(context, 'router_removed_from_agent',
                                {'router_id': router_id}, host)

    def router_added_to_agent(self, context, router_ids, host):
        self._notification_host(context, 'router_added_to_agent',
                                router_ids, host)
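# ---------------------------------------------------------------------------
# Illustrative sketch (not part of this commit): plugin-side calls into the
# notifier above. 'admin_context' and the router id are hypothetical.
l3_notifier = L3AgentNotifyAPI()
# Updates are cast only to the l3 agents hosting the routers (after
# scheduling, when the l3 agent scheduler extension is available) ...
l3_notifier.routers_updated(admin_context, ['router-uuid'])
# ... while deletions fan out, since any agent may still hold router state.
l3_notifier.router_deleted(admin_context, 'router-uuid')
# ---------------------------------------------------------------------------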
@@ -1,99 +0,0 @@
# Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
#
# Author: Sylvain Afchain <sylvain.afchain@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from neutron.common import constants
from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron.common import utils
from neutron import manager
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants as service_constants


LOG = logging.getLogger(__name__)


class MeteringAgentNotifyAPI(n_rpc.RpcProxy):
    """API for plugin to notify L3 metering agent."""
    BASE_RPC_API_VERSION = '1.0'

    def __init__(self, topic=topics.METERING_AGENT):
        super(MeteringAgentNotifyAPI, self).__init__(
            topic=topic, default_version=self.BASE_RPC_API_VERSION)

    def _agent_notification(self, context, method, routers):
        """Notify l3 metering agents hosted by l3 agent hosts."""
        adminContext = context.is_admin and context or context.elevated()
        plugin = manager.NeutronManager.get_service_plugins().get(
            service_constants.L3_ROUTER_NAT)

        l3_routers = {}
        for router in routers:
            l3_agents = plugin.get_l3_agents_hosting_routers(
                adminContext, [router['id']],
                admin_state_up=True,
                active=True)
            for l3_agent in l3_agents:
                LOG.debug(_('Notify metering agent at %(topic)s.%(host)s '
                            'the message %(method)s'),
                          {'topic': self.topic,
                           'host': l3_agent.host,
                           'method': method})

                l3_router = l3_routers.get(l3_agent.host, [])
                l3_router.append(router)
                l3_routers[l3_agent.host] = l3_router

        for host, routers in l3_routers.iteritems():
            self.cast(context, self.make_msg(method, routers=routers),
                      topic='%s.%s' % (self.topic, host))

    def _notification_fanout(self, context, method, router_id):
        LOG.debug(_('Fanout notify metering agent at %(topic)s the message '
                    '%(method)s on router %(router_id)s'),
                  {'topic': self.topic,
                   'method': method,
                   'router_id': router_id})
        self.fanout_cast(
            context, self.make_msg(method,
                                   router_id=router_id),
            topic=self.topic)

    def _notification(self, context, method, routers):
        """Notify all the agents that are hosting the routers."""
        plugin = manager.NeutronManager.get_service_plugins().get(
            service_constants.L3_ROUTER_NAT)
        if utils.is_extension_supported(
                plugin, constants.L3_AGENT_SCHEDULER_EXT_ALIAS):
            self._agent_notification(context, method, routers)
        else:
            self.fanout_cast(context, self.make_msg(method, routers=routers),
                             topic=self.topic)

    def router_deleted(self, context, router_id):
        self._notification_fanout(context, 'router_deleted', router_id)

    def routers_updated(self, context, routers):
        if routers:
            self._notification(context, 'routers_updated', routers)

    def update_metering_label_rules(self, context, routers):
        self._notification(context, 'update_metering_label_rules', routers)

    def add_metering_label(self, context, routers):
        self._notification(context, 'add_metering_label', routers)

    def remove_metering_label(self, context, routers):
        self._notification(context, 'remove_metering_label', routers)
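# ---------------------------------------------------------------------------
# Illustrative sketch (not part of this commit): the per-host grouping that
# _agent_notification() above performs before casting, isolated as plain
# Python. The router ids and agent placement below are hypothetical.
routers = [{'id': 'r1'}, {'id': 'r2'}]
hosting = {'r1': ['host-a'], 'r2': ['host-a', 'host-b']}
l3_routers = {}
for router in routers:
    for host in hosting[router['id']]:
        l3_routers.setdefault(host, []).append(router)
# l3_routers is now {'host-a': [r1, r2], 'host-b': [r2]}; the notifier then
# issues one cast per host instead of one per router/agent pair.
# ---------------------------------------------------------------------------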
@@ -1,122 +0,0 @@
# Copyright 2014, Hewlett Packard, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from neutron.common import log
from neutron.common import topics
from neutron import manager
from neutron.openstack.common import log as logging

LOG = logging.getLogger(__name__)


class DVRServerRpcApiMixin(object):
    """Agent-side RPC (stub) for agent-to-plugin interaction."""

    DVR_RPC_VERSION = "1.0"

    @log.log
    def get_dvr_mac_address_by_host(self, context, host):
        return self.call(context,
                         self.make_msg('get_dvr_mac_address_by_host',
                                       host=host),
                         version=self.DVR_RPC_VERSION,
                         topic=self.topic)

    @log.log
    def get_dvr_mac_address_list(self, context):
        return self.call(context,
                         self.make_msg('get_dvr_mac_address_list'),
                         version=self.DVR_RPC_VERSION,
                         topic=self.topic)

    @log.log
    def get_compute_ports_on_host_by_subnet(self, context, host, subnet):
        return self.call(context,
                         self.make_msg('get_compute_ports_on_host_by_subnet',
                                       host=host,
                                       subnet=subnet),
                         version=self.DVR_RPC_VERSION,
                         topic=self.topic)

    @log.log
    def get_subnet_for_dvr(self, context, subnet):
        return self.call(context,
                         self.make_msg('get_subnet_for_dvr',
                                       subnet=subnet),
                         version=self.DVR_RPC_VERSION,
                         topic=self.topic)


class DVRServerRpcCallbackMixin(object):
    """Plugin-side RPC (implementation) for agent-to-plugin interaction."""

    @property
    def plugin(self):
        if not getattr(self, '_plugin', None):
            self._plugin = manager.NeutronManager.get_plugin()
        return self._plugin

    def get_dvr_mac_address_list(self, context):
        return self.plugin.get_dvr_mac_address_list(context)

    def get_dvr_mac_address_by_host(self, context, host):
        return self.plugin.get_dvr_mac_address_by_host(context, host)

    def get_compute_ports_on_host_by_subnet(self, context, host, subnet):
        return self.plugin.get_compute_ports_on_host_by_subnet(context,
                                                               host,
                                                               subnet)

    def get_subnet_for_dvr(self, context, subnet):
        return self.plugin.get_subnet_for_dvr(context, subnet)


class DVRAgentRpcApiMixin(object):
    """Plugin-side RPC (stub) for plugin-to-agent interaction."""

    DVR_RPC_VERSION = "1.0"

    def _get_dvr_update_topic(self):
        return topics.get_topic_name(self.topic,
                                     topics.DVR,
                                     topics.UPDATE)

    def dvr_mac_address_update(self, context, dvr_macs):
        """Notify dvr mac address updates."""
        if not dvr_macs:
            return
        self.fanout_cast(context,
                         self.make_msg('dvr_mac_address_update',
                                       dvr_macs=dvr_macs),
                         version=self.DVR_RPC_VERSION,
                         topic=self._get_dvr_update_topic())


class DVRAgentRpcCallbackMixin(object):
    """Agent-side RPC (implementation) for plugin-to-agent interaction."""

    dvr_agent = None

    def dvr_mac_address_update(self, context, **kwargs):
        """Callback for dvr_mac_addresses update.

        :param dvr_macs: list of updated dvr_macs
        """
        dvr_macs = kwargs.get('dvr_macs', [])
        LOG.debug("dvr_macs updated on remote: %s", dvr_macs)
        if not self.dvr_agent:
            LOG.warn(_("DVR agent binding currently not set."))
            return
        self.dvr_agent.dvr_mac_address_update(dvr_macs)
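# ---------------------------------------------------------------------------
# Illustrative sketch (not part of this commit): how an L2 agent would wire
# the plugin-to-agent callback mixin above. FakeDVRManager stands in for the
# agent's real DVR flow manager; both class names are hypothetical.
class FakeDVRManager(object):
    def dvr_mac_address_update(self, dvr_macs):
        print('would reprogram DVR flows for: %s' % dvr_macs)


class FakeL2Agent(DVRAgentRpcCallbackMixin):
    pass


_agent = FakeL2Agent()
_agent.dvr_agent = FakeDVRManager()
# This is the entry point the RPC layer invokes on a fanout cast:
_agent.dvr_mac_address_update(None,
                              dvr_macs=[{'host': 'compute-1',
                                         'mac_address': 'fa:16:3f:00:00:01'}])
# ---------------------------------------------------------------------------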
@@ -1,777 +0,0 @@
# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import netaddr
import re

from neutron.common import constants
from neutron.common import exceptions as n_exc
from neutron.openstack.common import log as logging
from neutron.openstack.common import uuidutils


LOG = logging.getLogger(__name__)

ATTR_NOT_SPECIFIED = object()
# Defining a constant to avoid repeating string literal in several modules
SHARED = 'shared'

# Used by range check to indicate no limit for a bound.
UNLIMITED = None


def _verify_dict_keys(expected_keys, target_dict, strict=True):
    """Verify the keys in a dictionary.

    :param expected_keys: A list of keys expected to be present.
    :param target_dict: The dictionary which should be verified.
    :param strict: Specifies whether additional keys are allowed to be present.
    :return: None if the keys match the specification, otherwise an error
        message.
    """
    if not isinstance(target_dict, dict):
        msg = (_("Invalid input. '%(target_dict)s' must be a dictionary "
                 "with keys: %(expected_keys)s") %
               {'target_dict': target_dict, 'expected_keys': expected_keys})
        return msg

    expected_keys = set(expected_keys)
    provided_keys = set(target_dict.keys())

    predicate = expected_keys.__eq__ if strict else expected_keys.issubset

    if not predicate(provided_keys):
        msg = (_("Validation of dictionary's keys failed. "
                 "Expected keys: %(expected_keys)s "
                 "Provided keys: %(provided_keys)s") %
               {'expected_keys': expected_keys,
                'provided_keys': provided_keys})
        return msg


def is_attr_set(attribute):
    return not (attribute is None or attribute is ATTR_NOT_SPECIFIED)


def _validate_values(data, valid_values=None):
    if data not in valid_values:
        msg = (_("'%(data)s' is not in %(valid_values)s") %
               {'data': data, 'valid_values': valid_values})
        LOG.debug(msg)
        return msg


def _validate_not_empty_string_or_none(data, max_len=None):
    if data is not None:
        return _validate_not_empty_string(data, max_len=max_len)


def _validate_not_empty_string(data, max_len=None):
    msg = _validate_string(data, max_len=max_len)
    if msg:
        return msg
    if not data.strip():
        return _("'%s' Blank strings are not permitted") % data


def _validate_string_or_none(data, max_len=None):
    if data is not None:
        return _validate_string(data, max_len=max_len)


def _validate_string(data, max_len=None):
    if not isinstance(data, basestring):
        msg = _("'%s' is not a valid string") % data
        LOG.debug(msg)
        return msg

    if max_len is not None and len(data) > max_len:
        msg = (_("'%(data)s' exceeds maximum length of %(max_len)s") %
               {'data': data, 'max_len': max_len})
        LOG.debug(msg)
        return msg


def _validate_boolean(data, valid_values=None):
    try:
        convert_to_boolean(data)
    except n_exc.InvalidInput:
        msg = _("'%s' is not a valid boolean value") % data
        LOG.debug(msg)
        return msg


def _validate_range(data, valid_values=None):
    """Check that integer value is within a range provided.

    Test is inclusive. Allows either limit to be ignored, to allow
    checking ranges where only the lower or upper limit matter.
    It is expected that the limits provided are valid integers or
    the value None.
    """

    min_value = valid_values[0]
    max_value = valid_values[1]
    try:
        data = int(data)
    except (ValueError, TypeError):
        msg = _("'%s' is not an integer") % data
        LOG.debug(msg)
        return msg
    if min_value is not UNLIMITED and data < min_value:
        msg = _("'%(data)s' is too small - must be at least "
                "'%(limit)d'") % {'data': data, 'limit': min_value}
        LOG.debug(msg)
        return msg
    if max_value is not UNLIMITED and data > max_value:
        msg = _("'%(data)s' is too large - must be no larger than "
                "'%(limit)d'") % {'data': data, 'limit': max_value}
        LOG.debug(msg)
        return msg


def _validate_no_whitespace(data):
    """Validates that input has no whitespace."""
    if len(data.split()) > 1:
        msg = _("'%s' contains whitespace") % data
        LOG.debug(msg)
        raise n_exc.InvalidInput(error_message=msg)
    return data


def _validate_mac_address(data, valid_values=None):
    valid_mac = False
    try:
        valid_mac = netaddr.valid_mac(_validate_no_whitespace(data))
    except Exception:
        pass
    finally:
        # TODO(arosen): The code in this file should be refactored
        # so it catches the correct exceptions. _validate_no_whitespace
        # raises AttributeError if data is None.
        if valid_mac is False:
            msg = _("'%s' is not a valid MAC address") % data
            LOG.debug(msg)
            return msg


def _validate_mac_address_or_none(data, valid_values=None):
    if data is None:
        return
    return _validate_mac_address(data, valid_values)


def _validate_ip_address(data, valid_values=None):
    try:
        netaddr.IPAddress(_validate_no_whitespace(data))
    except Exception:
        msg = _("'%s' is not a valid IP address") % data
        LOG.debug(msg)
        return msg


def _validate_ip_pools(data, valid_values=None):
    """Validate that start and end IP addresses are present.

    In addition to this, the IP addresses will also be validated.
    """
    if not isinstance(data, list):
        msg = _("Invalid data format for IP pool: '%s'") % data
        LOG.debug(msg)
        return msg

    expected_keys = ['start', 'end']
    for ip_pool in data:
        msg = _verify_dict_keys(expected_keys, ip_pool)
        if msg:
            LOG.debug(msg)
            return msg
        for k in expected_keys:
            msg = _validate_ip_address(ip_pool[k])
            if msg:
                LOG.debug(msg)
                return msg


def _validate_fixed_ips(data, valid_values=None):
    if not isinstance(data, list):
        msg = _("Invalid data format for fixed IP: '%s'") % data
        LOG.debug(msg)
        return msg

    ips = []
    for fixed_ip in data:
        if not isinstance(fixed_ip, dict):
            msg = _("Invalid data format for fixed IP: '%s'") % fixed_ip
            LOG.debug(msg)
            return msg
        if 'ip_address' in fixed_ip:
            # Ensure that duplicate entries are not set - just checking IP
            # suffices. Duplicate subnet_id's are legitimate.
            fixed_ip_address = fixed_ip['ip_address']
            if fixed_ip_address in ips:
                msg = _("Duplicate IP address '%s'") % fixed_ip_address
            else:
                msg = _validate_ip_address(fixed_ip_address)
            if msg:
                LOG.debug(msg)
                return msg
            ips.append(fixed_ip_address)
        if 'subnet_id' in fixed_ip:
            msg = _validate_uuid(fixed_ip['subnet_id'])
            if msg:
                LOG.debug(msg)
                return msg


def _validate_nameservers(data, valid_values=None):
    if not hasattr(data, '__iter__'):
        msg = _("Invalid data format for nameserver: '%s'") % data
        LOG.debug(msg)
        return msg

    ips = []
    for ip in data:
        msg = _validate_ip_address(ip)
        if msg:
            # This may be a hostname
            msg = _validate_regex(ip, HOSTNAME_PATTERN)
            if msg:
                msg = _("'%s' is not a valid nameserver") % ip
                LOG.debug(msg)
                return msg
        if ip in ips:
            msg = _("Duplicate nameserver '%s'") % ip
            LOG.debug(msg)
            return msg
        ips.append(ip)


def _validate_hostroutes(data, valid_values=None):
    if not isinstance(data, list):
        msg = _("Invalid data format for hostroute: '%s'") % data
        LOG.debug(msg)
        return msg

    expected_keys = ['destination', 'nexthop']
    hostroutes = []
    for hostroute in data:
        msg = _verify_dict_keys(expected_keys, hostroute)
        if msg:
            LOG.debug(msg)
            return msg
        msg = _validate_subnet(hostroute['destination'])
        if msg:
            LOG.debug(msg)
            return msg
        msg = _validate_ip_address(hostroute['nexthop'])
        if msg:
            LOG.debug(msg)
            return msg
        if hostroute in hostroutes:
            msg = _("Duplicate hostroute '%s'") % hostroute
            LOG.debug(msg)
            return msg
        hostroutes.append(hostroute)


def _validate_ip_address_or_none(data, valid_values=None):
    if data is None:
        return None
    return _validate_ip_address(data, valid_values)


def _validate_subnet(data, valid_values=None):
    msg = None
    try:
        net = netaddr.IPNetwork(_validate_no_whitespace(data))
        if '/' not in data:
            msg = _("'%(data)s' isn't a recognized IP subnet cidr,"
                    " '%(cidr)s' is recommended") % {"data": data,
                                                     "cidr": net.cidr}
        else:
            return
    except Exception:
        msg = _("'%s' is not a valid IP subnet") % data
    if msg:
        LOG.debug(msg)
    return msg


def _validate_subnet_list(data, valid_values=None):
    if not isinstance(data, list):
        msg = _("'%s' is not a list") % data
        LOG.debug(msg)
        return msg

    if len(set(data)) != len(data):
        msg = _("Duplicate items in the list: '%s'") % ', '.join(data)
        LOG.debug(msg)
        return msg

    for item in data:
        msg = _validate_subnet(item)
        if msg:
            return msg


def _validate_subnet_or_none(data, valid_values=None):
    if data is None:
        return
    return _validate_subnet(data, valid_values)


def _validate_regex(data, valid_values=None):
    try:
        if re.match(valid_values, data):
            return
    except TypeError:
        pass

    msg = _("'%s' is not a valid input") % data
    LOG.debug(msg)
    return msg


def _validate_regex_or_none(data, valid_values=None):
    if data is None:
        return
    return _validate_regex(data, valid_values)


def _validate_uuid(data, valid_values=None):
    if not uuidutils.is_uuid_like(data):
        msg = _("'%s' is not a valid UUID") % data
        LOG.debug(msg)
        return msg


def _validate_uuid_or_none(data, valid_values=None):
    if data is not None:
        return _validate_uuid(data)


def _validate_uuid_list(data, valid_values=None):
    if not isinstance(data, list):
        msg = _("'%s' is not a list") % data
        LOG.debug(msg)
        return msg

    for item in data:
        msg = _validate_uuid(item)
        if msg:
            LOG.debug(msg)
            return msg

    if len(set(data)) != len(data):
        msg = _("Duplicate items in the list: '%s'") % ', '.join(data)
        LOG.debug(msg)
        return msg


def _validate_dict_item(key, key_validator, data):
|
||||
# Find conversion function, if any, and apply it
|
||||
conv_func = key_validator.get('convert_to')
|
||||
if conv_func:
|
||||
data[key] = conv_func(data.get(key))
|
||||
# Find validator function
|
||||
# TODO(salv-orlando): Structure of dict attributes should be improved
|
||||
# to avoid iterating over items
|
||||
val_func = val_params = None
|
||||
for (k, v) in key_validator.iteritems():
|
||||
if k.startswith('type:'):
|
||||
# ask forgiveness, not permission
|
||||
try:
|
||||
val_func = validators[k]
|
||||
except KeyError:
|
||||
return _("Validator '%s' does not exist.") % k
|
||||
val_params = v
|
||||
break
|
||||
# Process validation
|
||||
if val_func:
|
||||
return val_func(data.get(key), val_params)
|
||||
|
||||
|
||||
def _validate_dict(data, key_specs=None):
|
||||
if not isinstance(data, dict):
|
||||
msg = _("'%s' is not a dictionary") % data
|
||||
LOG.debug(msg)
|
||||
return msg
|
||||
# Do not perform any further validation, if no constraints are supplied
|
||||
if not key_specs:
|
||||
return
|
||||
|
||||
# Check whether all required keys are present
|
||||
required_keys = [key for key, spec in key_specs.iteritems()
|
||||
if spec.get('required')]
|
||||
|
||||
if required_keys:
|
||||
msg = _verify_dict_keys(required_keys, data, False)
|
||||
if msg:
|
||||
LOG.debug(msg)
|
||||
return msg
|
||||
|
||||
# Perform validation and conversion of all values
|
||||
# according to the specifications.
|
||||
for key, key_validator in [(k, v) for k, v in key_specs.iteritems()
|
||||
if k in data]:
|
||||
msg = _validate_dict_item(key, key_validator, data)
|
||||
if msg:
|
||||
LOG.debug(msg)
|
||||
return msg
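

# Illustrative note (not part of the original module): a key_specs entry
# combines a 'type:' rule from the validators dictionary below with
# optional 'convert_to' and 'required' hooks, e.g. (hypothetical spec):
#
#     key_specs = {'id': {'type:uuid': None, 'required': True},
#                  'weight': {'type:non_negative': None,
#                             'convert_to': convert_to_int}}
#
# _validate_dict({'id': 'x', 'weight': '5'}, key_specs) converts and
# validates each value that is present in turn; the malformed 'id'
# yields the error string "'x' is not a valid UUID".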


def _validate_dict_or_none(data, key_specs=None):
    if data is not None:
        return _validate_dict(data, key_specs)


def _validate_dict_or_empty(data, key_specs=None):
    if data != {}:
        return _validate_dict(data, key_specs)


def _validate_dict_or_nodata(data, key_specs=None):
    if data:
        return _validate_dict(data, key_specs)


def _validate_non_negative(data, valid_values=None):
    try:
        data = int(data)
    except (ValueError, TypeError):
        msg = _("'%s' is not an integer") % data
        LOG.debug(msg)
        return msg

    if data < 0:
        msg = _("'%s' should be non-negative") % data
        LOG.debug(msg)
        return msg


def convert_to_boolean(data):
    if isinstance(data, basestring):
        val = data.lower()
        if val == "true" or val == "1":
            return True
        if val == "false" or val == "0":
            return False
    elif isinstance(data, bool):
        return data
    elif isinstance(data, int):
        if data == 0:
            return False
        elif data == 1:
            return True
    msg = _("'%s' cannot be converted to boolean") % data
    raise n_exc.InvalidInput(error_message=msg)


def convert_to_boolean_if_not_none(data):
    if data is not None:
        return convert_to_boolean(data)


def convert_to_int(data):
    try:
        return int(data)
    except (ValueError, TypeError):
        msg = _("'%s' is not an integer") % data
        raise n_exc.InvalidInput(error_message=msg)


def convert_kvp_str_to_list(data):
    """Convert a value of the form 'key=value' to ['key', 'value'].

    :raises: n_exc.InvalidInput if any of the strings are malformed
             (e.g. do not contain a key).
    """
    kvp = [x.strip() for x in data.split('=', 1)]
    if len(kvp) == 2 and kvp[0]:
        return kvp
    msg = _("'%s' is not of the form <key>=[value]") % data
    raise n_exc.InvalidInput(error_message=msg)


def convert_kvp_list_to_dict(kvp_list):
    """Convert a list of 'key=value' strings to a dict.

    :raises: n_exc.InvalidInput if any of the strings are malformed
             (e.g. do not contain a key) or if any
             of the keys appear more than once.
    """
    if kvp_list == ['True']:
        # No values were provided (i.e. '--flag-name')
        return {}
    kvp_map = {}
    for kvp_str in kvp_list:
        key, value = convert_kvp_str_to_list(kvp_str)
        kvp_map.setdefault(key, set())
        kvp_map[key].add(value)
    return dict((x, list(y)) for x, y in kvp_map.iteritems())
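

# Illustrative examples (not part of the original module) for the two
# kvp helpers above:
#
#     convert_kvp_str_to_list('k=v')            # -> ['k', 'v']
#     convert_kvp_list_to_dict(['k=v', 'k=w'])  # -> {'k': ['v', 'w']}
#
# As implemented, duplicate keys have their values collected into one
# list (list order is unspecified because a set is used internally).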


def convert_none_to_empty_list(value):
    return [] if value is None else value


def convert_none_to_empty_dict(value):
    return {} if value is None else value


def convert_to_list(data):
    if data is None:
        return []
    elif hasattr(data, '__iter__'):
        return list(data)
    else:
        return [data]


HOSTNAME_PATTERN = ("(?=^.{1,254}$)(^(?:(?!\d+\.|-)[a-zA-Z0-9_\-]"
                    "{1,63}(?<!-)\.?)+(?:[a-zA-Z]{2,})$)")

HEX_ELEM = '[0-9A-Fa-f]'
UUID_PATTERN = '-'.join([HEX_ELEM + '{8}', HEX_ELEM + '{4}',
                         HEX_ELEM + '{4}', HEX_ELEM + '{4}',
                         HEX_ELEM + '{12}'])
# Note: In order to ensure that the MAC address is unicast the first byte
# must be even.
MAC_PATTERN = "^%s[aceACE02468](:%s{2}){5}$" % (HEX_ELEM, HEX_ELEM)

# Dictionary that maintains a list of validation functions
validators = {'type:dict': _validate_dict,
              'type:dict_or_none': _validate_dict_or_none,
              'type:dict_or_empty': _validate_dict_or_empty,
              'type:dict_or_nodata': _validate_dict_or_nodata,
              'type:fixed_ips': _validate_fixed_ips,
              'type:hostroutes': _validate_hostroutes,
              'type:ip_address': _validate_ip_address,
              'type:ip_address_or_none': _validate_ip_address_or_none,
              'type:ip_pools': _validate_ip_pools,
              'type:mac_address': _validate_mac_address,
              'type:mac_address_or_none': _validate_mac_address_or_none,
              'type:nameservers': _validate_nameservers,
              'type:non_negative': _validate_non_negative,
              'type:range': _validate_range,
              'type:regex': _validate_regex,
              'type:regex_or_none': _validate_regex_or_none,
              'type:string': _validate_string,
              'type:string_or_none': _validate_string_or_none,
              'type:not_empty_string': _validate_not_empty_string,
              'type:not_empty_string_or_none':
              _validate_not_empty_string_or_none,
              'type:subnet': _validate_subnet,
              'type:subnet_list': _validate_subnet_list,
              'type:subnet_or_none': _validate_subnet_or_none,
              'type:uuid': _validate_uuid,
              'type:uuid_or_none': _validate_uuid_or_none,
              'type:uuid_list': _validate_uuid_list,
              'type:values': _validate_values,
              'type:boolean': _validate_boolean}

# Define constants for base resource name
NETWORK = 'network'
NETWORKS = '%ss' % NETWORK
PORT = 'port'
PORTS = '%ss' % PORT
SUBNET = 'subnet'
SUBNETS = '%ss' % SUBNET
# Note: a default of ATTR_NOT_SPECIFIED indicates that an
# attribute is not required, but will be generated by the plugin
# if it is not specified. Particularly, a value of ATTR_NOT_SPECIFIED
# is different from an attribute that has been specified with a value of
# None. For example, if 'gateway_ip' is omitted in a request to
# create a subnet, the plugin will receive ATTR_NOT_SPECIFIED
# and the default gateway_ip will be generated.
# However, if gateway_ip is specified as None, this means that
# the subnet does not have a gateway IP.
# The following is a short reference for understanding attribute info:
# default: default value of the attribute (if missing, the attribute
#     becomes mandatory).
# allow_post: the attribute can be used on POST requests.
# allow_put: the attribute can be used on PUT requests.
# validate: specifies rules for validating data in the attribute.
# convert_to: transformation to apply to the value before it is returned.
# is_visible: the attribute is returned in GET responses.
# required_by_policy: the attribute is required by the policy engine and
#     should therefore be filled by the API layer even if not present in
#     request body.
# enforce_policy: the attribute is actively part of the policy enforcing
#     mechanism, i.e. there might be rules which refer to this attribute.

RESOURCE_ATTRIBUTE_MAP = {
    NETWORKS: {
        'id': {'allow_post': False, 'allow_put': False,
               'validate': {'type:uuid': None},
               'is_visible': True,
               'primary_key': True},
        'name': {'allow_post': True, 'allow_put': True,
                 'validate': {'type:string': None},
                 'default': '', 'is_visible': True},
        'subnets': {'allow_post': False, 'allow_put': False,
                    'default': [],
                    'is_visible': True},
        'admin_state_up': {'allow_post': True, 'allow_put': True,
                           'default': True,
                           'convert_to': convert_to_boolean,
                           'is_visible': True},
        'status': {'allow_post': False, 'allow_put': False,
                   'is_visible': True},
        'tenant_id': {'allow_post': True, 'allow_put': False,
                      'validate': {'type:string': None},
                      'required_by_policy': True,
                      'is_visible': True},
        SHARED: {'allow_post': True,
                 'allow_put': True,
                 'default': False,
                 'convert_to': convert_to_boolean,
                 'is_visible': True,
                 'required_by_policy': True,
                 'enforce_policy': True},
    },
    PORTS: {
        'id': {'allow_post': False, 'allow_put': False,
               'validate': {'type:uuid': None},
               'is_visible': True,
               'primary_key': True},
        'name': {'allow_post': True, 'allow_put': True, 'default': '',
                 'validate': {'type:string': None},
                 'is_visible': True},
        'network_id': {'allow_post': True, 'allow_put': False,
                       'required_by_policy': True,
                       'validate': {'type:uuid': None},
                       'is_visible': True},
        'admin_state_up': {'allow_post': True, 'allow_put': True,
                           'default': True,
                           'convert_to': convert_to_boolean,
                           'is_visible': True},
        'mac_address': {'allow_post': True, 'allow_put': False,
                        'default': ATTR_NOT_SPECIFIED,
                        'validate': {'type:mac_address': None},
                        'enforce_policy': True,
                        'is_visible': True},
        'fixed_ips': {'allow_post': True, 'allow_put': True,
                      'default': ATTR_NOT_SPECIFIED,
                      'convert_list_to': convert_kvp_list_to_dict,
                      'validate': {'type:fixed_ips': None},
                      'enforce_policy': True,
                      'is_visible': True},
        'device_id': {'allow_post': True, 'allow_put': True,
                      'validate': {'type:string': None},
                      'default': '',
                      'is_visible': True},
        'device_owner': {'allow_post': True, 'allow_put': True,
                         'validate': {'type:string': None},
                         'default': '',
                         'is_visible': True},
        'tenant_id': {'allow_post': True, 'allow_put': False,
                      'validate': {'type:string': None},
                      'required_by_policy': True,
                      'is_visible': True},
        'status': {'allow_post': False, 'allow_put': False,
                   'is_visible': True},
    },
    SUBNETS: {
        'id': {'allow_post': False, 'allow_put': False,
               'validate': {'type:uuid': None},
               'is_visible': True,
               'primary_key': True},
        'name': {'allow_post': True, 'allow_put': True, 'default': '',
                 'validate': {'type:string': None},
                 'is_visible': True},
        'ip_version': {'allow_post': True, 'allow_put': False,
                       'convert_to': convert_to_int,
                       'validate': {'type:values': [4, 6]},
                       'is_visible': True},
        'network_id': {'allow_post': True, 'allow_put': False,
                       'required_by_policy': True,
                       'validate': {'type:uuid': None},
                       'is_visible': True},
        'cidr': {'allow_post': True, 'allow_put': False,
                 'validate': {'type:subnet': None},
                 'is_visible': True},
        'gateway_ip': {'allow_post': True, 'allow_put': True,
                       'default': ATTR_NOT_SPECIFIED,
                       'validate': {'type:ip_address_or_none': None},
                       'is_visible': True},
        'allocation_pools': {'allow_post': True, 'allow_put': True,
                             'default': ATTR_NOT_SPECIFIED,
                             'validate': {'type:ip_pools': None},
                             'is_visible': True},
        'dns_nameservers': {'allow_post': True, 'allow_put': True,
                            'convert_to': convert_none_to_empty_list,
                            'default': ATTR_NOT_SPECIFIED,
                            'validate': {'type:nameservers': None},
                            'is_visible': True},
        'host_routes': {'allow_post': True, 'allow_put': True,
                        'convert_to': convert_none_to_empty_list,
                        'default': ATTR_NOT_SPECIFIED,
                        'validate': {'type:hostroutes': None},
                        'is_visible': True},
        'tenant_id': {'allow_post': True, 'allow_put': False,
                      'validate': {'type:string': None},
                      'required_by_policy': True,
                      'is_visible': True},
        'enable_dhcp': {'allow_post': True, 'allow_put': True,
                        'default': True,
                        'convert_to': convert_to_boolean,
                        'is_visible': True},
        'ipv6_ra_mode': {'allow_post': True, 'allow_put': True,
                         'default': ATTR_NOT_SPECIFIED,
                         'validate': {'type:values': constants.IPV6_MODES},
                         'is_visible': True},
        'ipv6_address_mode': {'allow_post': True, 'allow_put': True,
                              'default': ATTR_NOT_SPECIFIED,
                              'validate': {'type:values':
                                           constants.IPV6_MODES},
                              'is_visible': True},
        SHARED: {'allow_post': False,
                 'allow_put': False,
                 'default': False,
                 'convert_to': convert_to_boolean,
                 'is_visible': False,
                 'required_by_policy': True,
                 'enforce_policy': True},
    }
}
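

# Illustrative sketch (values are hypothetical): each 'validate' entry in
# the map above names a rule in the validators dictionary, and a rule
# returns an error string on failure or None on success:
#
#     spec = RESOURCE_ATTRIBUTE_MAP[PORTS]['network_id']['validate']
#     for rule, params in spec.items():
#         err = validators[rule]('not-a-uuid', params)
#     # err == "'not-a-uuid' is not a valid UUID"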


# Identify the attribute used by a resource to reference another resource

RESOURCE_FOREIGN_KEYS = {
    NETWORKS: 'network_id'
}

PLURALS = {NETWORKS: NETWORK,
           PORTS: PORT,
           SUBNETS: SUBNET,
           'dns_nameservers': 'dns_nameserver',
           'host_routes': 'host_route',
           'allocation_pools': 'allocation_pool',
           'fixed_ips': 'fixed_ip',
           'extensions': 'extension'}
EXT_NSES = {}

# Namespaces to be added for backward compatibility
# when existing extended resource attributes are
# provided by an extension other than the original one.
EXT_NSES_BC = {}


def get_attr_metadata():
    return {'plurals': PLURALS,
            'xmlns': constants.XML_NS_V20,
            constants.EXT_NS: EXT_NSES,
            constants.EXT_NS_COMP: EXT_NSES_BC}

@ -1,677 +0,0 @@
# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import copy

import netaddr
import webob.exc

from oslo.config import cfg

from neutron.api import api_common
from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
from neutron.api.v2 import attributes
from neutron.api.v2 import resource as wsgi_resource
from neutron.common import constants as const
from neutron.common import exceptions
from neutron.common import rpc as n_rpc
from neutron.openstack.common import log as logging
from neutron import policy
from neutron import quota


LOG = logging.getLogger(__name__)

FAULT_MAP = {exceptions.NotFound: webob.exc.HTTPNotFound,
             exceptions.Conflict: webob.exc.HTTPConflict,
             exceptions.InUse: webob.exc.HTTPConflict,
             exceptions.BadRequest: webob.exc.HTTPBadRequest,
             exceptions.ServiceUnavailable: webob.exc.HTTPServiceUnavailable,
             exceptions.NotAuthorized: webob.exc.HTTPForbidden,
             netaddr.AddrFormatError: webob.exc.HTTPBadRequest,
             }


class Controller(object):
    LIST = 'list'
    SHOW = 'show'
    CREATE = 'create'
    UPDATE = 'update'
    DELETE = 'delete'

    def __init__(self, plugin, collection, resource, attr_info,
                 allow_bulk=False, member_actions=None, parent=None,
                 allow_pagination=False, allow_sorting=False):
        if member_actions is None:
            member_actions = []
        self._plugin = plugin
        self._collection = collection.replace('-', '_')
        self._resource = resource.replace('-', '_')
        self._attr_info = attr_info
        self._allow_bulk = allow_bulk
        self._allow_pagination = allow_pagination
        self._allow_sorting = allow_sorting
        self._native_bulk = self._is_native_bulk_supported()
        self._native_pagination = self._is_native_pagination_supported()
        self._native_sorting = self._is_native_sorting_supported()
        self._policy_attrs = [name for (name, info) in self._attr_info.items()
                              if info.get('required_by_policy')]
        self._notifier = n_rpc.get_notifier('network')
        # use plugin's dhcp notifier, if this is already instantiated
        agent_notifiers = getattr(plugin, 'agent_notifiers', {})
        self._dhcp_agent_notifier = (
            agent_notifiers.get(const.AGENT_TYPE_DHCP) or
            dhcp_rpc_agent_api.DhcpAgentNotifyAPI()
        )
        if cfg.CONF.notify_nova_on_port_data_changes:
            from neutron.notifiers import nova
            self._nova_notifier = nova.Notifier()
        self._member_actions = member_actions
        self._primary_key = self._get_primary_key()
        if self._allow_pagination and self._native_pagination:
            # Native pagination needs native sorting support
            if not self._native_sorting:
                raise exceptions.Invalid(
                    _("Native pagination depends on native sorting")
                )
            if not self._allow_sorting:
                LOG.info(_("Allow sorting is enabled because native "
                           "pagination requires native sorting"))
                self._allow_sorting = True

        if parent:
            self._parent_id_name = '%s_id' % parent['member_name']
            parent_part = '_%s' % parent['member_name']
        else:
            self._parent_id_name = None
            parent_part = ''
        self._plugin_handlers = {
            self.LIST: 'get%s_%s' % (parent_part, self._collection),
            self.SHOW: 'get%s_%s' % (parent_part, self._resource)
        }
        for action in [self.CREATE, self.UPDATE, self.DELETE]:
            self._plugin_handlers[action] = '%s%s_%s' % (action, parent_part,
                                                         self._resource)

    def _get_primary_key(self, default_primary_key='id'):
        for key, value in self._attr_info.iteritems():
            if value.get('primary_key', False):
                return key
        return default_primary_key

    def _is_native_bulk_supported(self):
        native_bulk_attr_name = ("_%s__native_bulk_support"
                                 % self._plugin.__class__.__name__)
        return getattr(self._plugin, native_bulk_attr_name, False)

    def _is_native_pagination_supported(self):
        native_pagination_attr_name = ("_%s__native_pagination_support"
                                       % self._plugin.__class__.__name__)
        return getattr(self._plugin, native_pagination_attr_name, False)

    def _is_native_sorting_supported(self):
        native_sorting_attr_name = ("_%s__native_sorting_support"
                                    % self._plugin.__class__.__name__)
        return getattr(self._plugin, native_sorting_attr_name, False)
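
    # Illustrative note (not part of the original class): the three
    # helpers above read a name-mangled class attribute, so a plugin
    # opts in with a private flag, e.g. (hypothetical plugin):
    #
    #     class MyPlugin(object):
    #         __native_bulk_support = True
    #         # mangled to _MyPlugin__native_bulk_support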

    def _exclude_attributes_by_policy(self, context, data):
        """Identifies attributes to exclude according to authZ policies.

        Return a list of attribute names which should be stripped from the
        response returned to the user because the user is not authorized
        to see them.
        """
        attributes_to_exclude = []
        for attr_name in data.keys():
            attr_data = self._attr_info.get(attr_name)
            if attr_data and attr_data['is_visible']:
                if policy.check(
                    context,
                    '%s:%s' % (self._plugin_handlers[self.SHOW], attr_name),
                    data,
                    might_not_exist=True):
                    # this attribute is visible, check next one
                    continue
            # if the code reaches this point then either the policy check
            # failed or the attribute was not visible in the first place
            attributes_to_exclude.append(attr_name)
        return attributes_to_exclude

    def _view(self, context, data, fields_to_strip=None):
        """Build a view of an API resource.

        :param context: the neutron context
        :param data: the object for which a view is being created
        :param fields_to_strip: attributes to remove from the view

        :returns: a view of the object which includes only attributes
            visible according to API resource declaration and authZ policies.
        """
        fields_to_strip = ((fields_to_strip or []) +
                           self._exclude_attributes_by_policy(context, data))
        return self._filter_attributes(context, data, fields_to_strip)

    def _filter_attributes(self, context, data, fields_to_strip=None):
        if not fields_to_strip:
            return data
        return dict(item for item in data.iteritems()
                    if (item[0] not in fields_to_strip))

    def _do_field_list(self, original_fields):
        fields_to_add = None
        # don't do anything if fields were not specified in the request
        if original_fields:
            fields_to_add = [attr for attr in self._policy_attrs
                             if attr not in original_fields]
            original_fields.extend(self._policy_attrs)
        return original_fields, fields_to_add

    def __getattr__(self, name):
        if name in self._member_actions:
            def _handle_action(request, id, **kwargs):
                arg_list = [request.context, id]
                # Ensure policy engine is initialized
                policy.init()
                # Fetch the resource and verify if the user can access it
                try:
                    resource = self._item(request, id, True)
                except exceptions.PolicyNotAuthorized:
                    msg = _('The resource could not be found.')
                    raise webob.exc.HTTPNotFound(msg)
                body = kwargs.pop('body', None)
                # Explicit comparison with None to distinguish from {}
                if body is not None:
                    arg_list.append(body)
                # It is ok to raise a 403 because accessibility to the
                # object was checked earlier in this method
                policy.enforce(request.context, name, resource)
                return getattr(self._plugin, name)(*arg_list, **kwargs)
            return _handle_action
        else:
            raise AttributeError

    def _get_pagination_helper(self, request):
        if self._allow_pagination and self._native_pagination:
            return api_common.PaginationNativeHelper(request,
                                                     self._primary_key)
        elif self._allow_pagination:
            return api_common.PaginationEmulatedHelper(request,
                                                       self._primary_key)
        return api_common.NoPaginationHelper(request, self._primary_key)

    def _get_sorting_helper(self, request):
        if self._allow_sorting and self._native_sorting:
            return api_common.SortingNativeHelper(request, self._attr_info)
        elif self._allow_sorting:
            return api_common.SortingEmulatedHelper(request, self._attr_info)
        return api_common.NoSortingHelper(request, self._attr_info)

    def _items(self, request, do_authz=False, parent_id=None):
        """Retrieves and formats a list of elements of the requested entity."""
        # NOTE(salvatore-orlando): The following ensures that fields which
        # are needed for authZ policy validation are not stripped away by the
        # plugin before returning.
        original_fields, fields_to_add = self._do_field_list(
            api_common.list_args(request, 'fields'))
        filters = api_common.get_filters(request, self._attr_info,
                                         ['fields', 'sort_key', 'sort_dir',
                                          'limit', 'marker', 'page_reverse'])
        kwargs = {'filters': filters,
                  'fields': original_fields}
        sorting_helper = self._get_sorting_helper(request)
        pagination_helper = self._get_pagination_helper(request)
        sorting_helper.update_args(kwargs)
        sorting_helper.update_fields(original_fields, fields_to_add)
        pagination_helper.update_args(kwargs)
        pagination_helper.update_fields(original_fields, fields_to_add)
        if parent_id:
            kwargs[self._parent_id_name] = parent_id
        obj_getter = getattr(self._plugin, self._plugin_handlers[self.LIST])
        obj_list = obj_getter(request.context, **kwargs)
        obj_list = sorting_helper.sort(obj_list)
        obj_list = pagination_helper.paginate(obj_list)
        # Check authz
        if do_authz:
            # FIXME(salvatore-orlando): obj_getter might return references to
            # other resources. Must check authZ on them too.
            # Omit items from list that should not be visible
            obj_list = [obj for obj in obj_list
                        if policy.check(request.context,
                                        self._plugin_handlers[self.SHOW],
                                        obj,
                                        plugin=self._plugin)]
        # Use the first element in the list for discriminating which attributes
        # should be filtered out because of authZ policies
        # fields_to_add contains a list of attributes added for request policy
        # checks but that were not required by the user. They should therefore
        # be stripped
        fields_to_strip = fields_to_add or []
        if obj_list:
            fields_to_strip += self._exclude_attributes_by_policy(
                request.context, obj_list[0])
        collection = {self._collection:
                      [self._filter_attributes(
                          request.context, obj,
                          fields_to_strip=fields_to_strip)
                       for obj in obj_list]}
        pagination_links = pagination_helper.get_links(obj_list)
        if pagination_links:
            collection[self._collection + "_links"] = pagination_links
        return collection

    def _item(self, request, id, do_authz=False, field_list=None,
              parent_id=None):
        """Retrieves and formats a single element of the requested entity."""
        kwargs = {'fields': field_list}
        action = self._plugin_handlers[self.SHOW]
        if parent_id:
            kwargs[self._parent_id_name] = parent_id
        obj_getter = getattr(self._plugin, action)
        obj = obj_getter(request.context, id, **kwargs)
        # Check authz
        # FIXME(salvatore-orlando): obj_getter might return references to
        # other resources. Must check authZ on them too.
        if do_authz:
            policy.enforce(request.context, action, obj)
        return obj

    def _send_dhcp_notification(self, context, data, methodname):
        if cfg.CONF.dhcp_agent_notification:
            if self._collection in data:
                for body in data[self._collection]:
                    item = {self._resource: body}
                    self._dhcp_agent_notifier.notify(context, item, methodname)
            else:
                self._dhcp_agent_notifier.notify(context, data, methodname)

    def _send_nova_notification(self, action, orig, returned):
        if hasattr(self, '_nova_notifier'):
            self._nova_notifier.send_network_change(action, orig, returned)

    def index(self, request, **kwargs):
        """Returns a list of the requested entity."""
        parent_id = kwargs.get(self._parent_id_name)
        # Ensure policy engine is initialized
        policy.init()
        return self._items(request, True, parent_id)

    def show(self, request, id, **kwargs):
        """Returns detailed information about the requested entity."""
        try:
            # NOTE(salvatore-orlando): The following ensures that fields
            # which are needed for authZ policy validation are not stripped
            # away by the plugin before returning.
            field_list, added_fields = self._do_field_list(
                api_common.list_args(request, "fields"))
            parent_id = kwargs.get(self._parent_id_name)
            # Ensure policy engine is initialized
            policy.init()
            return {self._resource:
                    self._view(request.context,
                               self._item(request,
                                          id,
                                          do_authz=True,
                                          field_list=field_list,
                                          parent_id=parent_id),
                               fields_to_strip=added_fields)}
        except exceptions.PolicyNotAuthorized:
            # To avoid giving away information, pretend that it
            # doesn't exist
            msg = _('The resource could not be found.')
            raise webob.exc.HTTPNotFound(msg)

    def _emulate_bulk_create(self, obj_creator, request, body, parent_id=None):
        objs = []
        try:
            for item in body[self._collection]:
                kwargs = {self._resource: item}
                if parent_id:
                    kwargs[self._parent_id_name] = parent_id
                fields_to_strip = self._exclude_attributes_by_policy(
                    request.context, item)
                objs.append(self._filter_attributes(
                    request.context,
                    obj_creator(request.context, **kwargs),
                    fields_to_strip=fields_to_strip))
            return objs
        # Note(salvatore-orlando): broad catch as in theory a plugin
        # could raise any kind of exception
        except Exception as ex:
            for obj in objs:
                obj_deleter = getattr(self._plugin,
                                      self._plugin_handlers[self.DELETE])
                try:
                    kwargs = ({self._parent_id_name: parent_id} if parent_id
                              else {})
                    obj_deleter(request.context, obj['id'], **kwargs)
                except Exception:
                    # broad catch as our only purpose is to log the exception
                    LOG.exception(_("Unable to undo add for "
                                    "%(resource)s %(id)s"),
                                  {'resource': self._resource,
                                   'id': obj['id']})
            # TODO(salvatore-orlando): The object being processed when the
            # plugin raised might have been created or not in the db.
            # We need a way for ensuring that if it has been created,
            # it is then deleted
            raise ex

    def create(self, request, body=None, **kwargs):
        """Creates a new instance of the requested entity."""
        parent_id = kwargs.get(self._parent_id_name)
        self._notifier.info(request.context,
                            self._resource + '.create.start',
                            body)
        body = Controller.prepare_request_body(request.context, body, True,
                                               self._resource, self._attr_info,
                                               allow_bulk=self._allow_bulk)
        action = self._plugin_handlers[self.CREATE]
        # Check authz
        if self._collection in body:
            # Have to account for bulk create
            items = body[self._collection]
            deltas = {}
            bulk = True
        else:
            items = [body]
            bulk = False
        # Ensure policy engine is initialized
        policy.init()
        for item in items:
            self._validate_network_tenant_ownership(request,
                                                    item[self._resource])
            policy.enforce(request.context,
                           action,
                           item[self._resource])
            try:
                tenant_id = item[self._resource]['tenant_id']
                count = quota.QUOTAS.count(request.context, self._resource,
                                           self._plugin, self._collection,
                                           tenant_id)
                if bulk:
                    delta = deltas.get(tenant_id, 0) + 1
                    deltas[tenant_id] = delta
                else:
                    delta = 1
                kwargs = {self._resource: count + delta}
            except exceptions.QuotaResourceUnknown as e:
                # We don't want to quota this resource
                LOG.debug(e)
            else:
                quota.QUOTAS.limit_check(request.context,
                                         item[self._resource]['tenant_id'],
                                         **kwargs)

        def notify(create_result):
            notifier_method = self._resource + '.create.end'
            self._notifier.info(request.context,
                                notifier_method,
                                create_result)
            self._send_dhcp_notification(request.context,
                                         create_result,
                                         notifier_method)
            return create_result

        kwargs = {self._parent_id_name: parent_id} if parent_id else {}
        if self._collection in body and self._native_bulk:
            # plugin does atomic bulk create operations
            obj_creator = getattr(self._plugin, "%s_bulk" % action)
            objs = obj_creator(request.context, body, **kwargs)
            # Use first element of list to discriminate attributes which
            # should be removed because of authZ policies
            fields_to_strip = self._exclude_attributes_by_policy(
                request.context, objs[0])
            return notify({self._collection: [self._filter_attributes(
                request.context, obj, fields_to_strip=fields_to_strip)
                for obj in objs]})
        else:
            obj_creator = getattr(self._plugin, action)
            if self._collection in body:
                # Emulate atomic bulk behavior
                objs = self._emulate_bulk_create(obj_creator, request,
                                                 body, parent_id)
                return notify({self._collection: objs})
            else:
                kwargs.update({self._resource: body})
                obj = obj_creator(request.context, **kwargs)
                self._send_nova_notification(action, {},
                                             {self._resource: obj})
                return notify({self._resource: self._view(request.context,
                                                           obj)})

    def delete(self, request, id, **kwargs):
        """Deletes the specified entity."""
        self._notifier.info(request.context,
                            self._resource + '.delete.start',
                            {self._resource + '_id': id})
        action = self._plugin_handlers[self.DELETE]

        # Check authz
        policy.init()
        parent_id = kwargs.get(self._parent_id_name)
        obj = self._item(request, id, parent_id=parent_id)
        try:
            policy.enforce(request.context,
                           action,
                           obj)
        except exceptions.PolicyNotAuthorized:
            # To avoid giving away information, pretend that it
            # doesn't exist
            msg = _('The resource could not be found.')
            raise webob.exc.HTTPNotFound(msg)

        obj_deleter = getattr(self._plugin, action)
        obj_deleter(request.context, id, **kwargs)
        notifier_method = self._resource + '.delete.end'
        self._notifier.info(request.context,
                            notifier_method,
                            {self._resource + '_id': id})
        result = {self._resource: self._view(request.context, obj)}
        self._send_nova_notification(action, {}, result)
        self._send_dhcp_notification(request.context,
                                     result,
                                     notifier_method)

    def update(self, request, id, body=None, **kwargs):
        """Updates the specified entity's attributes."""
        parent_id = kwargs.get(self._parent_id_name)
        try:
            payload = body.copy()
        except AttributeError:
            msg = _("Invalid format: %s") % request.body
            raise exceptions.BadRequest(resource='body', msg=msg)
        payload['id'] = id
        self._notifier.info(request.context,
                            self._resource + '.update.start',
                            payload)
        body = Controller.prepare_request_body(request.context, body, False,
                                               self._resource, self._attr_info,
                                               allow_bulk=self._allow_bulk)
        action = self._plugin_handlers[self.UPDATE]
        # Load object to check authz
        # but pass only attributes in the original body and required
        # by the policy engine to the policy 'brain'
        field_list = [name for (name, value) in self._attr_info.iteritems()
                      if (value.get('required_by_policy') or
                          value.get('primary_key') or
                          'default' not in value)]
        # Ensure policy engine is initialized
        policy.init()
        orig_obj = self._item(request, id, field_list=field_list,
                              parent_id=parent_id)
        orig_object_copy = copy.copy(orig_obj)
        orig_obj.update(body[self._resource])
        try:
            policy.enforce(request.context,
                           action,
                           orig_obj)
        except exceptions.PolicyNotAuthorized:
            # To avoid giving away information, pretend that it
            # doesn't exist
            msg = _('The resource could not be found.')
            raise webob.exc.HTTPNotFound(msg)

        obj_updater = getattr(self._plugin, action)
        kwargs = {self._resource: body}
        if parent_id:
            kwargs[self._parent_id_name] = parent_id
        obj = obj_updater(request.context, id, **kwargs)
        result = {self._resource: self._view(request.context, obj)}
        notifier_method = self._resource + '.update.end'
        self._notifier.info(request.context, notifier_method, result)
        self._send_dhcp_notification(request.context,
                                     result,
                                     notifier_method)
        self._send_nova_notification(action, orig_object_copy, result)
        return result

    @staticmethod
    def _populate_tenant_id(context, res_dict, is_create):

        if (('tenant_id' in res_dict and
             res_dict['tenant_id'] != context.tenant_id and
             not context.is_admin)):
            msg = _("Specifying 'tenant_id' other than authenticated "
                    "tenant in request requires admin privileges")
            raise webob.exc.HTTPBadRequest(msg)

        if is_create and 'tenant_id' not in res_dict:
            if context.tenant_id:
                res_dict['tenant_id'] = context.tenant_id
            else:
                msg = _("Running without keystone AuthN requires "
                        "that tenant_id is specified")
                raise webob.exc.HTTPBadRequest(msg)

    @staticmethod
    def prepare_request_body(context, body, is_create, resource, attr_info,
                             allow_bulk=False):
        """Verifies required attributes are in request body.

        Also checks that an attribute is only specified if it is allowed
        for the given operation (create/update).

        Attributes with default values are considered to be optional.

        body argument must be the deserialized body.
        """
        collection = resource + "s"
        if not body:
            raise webob.exc.HTTPBadRequest(_("Resource body required"))

        LOG.debug(_("Request body: %(body)s"), {'body': body})
        prep_req_body = lambda x: Controller.prepare_request_body(
            context,
            x if resource in x else {resource: x},
            is_create,
            resource,
            attr_info,
            allow_bulk)
        if collection in body:
            if not allow_bulk:
                raise webob.exc.HTTPBadRequest(_("Bulk operation "
                                                 "not supported"))
            bulk_body = [prep_req_body(item) for item in body[collection]]
            if not bulk_body:
                raise webob.exc.HTTPBadRequest(_("Resources required"))
            return {collection: bulk_body}

        res_dict = body.get(resource)
        if res_dict is None:
            msg = _("Unable to find '%s' in request body") % resource
            raise webob.exc.HTTPBadRequest(msg)

        Controller._populate_tenant_id(context, res_dict, is_create)
        Controller._verify_attributes(res_dict, attr_info)

        if is_create:  # POST
            for attr, attr_vals in attr_info.iteritems():
                if attr_vals['allow_post']:
                    if ('default' not in attr_vals and
                            attr not in res_dict):
                        msg = _("Failed to parse request. Required "
                                "attribute '%s' not specified") % attr
                        raise webob.exc.HTTPBadRequest(msg)
                    res_dict[attr] = res_dict.get(attr,
                                                  attr_vals.get('default'))
                else:
                    if attr in res_dict:
                        msg = _("Attribute '%s' not allowed in POST") % attr
                        raise webob.exc.HTTPBadRequest(msg)
        else:  # PUT
            for attr, attr_vals in attr_info.iteritems():
                if attr in res_dict and not attr_vals['allow_put']:
                    msg = _("Cannot update read-only attribute %s") % attr
                    raise webob.exc.HTTPBadRequest(msg)

        for attr, attr_vals in attr_info.iteritems():
            if (attr not in res_dict or
                    res_dict[attr] is attributes.ATTR_NOT_SPECIFIED):
                continue
            # Convert values if necessary
            if 'convert_to' in attr_vals:
                res_dict[attr] = attr_vals['convert_to'](res_dict[attr])
            # Check that configured values are correct
            if 'validate' not in attr_vals:
                continue
            for rule in attr_vals['validate']:
                res = attributes.validators[rule](res_dict[attr],
                                                  attr_vals['validate'][rule])
                if res:
                    msg_dict = dict(attr=attr, reason=res)
                    msg = _("Invalid input for %(attr)s. "
                            "Reason: %(reason)s.") % msg_dict
                    raise webob.exc.HTTPBadRequest(msg)
        return body
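
    # Illustrative sketch (resource and values are hypothetical): for a
    # POST with body {'port': {'network_id': net_id, 'tenant_id': tid}},
    # prepare_request_body() fills every allow_post attribute that has a
    # default (e.g. 'admin_state_up': True,
    # 'mac_address': attributes.ATTR_NOT_SPECIFIED), applies each
    # 'convert_to' hook, runs each 'validate' rule, and raises
    # HTTPBadRequest on the first failure.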

    @staticmethod
    def _verify_attributes(res_dict, attr_info):
        extra_keys = set(res_dict.keys()) - set(attr_info.keys())
        if extra_keys:
            msg = _("Unrecognized attribute(s) '%s'") % ', '.join(extra_keys)
            raise webob.exc.HTTPBadRequest(msg)

    def _validate_network_tenant_ownership(self, request, resource_item):
        # TODO(salvatore-orlando): consider whether this check can be folded
        # in the policy engine
        if (request.context.is_admin or
                self._resource not in ('port', 'subnet')):
            return
        network = self._plugin.get_network(
            request.context,
            resource_item['network_id'])
        # do not perform the check on shared networks
        if network.get('shared'):
            return

        network_owner = network['tenant_id']

        if network_owner != resource_item['tenant_id']:
            msg = _("Tenant %(tenant_id)s not allowed to "
                    "create %(resource)s on this network")
            raise webob.exc.HTTPForbidden(msg % {
                "tenant_id": resource_item['tenant_id'],
                "resource": self._resource,
            })


def create_resource(collection, resource, plugin, params, allow_bulk=False,
                    member_actions=None, parent=None, allow_pagination=False,
                    allow_sorting=False):
    controller = Controller(plugin, collection, resource, params, allow_bulk,
                            member_actions=member_actions, parent=parent,
                            allow_pagination=allow_pagination,
                            allow_sorting=allow_sorting)

    return wsgi_resource.Resource(controller, FAULT_MAP)
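

# Illustrative wiring (the plugin object and names are hypothetical): an
# API router typically builds one controller per collection from the
# attribute map:
#
#     params = attributes.RESOURCE_ATTRIBUTE_MAP['networks']
#     networks = create_resource('networks', 'network', plugin, params,
#                                allow_bulk=True, allow_pagination=True,
#                                allow_sorting=True)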

@ -1,69 +0,0 @@
# Copyright 2011 Citrix Systems.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import webob.dec
import webob.exc

from neutron.api.views import versions as versions_view
from neutron.openstack.common import gettextutils
from neutron.openstack.common import log as logging
from neutron import wsgi


LOG = logging.getLogger(__name__)


class Versions(object):

    @classmethod
    def factory(cls, global_config, **local_config):
        return cls()

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        """Respond to a request for all Neutron API versions."""
        version_objs = [
            {
                "id": "v2.0",
                "status": "CURRENT",
            },
        ]

        if req.path != '/':
            language = req.best_match_language()
            msg = _('Unknown API version specified')
            msg = gettextutils.translate(msg, language)
            return webob.exc.HTTPNotFound(explanation=msg)

        builder = versions_view.get_view_builder(req)
        versions = [builder.build(version) for version in version_objs]
        response = dict(versions=versions)
        metadata = {
            "application/xml": {
                "attributes": {
                    "version": ["status", "id"],
                    "link": ["rel", "href"],
                }
            }
        }

        content_type = req.best_match_content_type()
        body = (wsgi.Serializer(metadata=metadata).
                serialize(response, content_type))

        response = webob.Response()
        response.content_type = content_type
        response.body = body

        return response
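

# Illustrative JSON response (host and port are hypothetical) for a GET
# on '/', with links assembled by the view builder in the next file:
#
#     {"versions": [{"id": "v2.0", "status": "CURRENT",
#                    "links": [{"rel": "self",
#                               "href": "http://controller:9696/v2.0"}]}]}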

@ -1,58 +0,0 @@
# Copyright 2010-2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os


def get_view_builder(req):
    base_url = req.application_url
    return ViewBuilder(base_url)


class ViewBuilder(object):

    def __init__(self, base_url):
        """Object initialization.

        :param base_url: URL of the root WSGI application
        """
        self.base_url = base_url

    def build(self, version_data):
        """Generic method used to generate a version entity."""
        version = {
            "id": version_data["id"],
            "status": version_data["status"],
            "links": self._build_links(version_data),
        }

        return version

    def _build_links(self, version_data):
        """Generate a container of links that refer to the provided version."""
        href = self.generate_href(version_data["id"])

        links = [
            {
                "rel": "self",
                "href": href,
            },
        ]

        return links

    def generate_href(self, version_number):
        """Create a URL that refers to a specific version_number."""
        return os.path.join(self.base_url, version_number)
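

# Illustrative usage (the base URL is hypothetical):
#
#     builder = ViewBuilder('http://controller:9696')
#     builder.build({'id': 'v2.0', 'status': 'CURRENT'})
#     # -> {'id': 'v2.0', 'status': 'CURRENT',
#     #     'links': [{'rel': 'self',
#     #                'href': 'http://controller:9696/v2.0'}]}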

@ -1,71 +0,0 @@
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo.config import cfg
import webob.dec
import webob.exc

from neutron import context
from neutron.openstack.common import log as logging
from neutron.openstack.common.middleware import request_id
from neutron import wsgi

LOG = logging.getLogger(__name__)


class NeutronKeystoneContext(wsgi.Middleware):
    """Make a request context from keystone headers."""

    @webob.dec.wsgify
    def __call__(self, req):
        # Determine the user ID
        user_id = req.headers.get('X_USER_ID')
        if not user_id:
            LOG.debug(_("X_USER_ID is not found in request"))
            return webob.exc.HTTPUnauthorized()

        # Determine the tenant
        tenant_id = req.headers.get('X_PROJECT_ID')

        # Suck out the roles
        roles = [r.strip() for r in req.headers.get('X_ROLES', '').split(',')]

        # Human-friendly names
        tenant_name = req.headers.get('X_PROJECT_NAME')
        user_name = req.headers.get('X_USER_NAME')

        # Use request_id if already set
        req_id = req.environ.get(request_id.ENV_REQUEST_ID)

        # Create a context with the authentication data
        ctx = context.Context(user_id, tenant_id, roles=roles,
                              user_name=user_name, tenant_name=tenant_name,
                              request_id=req_id)

        # Inject the context...
        req.environ['neutron.context'] = ctx

        return self.application


def pipeline_factory(loader, global_conf, **local_conf):
    """Create a paste pipeline based on the 'auth_strategy' config option."""
    pipeline = local_conf[cfg.CONF.auth_strategy]
    pipeline = pipeline.split()
    filters = [loader.get_filter(n) for n in pipeline[:-1]]
    app = loader.get_app(pipeline[-1])
    filters.reverse()
    for filter in filters:
        app = filter(app)
    return app
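

# Illustrative paste-deploy snippet (section and filter names are
# assumptions, not taken from this patch) showing how pipeline_factory
# selects a pipeline keyed by cfg.CONF.auth_strategy:
#
#     [composite:neutronapi_v2_0]
#     use = call:neutron.auth:pipeline_factory
#     noauth = request_id catch_errors extensions neutronapiapp_v2_0
#     keystone = request_id catch_errors authtoken keystonecontext
#                extensions neutronapiapp_v2_0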

@ -1,14 +0,0 @@
# Copyright (c) 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

@ -1,44 +0,0 @@
# Copyright (c) 2014 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from neutron.agent.linux import ovs_lib
from neutron.common import utils
from neutron.plugins.common import constants as const
from neutron.plugins.openvswitch.common import constants as ovs_const


def vxlan_supported(root_helper, from_ip='192.0.2.1', to_ip='192.0.2.2'):
    name = "vxlantest-" + utils.get_random_string(6)
    with ovs_lib.OVSBridge(name, root_helper) as br:
        port = br.add_tunnel_port(from_ip, to_ip, const.TYPE_VXLAN)
        return port != ovs_const.INVALID_OFPORT


def patch_supported(root_helper):
    seed = utils.get_random_string(6)
    name = "patchtest-" + seed
    peer_name = "peertest0-" + seed
    patch_name = "peertest1-" + seed
    with ovs_lib.OVSBridge(name, root_helper) as br:
        port = br.add_patch_port(patch_name, peer_name)
        return port != ovs_const.INVALID_OFPORT


def nova_notify_supported():
    try:
        import neutron.notifiers.nova  # noqa since unused
        return True
    except ImportError:
        return False
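

# Illustrative direct use (the root_helper value is hypothetical):
#
#     if not vxlan_supported(root_helper='sudo'):
#         pass  # warn that the installed Open vSwitch lacks VXLAN support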

@ -1,108 +0,0 @@
# Copyright (c) 2014 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import sys

from neutron.cmd.sanity import checks
from neutron.common import config
from neutron.openstack.common import log as logging
from oslo.config import cfg


LOG = logging.getLogger(__name__)
cfg.CONF.import_group('AGENT', 'neutron.plugins.openvswitch.common.config')
cfg.CONF.import_group('OVS', 'neutron.plugins.openvswitch.common.config')


class BoolOptCallback(cfg.BoolOpt):
    def __init__(self, name, callback, **kwargs):
        self.callback = callback
        super(BoolOptCallback, self).__init__(name, **kwargs)


def check_ovs_vxlan():
    result = checks.vxlan_supported(root_helper=cfg.CONF.AGENT.root_helper)
    if not result:
        LOG.error(_('Check for Open vSwitch VXLAN support failed. '
                    'Please ensure that the version of openvswitch '
                    'being used has VXLAN support.'))
    return result


def check_ovs_patch():
    result = checks.patch_supported(root_helper=cfg.CONF.AGENT.root_helper)
    if not result:
        LOG.error(_('Check for Open vSwitch patch port support failed. '
                    'Please ensure that the version of openvswitch '
                    'being used has patch port support or disable features '
                    'requiring patch ports (gre/vxlan, etc.).'))
    return result


def check_nova_notify():
    result = checks.nova_notify_supported()
    if not result:
        LOG.error(_('Nova notifications are enabled, but novaclient is not '
                    'installed. Either disable nova notifications or install '
                    'python-novaclient.'))
    return result


# Define CLI opts to test specific features, with a callback for the test
OPTS = [
    BoolOptCallback('ovs_vxlan', check_ovs_vxlan, default=False,
                    help=_('Check for vxlan support')),
    BoolOptCallback('ovs_patch', check_ovs_patch, default=False,
                    help=_('Check for patch port support')),
    BoolOptCallback('nova_notify', check_nova_notify, default=False,
                    help=_('Check for nova notification support')),
]


def enable_tests_from_config():
    """If a test can depend on configuration, use this function to set the
    appropriate CLI option to enable that test. It will then be possible to
    run all necessary tests, just by passing in the appropriate configs.
    """

    if 'vxlan' in cfg.CONF.AGENT.tunnel_types:
        cfg.CONF.set_override('ovs_vxlan', True)
    if cfg.CONF.AGENT.tunnel_types:
        cfg.CONF.set_override('ovs_patch', True)
    if not cfg.CONF.OVS.use_veth_interconnection:
        cfg.CONF.set_override('ovs_patch', True)
    if (cfg.CONF.notify_nova_on_port_status_changes or
            cfg.CONF.notify_nova_on_port_data_changes):
        cfg.CONF.set_override('nova_notify', True)


def all_tests_passed():
    res = True
    for opt in OPTS:
        if cfg.CONF.get(opt.name):
            res &= opt.callback()
    return res


def main():
    cfg.CONF.register_cli_opts(OPTS)
    cfg.CONF.set_override('use_stderr', True)
    config.setup_logging(cfg.CONF)
    config.init(sys.argv[1:], default_config_files=[])

    if cfg.CONF.config_file:
        enable_tests_from_config()

    return 0 if all_tests_passed() else 1
|
|
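Each BoolOptCallback parses like an ordinary boolean flag; all_tests_passed() then runs the callback of every option that ended up True and ANDs the results into the exit code. A hedged sketch of the equivalent invocation (the module path and script name are assumptions, not stated in this diff):

    import sys
    from neutron.cmd import sanity_check  # module path is an assumption

    # Equivalent of: neutron-sanity-check --ovs_vxlan
    sys.argv = ['neutron-sanity-check', '--ovs_vxlan']
    sys.exit(sanity_check.main())  # 0 only if the VXLAN probe succeeded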
@ -1,48 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2012 New Dream Network, LLC (DreamHost)
# Author: Julien Danjou <julien@danjou.info>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Cron script to generate usage notifications for networks, ports and
subnets.

"""

import sys

from oslo.config import cfg

from neutron.common import config
from neutron.common import rpc as n_rpc
from neutron import context
from neutron import manager


def main():
    config.init(sys.argv[1:])
    config.setup_logging(cfg.CONF)

    cxt = context.get_admin_context()
    plugin = manager.NeutronManager.get_plugin()
    notifier = n_rpc.get_notifier('network')
    for network in plugin.get_networks(cxt):
        notifier.info(cxt, 'network.exists', {'network': network})
    for subnet in plugin.get_subnets(cxt):
        notifier.info(cxt, 'subnet.exists', {'subnet': subnet})
    for port in plugin.get_ports(cxt):
        notifier.info(cxt, 'port.exists', {'port': port})
    for router in plugin.get_routers(cxt):
        notifier.info(cxt, 'router.exists', {'router': router})
    for floatingip in plugin.get_floatingips(cxt):
        notifier.info(cxt, 'floatingip.exists', {'floatingip': floatingip})
@ -1,190 +0,0 @@
# Copyright 2011 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Routines for configuring Neutron
"""

import os

from oslo.config import cfg
from oslo.db import options as db_options
from oslo import messaging
from paste import deploy

from neutron.api.v2 import attributes
from neutron.common import utils
from neutron.openstack.common import log as logging
from neutron import version


LOG = logging.getLogger(__name__)

core_opts = [
    cfg.StrOpt('bind_host', default='0.0.0.0',
               help=_("The host IP to bind to")),
    cfg.IntOpt('bind_port', default=9696,
               help=_("The port to bind to")),
    cfg.StrOpt('api_paste_config', default="api-paste.ini",
               help=_("The API paste config file to use")),
    cfg.StrOpt('api_extensions_path', default="",
               help=_("The path for API extensions")),
    cfg.StrOpt('policy_file', default="policy.json",
               help=_("The policy file to use")),
    cfg.StrOpt('auth_strategy', default='keystone',
               help=_("The type of authentication to use")),
    cfg.StrOpt('core_plugin',
               help=_("The core plugin Neutron will use")),
    cfg.ListOpt('service_plugins', default=[],
                help=_("The service plugins Neutron will use")),
    cfg.StrOpt('base_mac', default="fa:16:3e:00:00:00",
               help=_("The base MAC address Neutron will use for VIFs")),
    cfg.IntOpt('mac_generation_retries', default=16,
               help=_("How many times Neutron will retry MAC generation")),
    cfg.BoolOpt('allow_bulk', default=True,
                help=_("Allow the usage of the bulk API")),
    cfg.BoolOpt('allow_pagination', default=False,
                help=_("Allow the usage of pagination")),
    cfg.BoolOpt('allow_sorting', default=False,
                help=_("Allow the usage of sorting")),
    cfg.StrOpt('pagination_max_limit', default="-1",
               help=_("The maximum number of items returned in a single "
                      "response; a value of 'infinite' or a negative integer "
                      "means no limit")),
    cfg.IntOpt('max_dns_nameservers', default=5,
               help=_("Maximum number of DNS nameservers")),
    cfg.IntOpt('max_subnet_host_routes', default=20,
               help=_("Maximum number of host routes per subnet")),
    cfg.IntOpt('max_fixed_ips_per_port', default=5,
               help=_("Maximum number of fixed ips per port")),
    cfg.IntOpt('dhcp_lease_duration', default=86400,
               deprecated_name='dhcp_lease_time',
               help=_("DHCP lease duration (in seconds). Use -1 to tell "
                      "dnsmasq to use infinite lease times.")),
    cfg.BoolOpt('dhcp_agent_notification', default=True,
                help=_("Allow sending resource operation"
                       " notification to DHCP agent")),
    cfg.BoolOpt('allow_overlapping_ips', default=False,
                help=_("Allow overlapping IP support in Neutron")),
    cfg.StrOpt('host', default=utils.get_hostname(),
               help=_("The hostname Neutron is running on")),
    cfg.BoolOpt('force_gateway_on_subnet', default=False,
                help=_("Ensure that configured gateway is on subnet")),
    cfg.BoolOpt('notify_nova_on_port_status_changes', default=True,
                help=_("Send notification to nova when port status changes")),
    cfg.BoolOpt('notify_nova_on_port_data_changes', default=True,
                help=_("Send notification to nova when port data (fixed_ips/"
                       "floatingip) changes so nova can update its cache.")),
    cfg.StrOpt('nova_url',
               default='http://127.0.0.1:8774/v2',
               help=_('URL for connection to nova')),
    cfg.StrOpt('nova_admin_username',
               help=_('Username for connecting to nova in admin context')),
    cfg.StrOpt('nova_admin_password',
               help=_('Password for connection to nova in admin context'),
               secret=True),
    cfg.StrOpt('nova_admin_tenant_id',
               help=_('The uuid of the admin nova tenant')),
    cfg.StrOpt('nova_admin_auth_url',
               default='http://localhost:5000/v2.0',
               help=_('Authorization URL for connecting to nova in admin '
                      'context')),
    cfg.StrOpt('nova_ca_certificates_file',
               help=_('CA file for novaclient to verify server certificates')),
    cfg.BoolOpt('nova_api_insecure', default=False,
                help=_("If True, ignore any SSL validation issues")),
    cfg.StrOpt('nova_region_name',
               help=_('Name of nova region to use. Useful if keystone manages'
                      ' more than one region.')),
    cfg.IntOpt('send_events_interval', default=2,
               help=_('Number of seconds between sending events to nova if '
                      'there are any events to send.')),
]

core_cli_opts = [
    cfg.StrOpt('state_path',
               default='/var/lib/neutron',
               help=_("Where to store Neutron state files. "
                      "This directory must be writable by the agent.")),
]

# Register the configuration options
cfg.CONF.register_opts(core_opts)
cfg.CONF.register_cli_opts(core_cli_opts)

# Ensure that the control exchange is set correctly
messaging.set_transport_defaults(control_exchange='neutron')
_SQL_CONNECTION_DEFAULT = 'sqlite://'
# Update the default QueuePool parameters. These can be tweaked by the
# configuration variables - max_pool_size, max_overflow and pool_timeout
db_options.set_defaults(cfg.CONF,
                        connection=_SQL_CONNECTION_DEFAULT,
                        sqlite_db='', max_pool_size=10,
                        max_overflow=20, pool_timeout=10)


def init(args, **kwargs):
    cfg.CONF(args=args, project='neutron',
             version='%%prog %s' % version.version_info.release_string(),
             **kwargs)

    # FIXME(ihrachys): if import is put in global, circular import
    # failure occurs
    from neutron.common import rpc as n_rpc
    n_rpc.init(cfg.CONF)

    # Validate that the base_mac is of the correct format
    msg = attributes._validate_regex(cfg.CONF.base_mac,
                                     attributes.MAC_PATTERN)
    if msg:
        msg = _("Base MAC: %s") % msg
        raise Exception(msg)


def setup_logging(conf):
    """Sets up the logging options for a log with supplied name.

    :param conf: a cfg.ConfigOpts object
    """
    product_name = "neutron"
    logging.setup(product_name)
    LOG.info(_("Logging enabled!"))


def load_paste_app(app_name):
    """Builds and returns a WSGI app from a paste config file.

    :param app_name: Name of the application to load
    :raises ConfigFilesNotFoundError when config file cannot be located
    :raises RuntimeError when application cannot be loaded from config file
    """

    config_path = cfg.CONF.find_file(cfg.CONF.api_paste_config)
    if not config_path:
        raise cfg.ConfigFilesNotFoundError(
            config_files=[cfg.CONF.api_paste_config])
    config_path = os.path.abspath(config_path)
    LOG.info(_("Config paste file: %s"), config_path)

    try:
        app = deploy.loadapp("config:%s" % config_path, name=app_name)
    except (LookupError, ImportError):
        msg = (_("Unable to load %(app_name)s from "
                 "configuration file %(config_path)s.") %
               {'app_name': app_name,
                'config_path': config_path})
        LOG.exception(msg)
        raise RuntimeError(msg)
    return app
@ -1,131 +0,0 @@
# Copyright (c) 2012 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# TODO(salv-orlando): Verify if a single set of operational
# status constants is achievable
NET_STATUS_ACTIVE = 'ACTIVE'
NET_STATUS_BUILD = 'BUILD'
NET_STATUS_DOWN = 'DOWN'
NET_STATUS_ERROR = 'ERROR'

PORT_STATUS_ACTIVE = 'ACTIVE'
PORT_STATUS_BUILD = 'BUILD'
PORT_STATUS_DOWN = 'DOWN'
PORT_STATUS_ERROR = 'ERROR'

FLOATINGIP_STATUS_ACTIVE = 'ACTIVE'
FLOATINGIP_STATUS_DOWN = 'DOWN'
FLOATINGIP_STATUS_ERROR = 'ERROR'

DEVICE_OWNER_ROUTER_INTF = "network:router_interface"
DEVICE_OWNER_ROUTER_GW = "network:router_gateway"
DEVICE_OWNER_FLOATINGIP = "network:floatingip"
DEVICE_OWNER_DHCP = "network:dhcp"
DEVICE_OWNER_DVR_INTERFACE = "network:router_interface_distributed"
DEVICE_OWNER_ROUTER_INTF_DISTRIBUTED = "network:router_interface_distributed"
DEVICE_OWNER_AGENT_GW = "network:floatingip_agent_gateway"
DEVICE_OWNER_ROUTER_SNAT = "network:router_centralized_snat"

DEVICE_ID_RESERVED_DHCP_PORT = "reserved_dhcp_port"

FLOATINGIP_KEY = '_floatingips'
INTERFACE_KEY = '_interfaces'
METERING_LABEL_KEY = '_metering_labels'
FLOATINGIP_AGENT_INTF_KEY = '_floatingip_agent_interfaces'
SNAT_ROUTER_INTF_KEY = '_snat_router_interfaces'

IPv4 = 'IPv4'
IPv6 = 'IPv6'

DHCP_RESPONSE_PORT = 68

MIN_VLAN_TAG = 1
MAX_VLAN_TAG = 4094
MAX_VXLAN_VNI = 16777215
FLOODING_ENTRY = ['00:00:00:00:00:00', '0.0.0.0']

EXT_NS_COMP = '_backward_comp_e_ns'
EXT_NS = '_extension_ns'
XML_NS_V20 = 'http://openstack.org/quantum/api/v2.0'
XSI_NAMESPACE = "http://www.w3.org/2001/XMLSchema-instance"
XSI_ATTR = "xsi:nil"
XSI_NIL_ATTR = "xmlns:xsi"
ATOM_NAMESPACE = "http://www.w3.org/2005/Atom"
ATOM_XMLNS = "xmlns:atom"
ATOM_LINK_NOTATION = "{%s}link" % ATOM_NAMESPACE
TYPE_XMLNS = "xmlns:quantum"
TYPE_ATTR = "quantum:type"
VIRTUAL_ROOT_KEY = "_v_root"

TYPE_BOOL = "bool"
TYPE_INT = "int"
TYPE_LONG = "long"
TYPE_FLOAT = "float"
TYPE_LIST = "list"
TYPE_DICT = "dict"

AGENT_TYPE_DHCP = 'DHCP agent'
AGENT_TYPE_OVS = 'Open vSwitch agent'
AGENT_TYPE_LINUXBRIDGE = 'Linux bridge agent'
AGENT_TYPE_HYPERV = 'HyperV agent'
AGENT_TYPE_NEC = 'NEC plugin agent'
AGENT_TYPE_OFA = 'OFA driver agent'
AGENT_TYPE_L3 = 'L3 agent'
AGENT_TYPE_LOADBALANCER = 'Loadbalancer agent'
AGENT_TYPE_MLNX = 'Mellanox plugin agent'
AGENT_TYPE_METERING = 'Metering agent'
AGENT_TYPE_METADATA = 'Metadata agent'
AGENT_TYPE_SDNVE = 'IBM SDN-VE agent'
L2_AGENT_TOPIC = 'N/A'

PAGINATION_INFINITE = 'infinite'

SORT_DIRECTION_ASC = 'asc'
SORT_DIRECTION_DESC = 'desc'

PORT_BINDING_EXT_ALIAS = 'binding'
L3_AGENT_SCHEDULER_EXT_ALIAS = 'l3_agent_scheduler'
DHCP_AGENT_SCHEDULER_EXT_ALIAS = 'dhcp_agent_scheduler'
LBAAS_AGENT_SCHEDULER_EXT_ALIAS = 'lbaas_agent_scheduler'
L3_DISTRIBUTED_EXT_ALIAS = 'dvr'

# Protocol names and numbers for Security Groups/Firewalls
PROTO_NAME_TCP = 'tcp'
PROTO_NAME_ICMP = 'icmp'
PROTO_NAME_ICMP_V6 = 'icmpv6'
PROTO_NAME_UDP = 'udp'
PROTO_NUM_TCP = 6
PROTO_NUM_ICMP = 1
PROTO_NUM_ICMP_V6 = 58
PROTO_NUM_UDP = 17

# List of ICMPv6 types that should be allowed by default:
# Multicast Listener Query (130),
# Multicast Listener Report (131),
# Multicast Listener Done (132),
# Neighbor Solicitation (135),
# Neighbor Advertisement (136)
ICMPV6_ALLOWED_TYPES = [130, 131, 132, 135, 136]
ICMPV6_TYPE_RA = 134

DHCPV6_STATEFUL = 'dhcpv6-stateful'
DHCPV6_STATELESS = 'dhcpv6-stateless'
IPV6_SLAAC = 'slaac'
IPV6_MODES = [DHCPV6_STATEFUL, DHCPV6_STATELESS, IPV6_SLAAC]

IPV6_LLA_PREFIX = 'fe80::/64'

# Linux interface max length
DEVICE_NAME_MAX_LEN = 15
@ -1,321 +0,0 @@
# Copyright 2011 VMware, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Neutron base exception handling.
"""

from neutron.openstack.common import excutils


class NeutronException(Exception):
    """Base Neutron Exception.

    To correctly use this class, inherit from it and define
    a 'message' property. That message will get printf'd
    with the keyword arguments provided to the constructor.
    """
    message = _("An unknown exception occurred.")

    def __init__(self, **kwargs):
        try:
            super(NeutronException, self).__init__(self.message % kwargs)
            self.msg = self.message % kwargs
        except Exception:
            with excutils.save_and_reraise_exception() as ctxt:
                if not self.use_fatal_exceptions():
                    ctxt.reraise = False
                    # at least get the core message out if something happened
                    super(NeutronException, self).__init__(self.message)

    def __unicode__(self):
        return unicode(self.msg)

    def use_fatal_exceptions(self):
        return False


class BadRequest(NeutronException):
    message = _('Bad %(resource)s request: %(msg)s')


class NotFound(NeutronException):
    pass


class Conflict(NeutronException):
    pass


class NotAuthorized(NeutronException):
    message = _("Not authorized.")


class ServiceUnavailable(NeutronException):
    message = _("The service is unavailable")


class AdminRequired(NotAuthorized):
    message = _("User does not have admin privileges: %(reason)s")


class PolicyNotAuthorized(NotAuthorized):
    message = _("Policy doesn't allow %(action)s to be performed.")


class NetworkNotFound(NotFound):
    message = _("Network %(net_id)s could not be found")


class SubnetNotFound(NotFound):
    message = _("Subnet %(subnet_id)s could not be found")


class PortNotFound(NotFound):
    message = _("Port %(port_id)s could not be found")


class PortNotFoundOnNetwork(NotFound):
    message = _("Port %(port_id)s could not be found "
                "on network %(net_id)s")


class PolicyFileNotFound(NotFound):
    message = _("Policy configuration policy.json could not be found")


class PolicyInitError(NeutronException):
    message = _("Failed to init policy %(policy)s because %(reason)s")


class PolicyCheckError(NeutronException):
    message = _("Failed to check policy %(policy)s because %(reason)s")


class StateInvalid(BadRequest):
    message = _("Unsupported port state: %(port_state)s")


class InUse(NeutronException):
    message = _("The resource is in use")


class NetworkInUse(InUse):
    message = _("Unable to complete operation on network %(net_id)s. "
                "There are one or more ports still in use on the network.")


class SubnetInUse(InUse):
    message = _("Unable to complete operation on subnet %(subnet_id)s. "
                "One or more ports have an IP allocation from this subnet.")


class PortInUse(InUse):
    message = _("Unable to complete operation on port %(port_id)s "
                "for network %(net_id)s. Port already has an attached "
                "device %(device_id)s.")


class MacAddressInUse(InUse):
    message = _("Unable to complete operation for network %(net_id)s. "
                "The mac address %(mac)s is in use.")


class HostRoutesExhausted(BadRequest):
    # NOTE(xchenum): it probably makes sense to use a quota exceeded exception?
    message = _("Unable to complete operation for %(subnet_id)s. "
                "The number of host routes exceeds the limit %(quota)s.")


class DNSNameServersExhausted(BadRequest):
    # NOTE(xchenum): it probably makes sense to use a quota exceeded exception?
    message = _("Unable to complete operation for %(subnet_id)s. "
                "The number of DNS nameservers exceeds the limit %(quota)s.")


class IpAddressInUse(InUse):
    message = _("Unable to complete operation for network %(net_id)s. "
                "The IP address %(ip_address)s is in use.")


class VlanIdInUse(InUse):
    message = _("Unable to create the network. "
                "The VLAN %(vlan_id)s on physical network "
                "%(physical_network)s is in use.")


class FlatNetworkInUse(InUse):
    message = _("Unable to create the flat network. "
                "Physical network %(physical_network)s is in use.")


class TunnelIdInUse(InUse):
    message = _("Unable to create the network. "
                "The tunnel ID %(tunnel_id)s is in use.")


class TenantNetworksDisabled(ServiceUnavailable):
    message = _("Tenant network creation is not enabled.")


class ResourceExhausted(ServiceUnavailable):
    pass


class NoNetworkAvailable(ResourceExhausted):
    message = _("Unable to create the network. "
                "No tenant network is available for allocation.")


class SubnetMismatchForPort(BadRequest):
    message = _("Subnet on port %(port_id)s does not match "
                "the requested subnet %(subnet_id)s")


class MalformedRequestBody(BadRequest):
    message = _("Malformed request body: %(reason)s")


class Invalid(NeutronException):
    def __init__(self, message=None):
        self.message = message
        super(Invalid, self).__init__()


class InvalidInput(BadRequest):
    message = _("Invalid input for operation: %(error_message)s.")


class InvalidAllocationPool(BadRequest):
    message = _("The allocation pool %(pool)s is not valid.")


class OverlappingAllocationPools(Conflict):
    message = _("Found overlapping allocation pools: "
                "%(pool_1)s %(pool_2)s for subnet %(subnet_cidr)s.")


class OutOfBoundsAllocationPool(BadRequest):
    message = _("The allocation pool %(pool)s spans "
                "beyond the subnet cidr %(subnet_cidr)s.")


class MacAddressGenerationFailure(ServiceUnavailable):
    message = _("Unable to generate unique mac on network %(net_id)s.")


class IpAddressGenerationFailure(Conflict):
    message = _("No more IP addresses available on network %(net_id)s.")


class BridgeDoesNotExist(NeutronException):
    message = _("Bridge %(bridge)s does not exist.")


class PreexistingDeviceFailure(NeutronException):
    message = _("Creation failed. %(dev_name)s already exists.")


class SudoRequired(NeutronException):
    message = _("Sudo privilege is required to run this command.")


class QuotaResourceUnknown(NotFound):
    message = _("Unknown quota resources %(unknown)s.")


class OverQuota(Conflict):
    message = _("Quota exceeded for resources: %(overs)s")


class QuotaMissingTenant(BadRequest):
    message = _("Tenant-id was missing from Quota request")


class InvalidQuotaValue(Conflict):
    message = _("Change would make usage less than 0 for the following "
                "resources: %(unders)s")


class InvalidSharedSetting(Conflict):
    message = _("Unable to reconfigure sharing settings for network "
                "%(network)s. Multiple tenants are using it")


class InvalidExtensionEnv(BadRequest):
    message = _("Invalid extension environment: %(reason)s")


class ExtensionsNotFound(NotFound):
    message = _("Extensions not found: %(extensions)s")


class InvalidContentType(NeutronException):
    message = _("Invalid content type %(content_type)s")


class ExternalIpAddressExhausted(BadRequest):
    message = _("Unable to find any IP address on external "
                "network %(net_id)s.")


class TooManyExternalNetworks(NeutronException):
    message = _("More than one external network exists")


class InvalidConfigurationOption(NeutronException):
    message = _("An invalid value was provided for %(opt_name)s: "
                "%(opt_value)s")


class GatewayConflictWithAllocationPools(InUse):
    message = _("Gateway ip %(ip_address)s conflicts with "
                "allocation pool %(pool)s")


class GatewayIpInUse(InUse):
    message = _("Current gateway ip %(ip_address)s already in use "
                "by port %(port_id)s. Unable to update.")


class NetworkVlanRangeError(NeutronException):
    message = _("Invalid network VLAN range: '%(vlan_range)s' - '%(error)s'")

    def __init__(self, **kwargs):
        # Convert vlan_range tuple to 'start:end' format for display
        if isinstance(kwargs['vlan_range'], tuple):
            kwargs['vlan_range'] = "%d:%d" % kwargs['vlan_range']
        super(NetworkVlanRangeError, self).__init__(**kwargs)


class NetworkVxlanPortRangeError(NeutronException):
    message = _("Invalid network VXLAN port range: '%(vxlan_range)s'")


class VxlanNetworkUnsupported(NeutronException):
    message = _("VXLAN Network unsupported.")


class DuplicatedExtension(NeutronException):
    message = _("Found duplicate extension: %(alias)s")


class DeviceIDNotOwnedByTenant(Conflict):
    message = _("The following device_id %(device_id)s is not owned by your "
                "tenant or matches another tenant's router.")


class InvalidCIDR(BadRequest):
    message = _("Invalid CIDR %(input)s given as IP prefix")
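To illustrate the message-interpolation contract described in the NeutronException docstring, a small hedged example (the raise site and port id are hypothetical):

    # Subclasses only declare a 'message' template; the base __init__
    # interpolates the kwargs, so callers raise with named parameters:
    try:
        raise PortNotFound(port_id='3f9de6d1')
    except PortNotFound as e:
        assert '3f9de6d1' in str(e)  # "Port 3f9de6d1 could not be found"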
@ -1,39 +0,0 @@
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
IPv6-related utilities and helper functions.
"""

import netaddr


def get_ipv6_addr_by_EUI64(prefix, mac):
    # Check if the prefix is an IPv4 address
    isIPv4 = netaddr.valid_ipv4(prefix)
    if isIPv4:
        msg = _("Unable to generate IP address by EUI64 for IPv4 prefix")
        raise TypeError(msg)
    try:
        eui64 = int(netaddr.EUI(mac).eui64())
        prefix = netaddr.IPNetwork(prefix)
        return netaddr.IPAddress(prefix.first + eui64 ^ (1 << 57))
    except (ValueError, netaddr.AddrFormatError):
        raise TypeError(_('Bad prefix or mac format for generating IPv6 '
                          'address by EUI-64: %(prefix)s, %(mac)s:')
                        % {'prefix': prefix, 'mac': mac})
    except TypeError:
        raise TypeError(_('Bad prefix type for generating IPv6 address by '
                          'EUI-64: %s') % prefix)
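A worked example of the EUI-64 derivation above (the input values are illustrative and the result was computed by hand, so treat it as a sanity check rather than a reference):

    # 00:16:3e:33:44:55 expands to EUI-64 00:16:3e:ff:fe:33:44:55; adding it
    # to the prefix and XOR-ing bit 57 flips the universal/local bit of the
    # embedded MAC:
    addr = get_ipv6_addr_by_EUI64('2001:db8::/64', '00:16:3e:33:44:55')
    # IPAddress('2001:db8::216:3eff:fe33:4455')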
@ -1,278 +0,0 @@
# Copyright (c) 2012 OpenStack Foundation.
# Copyright (c) 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo.config import cfg
from oslo import messaging
from oslo.messaging import serializer as om_serializer

from neutron.common import exceptions
from neutron import context
from neutron.openstack.common import log as logging
from neutron.openstack.common import service


LOG = logging.getLogger(__name__)


TRANSPORT = None
NOTIFIER = None

ALLOWED_EXMODS = [
    exceptions.__name__,
]
EXTRA_EXMODS = []


TRANSPORT_ALIASES = {
    'neutron.openstack.common.rpc.impl_fake': 'fake',
    'neutron.openstack.common.rpc.impl_qpid': 'qpid',
    'neutron.openstack.common.rpc.impl_kombu': 'rabbit',
    'neutron.openstack.common.rpc.impl_zmq': 'zmq',
    'neutron.rpc.impl_fake': 'fake',
    'neutron.rpc.impl_qpid': 'qpid',
    'neutron.rpc.impl_kombu': 'rabbit',
    'neutron.rpc.impl_zmq': 'zmq',
}


def init(conf):
    global TRANSPORT, NOTIFIER
    exmods = get_allowed_exmods()
    TRANSPORT = messaging.get_transport(conf,
                                        allowed_remote_exmods=exmods,
                                        aliases=TRANSPORT_ALIASES)
    serializer = RequestContextSerializer()
    NOTIFIER = messaging.Notifier(TRANSPORT, serializer=serializer)


def cleanup():
    global TRANSPORT, NOTIFIER
    assert TRANSPORT is not None
    assert NOTIFIER is not None
    TRANSPORT.cleanup()
    TRANSPORT = NOTIFIER = None


def add_extra_exmods(*args):
    EXTRA_EXMODS.extend(args)


def clear_extra_exmods():
    del EXTRA_EXMODS[:]


def get_allowed_exmods():
    return ALLOWED_EXMODS + EXTRA_EXMODS


def get_client(target, version_cap=None, serializer=None):
    assert TRANSPORT is not None
    serializer = RequestContextSerializer(serializer)
    return messaging.RPCClient(TRANSPORT,
                               target,
                               version_cap=version_cap,
                               serializer=serializer)


def get_server(target, endpoints, serializer=None):
    assert TRANSPORT is not None
    serializer = RequestContextSerializer(serializer)
    return messaging.get_rpc_server(TRANSPORT,
                                    target,
                                    endpoints,
                                    executor='eventlet',
                                    serializer=serializer)


def get_notifier(service=None, host=None, publisher_id=None):
    assert NOTIFIER is not None
    if not publisher_id:
        publisher_id = "%s.%s" % (service, host or cfg.CONF.host)
    return NOTIFIER.prepare(publisher_id=publisher_id)


class RequestContextSerializer(om_serializer.Serializer):
    """This serializer is used to convert RPC common context into
    Neutron Context.
    """
    def __init__(self, base=None):
        super(RequestContextSerializer, self).__init__()
        self._base = base

    def serialize_entity(self, ctxt, entity):
        if not self._base:
            return entity
        return self._base.serialize_entity(ctxt, entity)

    def deserialize_entity(self, ctxt, entity):
        if not self._base:
            return entity
        return self._base.deserialize_entity(ctxt, entity)

    def serialize_context(self, ctxt):
        return ctxt.to_dict()

    def deserialize_context(self, ctxt):
        rpc_ctxt_dict = ctxt.copy()
        user_id = rpc_ctxt_dict.pop('user_id', None)
        if not user_id:
            user_id = rpc_ctxt_dict.pop('user', None)
        tenant_id = rpc_ctxt_dict.pop('tenant_id', None)
        if not tenant_id:
            tenant_id = rpc_ctxt_dict.pop('project_id', None)
        return context.Context(user_id, tenant_id,
                               load_admin_roles=False, **rpc_ctxt_dict)


class RpcProxy(object):
    '''
    This class is created to facilitate migration from oslo-incubator
    RPC layer implementation to oslo.messaging and is intended to
    emulate RpcProxy class behaviour using oslo.messaging API once the
    migration is applied.
    '''
    RPC_API_NAMESPACE = None

    def __init__(self, topic, default_version, version_cap=None):
        self.topic = topic
        target = messaging.Target(topic=topic, version=default_version)
        self._client = get_client(target, version_cap=version_cap)

    def make_msg(self, method, **kwargs):
        return {'method': method,
                'namespace': self.RPC_API_NAMESPACE,
                'args': kwargs}

    def call(self, context, msg, **kwargs):
        return self.__call_rpc_method(
            context, msg, rpc_method='call', **kwargs)

    def cast(self, context, msg, **kwargs):
        self.__call_rpc_method(context, msg, rpc_method='cast', **kwargs)

    def fanout_cast(self, context, msg, **kwargs):
        kwargs['fanout'] = True
        self.__call_rpc_method(context, msg, rpc_method='cast', **kwargs)

    def __call_rpc_method(self, context, msg, **kwargs):
        options = dict(
            ((opt, kwargs[opt])
             for opt in ('fanout', 'timeout', 'topic', 'version')
             if kwargs.get(opt))
        )
        if msg['namespace']:
            options['namespace'] = msg['namespace']

        if options:
            callee = self._client.prepare(**options)
        else:
            callee = self._client

        func = getattr(callee, kwargs['rpc_method'])
        return func(context, msg['method'], **msg['args'])


class RpcCallback(object):
    '''
    This class is created to facilitate migration from oslo-incubator
    RPC layer implementation to oslo.messaging and is intended to set
    callback version using oslo.messaging API once the migration is
    applied.
    '''
    RPC_API_VERSION = '1.0'

    def __init__(self):
        super(RpcCallback, self).__init__()
        self.target = messaging.Target(version=self.RPC_API_VERSION)


class Service(service.Service):
    """Service object for binaries running on hosts.

    A service enables rpc by listening to queues based on topic and host.
    """
    def __init__(self, host, topic, manager=None, serializer=None):
        super(Service, self).__init__()
        self.host = host
        self.topic = topic
        self.serializer = serializer
        if manager is None:
            self.manager = self
        else:
            self.manager = manager

    def start(self):
        super(Service, self).start()

        self.conn = create_connection(new=True)
        LOG.debug("Creating Consumer connection for Service %s" %
                  self.topic)

        endpoints = [self.manager]

        # Share this same connection for these Consumers
        self.conn.create_consumer(self.topic, endpoints, fanout=False)

        node_topic = '%s.%s' % (self.topic, self.host)
        self.conn.create_consumer(node_topic, endpoints, fanout=False)

        self.conn.create_consumer(self.topic, endpoints, fanout=True)

        # Hook to allow the manager to do other initializations after
        # the rpc connection is created.
        if callable(getattr(self.manager, 'initialize_service_hook', None)):
            self.manager.initialize_service_hook(self)

        # Consume from all consumers in threads
        self.conn.consume_in_threads()

    def stop(self):
        # Try to shut the connection down, but if we get any sort of
        # errors, go ahead and ignore them, as we're shutting down anyway
        try:
            self.conn.close()
        except Exception:
            pass
        super(Service, self).stop()


class Connection(object):

    def __init__(self):
        super(Connection, self).__init__()
        self.servers = []

    def create_consumer(self, topic, endpoints, fanout=False):
        target = messaging.Target(
            topic=topic, server=cfg.CONF.host, fanout=fanout)
        server = get_server(target, endpoints)
        self.servers.append(server)

    def consume_in_threads(self):
        for server in self.servers:
            server.start()
        return self.servers


# functions
def create_connection(new=True):
    return Connection()


# exceptions
RPCException = messaging.MessagingException
RemoteError = messaging.RemoteError
MessagingTimeout = messaging.MessagingTimeout
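A hedged sketch of the RpcProxy shim in use, assuming init(cfg.CONF) has already run; the class, topic, and method names below are illustrative only, not taken from this commit:

    class L2AgentNotifyAPI(RpcProxy):
        def network_delete(self, context, network_id):
            # Fanout so every listening agent sees the event; the topic
            # override is routed through __call_rpc_method's options dict.
            self.fanout_cast(context,
                             self.make_msg('network_delete',
                                           network_id=network_id),
                             topic='q-agent-notifier-network-delete')

    notifier = L2AgentNotifyAPI('q-agent-notifier', default_version='1.0')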
@ -1,42 +0,0 @@
# Copyright (c) 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Colorizer Code is borrowed from Twisted:
# Copyright (c) 2001-2010 Twisted Matrix Laboratories.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

# describes parameters used by different unit/functional tests
# a plugin-specific testing mechanism should import this dictionary
# and override the values in it if needed (e.g., run_tests.py in
# neutron/plugins/openvswitch/ )
test_config = {}
@ -1,58 +0,0 @@
# Copyright (c) 2012 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

NETWORK = 'network'
SUBNET = 'subnet'
PORT = 'port'
SECURITY_GROUP = 'security_group'
L2POPULATION = 'l2population'
DVR = 'dvr'

CREATE = 'create'
DELETE = 'delete'
UPDATE = 'update'

AGENT = 'q-agent-notifier'
PLUGIN = 'q-plugin'
L3PLUGIN = 'q-l3-plugin'
DHCP = 'q-dhcp-notifer'
FIREWALL_PLUGIN = 'q-firewall-plugin'
METERING_PLUGIN = 'q-metering-plugin'
LOADBALANCER_PLUGIN = 'n-lbaas-plugin'

L3_AGENT = 'l3_agent'
DHCP_AGENT = 'dhcp_agent'
METERING_AGENT = 'metering_agent'
LOADBALANCER_AGENT = 'n-lbaas_agent'


def get_topic_name(prefix, table, operation, host=None):
    """Create a topic name.

    The topic name needs to be synced between the agent and the
    plugin. The plugin will send a fanout message to all of the
    listening agents so that the agents in turn can perform their
    updates accordingly.

    :param prefix: Common prefix for the plugin/agent message queues.
    :param table: The table in question (NETWORK, SUBNET, PORT).
    :param operation: The operation that invokes notification (CREATE,
                      DELETE, UPDATE)
    :param host: Add host to the topic
    :returns: The topic name.
    """
    if host:
        return '%s-%s-%s.%s' % (prefix, table, operation, host)
    return '%s-%s-%s' % (prefix, table, operation)
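For example, the constants above combine as follows (derived directly from get_topic_name; the host name is a placeholder):

    get_topic_name(AGENT, PORT, UPDATE)
    # 'q-agent-notifier-port-update'  (fanout topic shared by all agents)
    get_topic_name(AGENT, PORT, UPDATE, host='compute-1')
    # 'q-agent-notifier-port-update.compute-1'  (host-scoped variant)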
@ -1,310 +0,0 @@
# Copyright 2011, VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Borrowed from nova code base, more utilities will be added/borrowed as and
# when needed.

"""Utilities and helper functions."""

import datetime
import functools
import hashlib
import logging as std_logging
import multiprocessing
import os
import random
import signal
import socket
import uuid

from eventlet.green import subprocess
from oslo.config import cfg

from neutron.common import constants as q_const
from neutron.openstack.common import lockutils
from neutron.openstack.common import log as logging


TIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
LOG = logging.getLogger(__name__)
SYNCHRONIZED_PREFIX = 'neutron-'

synchronized = lockutils.synchronized_with_prefix(SYNCHRONIZED_PREFIX)


class cache_method_results(object):
    """This decorator is intended for object methods only."""

    def __init__(self, func):
        self.func = func
        functools.update_wrapper(self, func)
        self._first_call = True
        self._not_cached = object()

    def _get_from_cache(self, target_self, *args, **kwargs):
        func_name = "%(module)s.%(class)s.%(func_name)s" % {
            'module': target_self.__module__,
            'class': target_self.__class__.__name__,
            'func_name': self.func.__name__,
        }
        key = (func_name,) + args
        if kwargs:
            key += dict2tuple(kwargs)
        try:
            item = target_self._cache.get(key, self._not_cached)
        except TypeError:
            LOG.debug(_("Method %(func_name)s cannot be cached due to "
                        "unhashable parameters: args: %(args)s, kwargs: "
                        "%(kwargs)s"),
                      {'func_name': func_name,
                       'args': args,
                       'kwargs': kwargs})
            return self.func(target_self, *args, **kwargs)

        if item is self._not_cached:
            item = self.func(target_self, *args, **kwargs)
            target_self._cache.set(key, item, None)

        return item

    def __call__(self, target_self, *args, **kwargs):
        if not hasattr(target_self, '_cache'):
            raise NotImplementedError(
                "Instance of class %(module)s.%(class)s must contain _cache "
                "attribute" % {
                    'module': target_self.__module__,
                    'class': target_self.__class__.__name__})
        if not target_self._cache:
            if self._first_call:
                LOG.debug(_("Instance of class %(module)s.%(class)s doesn't "
                            "contain attribute _cache therefore results "
                            "cannot be cached for %(func_name)s."),
                          {'module': target_self.__module__,
                           'class': target_self.__class__.__name__,
                           'func_name': self.func.__name__})
                self._first_call = False
            return self.func(target_self, *args, **kwargs)
        return self._get_from_cache(target_self, *args, **kwargs)

    def __get__(self, obj, objtype):
        return functools.partial(self.__call__, obj)


def read_cached_file(filename, cache_info, reload_func=None):
    """Read from a file if it has been modified.

    :param cache_info: dictionary to hold opaque cache.
    :param reload_func: optional function to be called with data when
                        file is reloaded due to a modification.

    :returns: data from file

    """
    mtime = os.path.getmtime(filename)
    if not cache_info or mtime != cache_info.get('mtime'):
        LOG.debug(_("Reloading cached file %s"), filename)
        with open(filename) as fap:
            cache_info['data'] = fap.read()
        cache_info['mtime'] = mtime
        if reload_func:
            reload_func(cache_info['data'])
    return cache_info['data']


def find_config_file(options, config_file):
    """Return the first config file found.

    We search for the paste config file in the following order:
    * If --config-file option is used, use that
    * Search for the configuration files via common cfg directories
    :retval Full path to config file, or None if no config file found
    """
    fix_path = lambda p: os.path.abspath(os.path.expanduser(p))
    if options.get('config_file'):
        if os.path.exists(options['config_file']):
            return fix_path(options['config_file'])

    dir_to_common = os.path.dirname(os.path.abspath(__file__))
    root = os.path.join(dir_to_common, '..', '..', '..', '..')
    # Handle standard directory search for the config file
    config_file_dirs = [fix_path(os.path.join(os.getcwd(), 'etc')),
                        fix_path(os.path.join('~', '.neutron-venv', 'etc',
                                              'neutron')),
                        fix_path('~'),
                        os.path.join(cfg.CONF.state_path, 'etc'),
                        os.path.join(cfg.CONF.state_path, 'etc', 'neutron'),
                        fix_path(os.path.join('~', '.local',
                                              'etc', 'neutron')),
                        '/usr/etc/neutron',
                        '/usr/local/etc/neutron',
                        '/etc/neutron/',
                        '/etc']

    if 'plugin' in options:
        config_file_dirs = [
            os.path.join(x, 'neutron', 'plugins', options['plugin'])
            for x in config_file_dirs
        ]

    if os.path.exists(os.path.join(root, 'plugins')):
        plugins = [fix_path(os.path.join(root, 'plugins', p, 'etc'))
                   for p in os.listdir(os.path.join(root, 'plugins'))]
        plugins = [p for p in plugins if os.path.isdir(p)]
        config_file_dirs.extend(plugins)

    for cfg_dir in config_file_dirs:
        cfg_file = os.path.join(cfg_dir, config_file)
        if os.path.exists(cfg_file):
            return cfg_file


def _subprocess_setup():
    # Python installs a SIGPIPE handler by default. This is usually not what
    # non-Python subprocesses expect.
    signal.signal(signal.SIGPIPE, signal.SIG_DFL)


def subprocess_popen(args, stdin=None, stdout=None, stderr=None, shell=False,
                     env=None):
    return subprocess.Popen(args, shell=shell, stdin=stdin, stdout=stdout,
                            stderr=stderr, preexec_fn=_subprocess_setup,
                            close_fds=True, env=env)


def parse_mappings(mapping_list, unique_values=True):
    """Parse a list of mapping strings into a dictionary.

    :param mapping_list: a list of strings of the form '<key>:<value>'
    :param unique_values: values must be unique if True
    :returns: a dict mapping keys to values
    """
    mappings = {}
    for mapping in mapping_list:
        mapping = mapping.strip()
        if not mapping:
            continue
        split_result = mapping.split(':')
        if len(split_result) != 2:
            raise ValueError(_("Invalid mapping: '%s'") % mapping)
        key = split_result[0].strip()
        if not key:
            raise ValueError(_("Missing key in mapping: '%s'") % mapping)
        value = split_result[1].strip()
        if not value:
            raise ValueError(_("Missing value in mapping: '%s'") % mapping)
        if key in mappings:
            raise ValueError(_("Key %(key)s in mapping: '%(mapping)s' not "
                               "unique") % {'key': key, 'mapping': mapping})
        if unique_values and value in mappings.itervalues():
            raise ValueError(_("Value %(value)s in mapping: '%(mapping)s' "
                               "not unique") % {'value': value,
                                                'mapping': mapping})
        mappings[key] = value
    return mappings


def get_hostname():
    return socket.gethostname()


def compare_elements(a, b):
    """Compare whether a and b have the same elements.

    This method doesn't consider ordering
    """
    if a is None:
        a = []
    if b is None:
        b = []
    return set(a) == set(b)


def dict2str(dic):
    return ','.join("%s=%s" % (key, val)
                    for key, val in sorted(dic.iteritems()))


def str2dict(string):
    res_dict = {}
    for keyvalue in string.split(','):
        (key, value) = keyvalue.split('=', 1)
        res_dict[key] = value
    return res_dict


def dict2tuple(d):
    items = d.items()
    items.sort()
    return tuple(items)


def diff_list_of_dict(old_list, new_list):
    new_set = set([dict2str(l) for l in new_list])
    old_set = set([dict2str(l) for l in old_list])
    added = new_set - old_set
    removed = old_set - new_set
    return [str2dict(a) for a in added], [str2dict(r) for r in removed]


def is_extension_supported(plugin, ext_alias):
    return ext_alias in getattr(
        plugin, "supported_extension_aliases", [])


def log_opt_values(log):
    cfg.CONF.log_opt_values(log, std_logging.DEBUG)


def is_valid_vlan_tag(vlan):
    return q_const.MIN_VLAN_TAG <= vlan <= q_const.MAX_VLAN_TAG


def get_random_mac(base_mac):
    mac = [int(base_mac[0], 16), int(base_mac[1], 16),
           int(base_mac[2], 16), random.randint(0x00, 0xff),
           random.randint(0x00, 0xff), random.randint(0x00, 0xff)]
    if base_mac[3] != '00':
        mac[3] = int(base_mac[3], 16)
    return ':'.join(["%02x" % x for x in mac])


def get_random_string(length):
    """Get a random hex string of the specified length.

    based on Cinder library
    cinder/transfer/api.py
    """
    rndstr = ""
    random.seed(datetime.datetime.now().microsecond)
    while len(rndstr) < length:
        rndstr += hashlib.sha224(str(random.random())).hexdigest()

    return rndstr[0:length]


def get_dhcp_agent_device_id(network_id, host):
    # Split host so as to always use only the hostname and
    # not the domain name. This will guarantee consistency
    # whether a local hostname or an fqdn is passed in.
    local_hostname = host.split('.')[0]
    host_uuid = uuid.uuid5(uuid.NAMESPACE_DNS, str(local_hostname))
    return 'dhcp%s-%s' % (host_uuid, network_id)


def cpu_count():
    try:
        return multiprocessing.cpu_count()
    except NotImplementedError:
        return 1
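As a quick illustration of parse_mappings above (the bridge-mapping inputs are hypothetical):

    parse_mappings(['physnet1:br-eth1', ' physnet2:br-ex '])
    # {'physnet1': 'br-eth1', 'physnet2': 'br-ex'}   (entries are stripped)
    parse_mappings(['physnet1:br-a', 'physnet2:br-a'])
    # raises ValueError: value 'br-a' is not unique
    # (pass unique_values=False to allow duplicate values)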
@ -1,174 +0,0 @@
# Copyright 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Context: context for security/db session."""

import copy

import datetime

from neutron.db import api as db_api
from neutron.openstack.common import context as common_context
from neutron.openstack.common import local
from neutron.openstack.common import log as logging
from neutron import policy


LOG = logging.getLogger(__name__)


class ContextBase(common_context.RequestContext):
    """Security context and request information.

    Represents the user taking a given action within the system.

    """

    def __init__(self, user_id, tenant_id, is_admin=None, read_deleted="no",
                 roles=None, timestamp=None, load_admin_roles=True,
                 request_id=None, tenant_name=None, user_name=None,
                 overwrite=True, **kwargs):
        """Object initialization.

        :param read_deleted: 'no' indicates deleted records are hidden, 'yes'
            indicates deleted records are visible, 'only' indicates that
            *only* deleted records are visible.

        :param overwrite: Set to False to ensure that the greenthread local
            copy of the index is not overwritten.

        :param kwargs: Extra arguments that might be present, but we ignore
            because they possibly came in from older rpc messages.
        """
        super(ContextBase, self).__init__(user=user_id, tenant=tenant_id,
                                          is_admin=is_admin,
                                          request_id=request_id)
        self.user_name = user_name
        self.tenant_name = tenant_name

        self.read_deleted = read_deleted
        if not timestamp:
            timestamp = datetime.datetime.utcnow()
        self.timestamp = timestamp
        self._session = None
        self.roles = roles or []
        if self.is_admin is None:
            self.is_admin = policy.check_is_admin(self)
        elif self.is_admin and load_admin_roles:
            # Ensure context is populated with admin roles
            admin_roles = policy.get_admin_roles()
            if admin_roles:
                self.roles = list(set(self.roles) | set(admin_roles))
        # Allow openstack.common.log to access the context
        if overwrite or not hasattr(local.store, 'context'):
            local.store.context = self

        # Log only once the context has been configured to prevent
        # format errors.
        if kwargs:
            LOG.debug(_('Arguments dropped when creating '
                        'context: %s'), kwargs)

    @property
    def project_id(self):
        return self.tenant

    @property
    def tenant_id(self):
        return self.tenant

    @tenant_id.setter
    def tenant_id(self, tenant_id):
        self.tenant = tenant_id

    @property
    def user_id(self):
        return self.user

    @user_id.setter
    def user_id(self, user_id):
        self.user = user_id

    def _get_read_deleted(self):
        return self._read_deleted

    def _set_read_deleted(self, read_deleted):
        if read_deleted not in ('no', 'yes', 'only'):
            raise ValueError(_("read_deleted can only be one of 'no', "
                               "'yes' or 'only', not %r") % read_deleted)
        self._read_deleted = read_deleted

    def _del_read_deleted(self):
        del self._read_deleted

    read_deleted = property(_get_read_deleted, _set_read_deleted,
                            _del_read_deleted)

    def to_dict(self):
        return {'user_id': self.user_id,
                'tenant_id': self.tenant_id,
                'project_id': self.project_id,
                'is_admin': self.is_admin,
                'read_deleted': self.read_deleted,
                'roles': self.roles,
                'timestamp': str(self.timestamp),
                'request_id': self.request_id,
                'tenant': self.tenant,
                'user': self.user,
                'tenant_name': self.tenant_name,
                'project_name': self.tenant_name,
                'user_name': self.user_name,
                }

    @classmethod
    def from_dict(cls, values):
        return cls(**values)

    def elevated(self, read_deleted=None):
        """Return a version of this context with admin flag set."""
        context = copy.copy(self)
        context.is_admin = True

        if 'admin' not in [x.lower() for x in context.roles]:
            context.roles.append('admin')

        if read_deleted is not None:
            context.read_deleted = read_deleted

        return context


class Context(ContextBase):
    @property
    def session(self):
        if self._session is None:
            self._session = db_api.get_session()
        return self._session


def get_admin_context(read_deleted="no", load_admin_roles=True):
    return Context(user_id=None,
                   tenant_id=None,
                   is_admin=True,
                   read_deleted=read_deleted,
                   load_admin_roles=load_admin_roles,
                   overwrite=False)


def get_admin_context_without_session(read_deleted="no"):
    return ContextBase(user_id=None,
                       tenant_id=None,
                       is_admin=True,
                       read_deleted=read_deleted)
@ -1,219 +0,0 @@
# Copyright (c) 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from eventlet import greenthread

from oslo.config import cfg
from oslo.db import exception as db_exc
import sqlalchemy as sa
from sqlalchemy.orm import exc

from neutron.common import rpc as n_rpc
from neutron.db import model_base
from neutron.db import models_v2
from neutron.extensions import agent as ext_agent
from neutron import manager
from neutron.openstack.common import excutils
from neutron.openstack.common import jsonutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import timeutils

LOG = logging.getLogger(__name__)
cfg.CONF.register_opt(
    cfg.IntOpt('agent_down_time', default=75,
               help=_("Seconds to regard the agent is down; should be at "
                      "least twice report_interval, to be sure the "
                      "agent is down for good.")))


class Agent(model_base.BASEV2, models_v2.HasId):
    """Represents agents running in neutron deployments."""

    __table_args__ = (
        sa.UniqueConstraint('agent_type', 'host',
                            name='uniq_agents0agent_type0host'),
    )

    # L3 agent, DHCP agent, OVS agent, LinuxBridge
    agent_type = sa.Column(sa.String(255), nullable=False)
    binary = sa.Column(sa.String(255), nullable=False)
    # TOPIC is a fanout exchange topic
    topic = sa.Column(sa.String(255), nullable=False)
    # TOPIC.host is a target topic
    host = sa.Column(sa.String(255), nullable=False)
    admin_state_up = sa.Column(sa.Boolean, default=True,
                               nullable=False)
    # the time when first report came from agents
    created_at = sa.Column(sa.DateTime, nullable=False)
    # the time when first report came after agents start
    started_at = sa.Column(sa.DateTime, nullable=False)
    # updated when agents report
    heartbeat_timestamp = sa.Column(sa.DateTime, nullable=False)
    # description is note for admin user
    description = sa.Column(sa.String(255))
    # configurations: a json dict string, I think 4095 is enough
    configurations = sa.Column(sa.String(4095), nullable=False)

    @property
    def is_active(self):
        return not AgentDbMixin.is_agent_down(self.heartbeat_timestamp)


class AgentDbMixin(ext_agent.AgentPluginBase):
    """Mixin class to add agent extension to db_base_plugin_v2."""

    def _get_agent(self, context, id):
        try:
            agent = self._get_by_id(context, Agent, id)
        except exc.NoResultFound:
            raise ext_agent.AgentNotFound(id=id)
        return agent

    @classmethod
    def is_agent_down(cls, heart_beat_time):
        return timeutils.is_older_than(heart_beat_time,
                                       cfg.CONF.agent_down_time)

    def get_configuration_dict(self, agent_db):
        try:
            conf = jsonutils.loads(agent_db.configurations)
        except Exception:
            msg = _('Configuration for agent %(agent_type)s on host %(host)s'
                    ' is invalid.')
            LOG.warn(msg, {'agent_type': agent_db.agent_type,
                           'host': agent_db.host})
            conf = {}
        return conf

    def _make_agent_dict(self, agent, fields=None):
        attr = ext_agent.RESOURCE_ATTRIBUTE_MAP.get(
            ext_agent.RESOURCE_NAME + 's')
        res = dict((k, agent[k]) for k in attr
                   if k not in ['alive', 'configurations'])
        res['alive'] = not AgentDbMixin.is_agent_down(
            res['heartbeat_timestamp'])
        res['configurations'] = self.get_configuration_dict(agent)
        return self._fields(res, fields)

    def delete_agent(self, context, id):
        with context.session.begin(subtransactions=True):
            agent = self._get_agent(context, id)
            context.session.delete(agent)

    def update_agent(self, context, id, agent):
        agent_data = agent['agent']
        with context.session.begin(subtransactions=True):
            agent = self._get_agent(context, id)
            agent.update(agent_data)
            return self._make_agent_dict(agent)

    def get_agents_db(self, context, filters=None):
        query = self._get_collection_query(context, Agent, filters=filters)
        return query.all()

    def get_agents(self, context, filters=None, fields=None):
        return self._get_collection(context, Agent,
                                    self._make_agent_dict,
                                    filters=filters, fields=fields)

    def _get_agent_by_type_and_host(self, context, agent_type, host):
        query = self._model_query(context, Agent)
        try:
            agent_db = query.filter(Agent.agent_type == agent_type,
                                    Agent.host == host).one()
            return agent_db
        except exc.NoResultFound:
            raise ext_agent.AgentNotFoundByTypeHost(agent_type=agent_type,
                                                    host=host)
        except exc.MultipleResultsFound:
            raise ext_agent.MultipleAgentFoundByTypeHost(agent_type=agent_type,
                                                         host=host)

    def get_agent(self, context, id, fields=None):
        agent = self._get_agent(context, id)
        return self._make_agent_dict(agent, fields)

    def _create_or_update_agent(self, context, agent):
        with context.session.begin(subtransactions=True):
            res_keys = ['agent_type', 'binary', 'host', 'topic']
            res = dict((k, agent[k]) for k in res_keys)

            configurations_dict = agent.get('configurations', {})
            res['configurations'] = jsonutils.dumps(configurations_dict)
            current_time = timeutils.utcnow()
            try:
                agent_db = self._get_agent_by_type_and_host(
                    context, agent['agent_type'], agent['host'])
                res['heartbeat_timestamp'] = current_time
                if agent.get('start_flag'):
                    res['started_at'] = current_time
                greenthread.sleep(0)
                agent_db.update(res)
            except ext_agent.AgentNotFoundByTypeHost:
                greenthread.sleep(0)
                res['created_at'] = current_time
                res['started_at'] = current_time
                res['heartbeat_timestamp'] = current_time
                res['admin_state_up'] = True
                agent_db = Agent(**res)
                greenthread.sleep(0)
                context.session.add(agent_db)
            greenthread.sleep(0)

    def create_or_update_agent(self, context, agent):
        """Create or update agent according to report."""

        try:
            return self._create_or_update_agent(context, agent)
        except db_exc.DBDuplicateEntry as e:
            with excutils.save_and_reraise_exception() as ctxt:
                if e.columns == ['agent_type', 'host']:
                    # It might happen that two or more concurrent transactions
                    # are trying to insert new rows having the same value of
                    # (agent_type, host) pair at the same time (if there has
                    # been no such entry in the table and multiple agent status
                    # updates are being processed at the moment). In this case
                    # having a unique constraint on (agent_type, host) columns
                    # guarantees that only one transaction will succeed and
                    # insert a new agent entry, others will fail and be rolled
                    # back. That means we must retry them one more time: no
                    # INSERTs will be issued, because
                    # _get_agent_by_type_and_host() will return the existing
                    # agent entry, which will be updated multiple times
                    ctxt.reraise = False
                    return self._create_or_update_agent(context, agent)


class AgentExtRpcCallback(n_rpc.RpcCallback):
    """Processes the rpc report in plugin implementations."""

    RPC_API_VERSION = '1.0'
    START_TIME = timeutils.utcnow()

    def __init__(self, plugin=None):
        super(AgentExtRpcCallback, self).__init__()
        self.plugin = plugin

    def report_state(self, context, **kwargs):
        """Report state from agent to server."""
        time = kwargs['time']
        time = timeutils.parse_strtime(time)
        if self.START_TIME > time:
            LOG.debug(_("Message with invalid timestamp received"))
            return
        agent_state = kwargs['agent_state']['agent_state']
        if not self.plugin:
            self.plugin = manager.NeutronManager.get_plugin()
        self.plugin.create_or_update_agent(context, agent_state)
@ -1,226 +0,0 @@
# Copyright (c) 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo.config import cfg
import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy.orm import exc
from sqlalchemy.orm import joinedload

from neutron.common import constants
from neutron.common import utils
from neutron.db import agents_db
from neutron.db import model_base
from neutron.extensions import agent as ext_agent
from neutron.extensions import dhcpagentscheduler
from neutron.openstack.common import log as logging


LOG = logging.getLogger(__name__)

AGENTS_SCHEDULER_OPTS = [
    cfg.StrOpt('network_scheduler_driver',
               default='neutron.scheduler.'
                       'dhcp_agent_scheduler.ChanceScheduler',
               help=_('Driver to use for scheduling network to DHCP agent')),
    cfg.BoolOpt('network_auto_schedule', default=True,
                help=_('Allow auto scheduling networks to DHCP agent.')),
    cfg.IntOpt('dhcp_agents_per_network', default=1,
               help=_('Number of DHCP agents scheduled to host a network.')),
]

cfg.CONF.register_opts(AGENTS_SCHEDULER_OPTS)


class NetworkDhcpAgentBinding(model_base.BASEV2):
    """Represents binding between neutron networks and DHCP agents."""

    network_id = sa.Column(sa.String(36),
                           sa.ForeignKey("networks.id", ondelete='CASCADE'),
                           primary_key=True)
    dhcp_agent = orm.relation(agents_db.Agent)
    dhcp_agent_id = sa.Column(sa.String(36),
                              sa.ForeignKey("agents.id",
                                            ondelete='CASCADE'),
                              primary_key=True)


class AgentSchedulerDbMixin(agents_db.AgentDbMixin):
    """Common class for agent scheduler mixins."""

    # agent notifiers to handle agent update operations;
    # should be updated by plugins;
    agent_notifiers = {
        constants.AGENT_TYPE_DHCP: None,
        constants.AGENT_TYPE_L3: None,
        constants.AGENT_TYPE_LOADBALANCER: None,
    }

    @staticmethod
    def is_eligible_agent(active, agent):
        if active is None:
            # filtering by activeness is disabled, all agents are eligible
            return True
        else:
            # note(rpodolyaka): original behaviour is saved here: if active
            #                   filter is set, only agents which are 'up'
            #                   (i.e. have a recent heartbeat timestamp)
            #                   are eligible, even if active is False
            return not agents_db.AgentDbMixin.is_agent_down(
                agent['heartbeat_timestamp'])

    def update_agent(self, context, id, agent):
        original_agent = self.get_agent(context, id)
        result = super(AgentSchedulerDbMixin, self).update_agent(
            context, id, agent)
        agent_data = agent['agent']
        agent_notifier = self.agent_notifiers.get(original_agent['agent_type'])
        if (agent_notifier and
            'admin_state_up' in agent_data and
            original_agent['admin_state_up'] != agent_data['admin_state_up']):
            agent_notifier.agent_updated(context,
                                         agent_data['admin_state_up'],
                                         original_agent['host'])
        return result


class DhcpAgentSchedulerDbMixin(dhcpagentscheduler
                                .DhcpAgentSchedulerPluginBase,
                                AgentSchedulerDbMixin):
    """Mixin class to add DHCP agent scheduler extension to db_base_plugin_v2.
    """

    network_scheduler = None

    def get_dhcp_agents_hosting_networks(
            self, context, network_ids, active=None):
        if not network_ids:
            return []
        query = context.session.query(NetworkDhcpAgentBinding)
        query = query.options(joinedload('dhcp_agent'))
        if len(network_ids) == 1:
            query = query.filter(
                NetworkDhcpAgentBinding.network_id == network_ids[0])
        elif network_ids:
            query = query.filter(
                NetworkDhcpAgentBinding.network_id.in_(network_ids))
        if active is not None:
            query = (query.filter(agents_db.Agent.admin_state_up == active))

        return [binding.dhcp_agent
                for binding in query
                if AgentSchedulerDbMixin.is_eligible_agent(active,
                                                           binding.dhcp_agent)]

    def add_network_to_dhcp_agent(self, context, id, network_id):
        self._get_network(context, network_id)
        with context.session.begin(subtransactions=True):
            agent_db = self._get_agent(context, id)
            if (agent_db['agent_type'] != constants.AGENT_TYPE_DHCP or
                not agent_db['admin_state_up']):
                raise dhcpagentscheduler.InvalidDHCPAgent(id=id)
            dhcp_agents = self.get_dhcp_agents_hosting_networks(
                context, [network_id])
            for dhcp_agent in dhcp_agents:
                if id == dhcp_agent.id:
                    raise dhcpagentscheduler.NetworkHostedByDHCPAgent(
                        network_id=network_id, agent_id=id)
            binding = NetworkDhcpAgentBinding()
            binding.dhcp_agent_id = id
            binding.network_id = network_id
            context.session.add(binding)
        dhcp_notifier = self.agent_notifiers.get(constants.AGENT_TYPE_DHCP)
        if dhcp_notifier:
            dhcp_notifier.network_added_to_agent(
                context, network_id, agent_db.host)

    def remove_network_from_dhcp_agent(self, context, id, network_id):
        agent = self._get_agent(context, id)
        with context.session.begin(subtransactions=True):
            try:
                query = context.session.query(NetworkDhcpAgentBinding)
                binding = query.filter(
                    NetworkDhcpAgentBinding.network_id == network_id,
                    NetworkDhcpAgentBinding.dhcp_agent_id == id).one()
            except exc.NoResultFound:
                raise dhcpagentscheduler.NetworkNotHostedByDhcpAgent(
                    network_id=network_id, agent_id=id)

            # reserve the port, so the ip is reused on a subsequent add
            device_id = utils.get_dhcp_agent_device_id(network_id,
                                                       agent['host'])
            filters = dict(device_id=[device_id])
            ports = self.get_ports(context, filters=filters)
            for port in ports:
                port['device_id'] = constants.DEVICE_ID_RESERVED_DHCP_PORT
                self.update_port(context, port['id'], dict(port=port))

            context.session.delete(binding)
        dhcp_notifier = self.agent_notifiers.get(constants.AGENT_TYPE_DHCP)
        if dhcp_notifier:
            dhcp_notifier.network_removed_from_agent(
                context, network_id, agent.host)

    def list_networks_on_dhcp_agent(self, context, id):
        query = context.session.query(NetworkDhcpAgentBinding.network_id)
        query = query.filter(NetworkDhcpAgentBinding.dhcp_agent_id == id)

        net_ids = [item[0] for item in query]
        if net_ids:
            return {'networks':
                    self.get_networks(context, filters={'id': net_ids})}
        else:
            return {'networks': []}

    def list_active_networks_on_active_dhcp_agent(self, context, host):
        try:
            agent = self._get_agent_by_type_and_host(
                context, constants.AGENT_TYPE_DHCP, host)
        except ext_agent.AgentNotFoundByTypeHost:
            LOG.debug("DHCP Agent not found on host %s", host)
            return []

        if not agent.admin_state_up:
            return []
        query = context.session.query(NetworkDhcpAgentBinding.network_id)
        query = query.filter(NetworkDhcpAgentBinding.dhcp_agent_id == agent.id)

        net_ids = [item[0] for item in query]
        if net_ids:
            return self.get_networks(
                context,
                filters={'id': net_ids, 'admin_state_up': [True]}
            )
        else:
            return []

    def list_dhcp_agents_hosting_network(self, context, network_id):
        dhcp_agents = self.get_dhcp_agents_hosting_networks(
            context, [network_id])
        agent_ids = [dhcp_agent.id for dhcp_agent in dhcp_agents]
        if agent_ids:
            return {
                'agents': self.get_agents(context, filters={'id': agent_ids})}
        else:
            return {'agents': []}

    def schedule_network(self, context, created_network):
        if self.network_scheduler:
            return self.network_scheduler.schedule(
                self, context, created_network)

    def auto_schedule_networks(self, context, host):
        if self.network_scheduler:
            self.network_scheduler.auto_schedule_networks(self, context, host)
@ -1,147 +0,0 @@
# Copyright 2013 VMware, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

import sqlalchemy as sa
from sqlalchemy import orm

from neutron.api.v2 import attributes as attr
from neutron.db import db_base_plugin_v2
from neutron.db import model_base
from neutron.db import models_v2
from neutron.extensions import allowedaddresspairs as addr_pair


class AllowedAddressPair(model_base.BASEV2):
    port_id = sa.Column(sa.String(36),
                        sa.ForeignKey('ports.id', ondelete="CASCADE"),
                        primary_key=True)
    mac_address = sa.Column(sa.String(32), nullable=False, primary_key=True)
    ip_address = sa.Column(sa.String(64), nullable=False, primary_key=True)

    port = orm.relationship(
        models_v2.Port,
        backref=orm.backref("allowed_address_pairs",
                            lazy="joined", cascade="delete"))


class AllowedAddressPairsMixin(object):
    """Mixin class for allowed address pairs."""

    def _process_create_allowed_address_pairs(self, context, port,
                                              allowed_address_pairs):
        if not attr.is_attr_set(allowed_address_pairs):
            return []
        with context.session.begin(subtransactions=True):
            for address_pair in allowed_address_pairs:
                # use port.mac_address if no mac address in address pair
                if 'mac_address' not in address_pair:
                    address_pair['mac_address'] = port['mac_address']
                db_pair = AllowedAddressPair(
                    port_id=port['id'],
                    mac_address=address_pair['mac_address'],
                    ip_address=address_pair['ip_address'])
                context.session.add(db_pair)

        return allowed_address_pairs

    def get_allowed_address_pairs(self, context, port_id):
        pairs = (context.session.query(AllowedAddressPair).
                 filter_by(port_id=port_id))
        return [self._make_allowed_address_pairs_dict(pair)
                for pair in pairs]

    def _extend_port_dict_allowed_address_pairs(self, port_res, port_db):
        # If port_db is provided, allowed address pairs will be accessed via
        # sqlalchemy models. As they're loaded together with ports this
        # will not cause an extra query.
        allowed_address_pairs = [
            self._make_allowed_address_pairs_dict(address_pair) for
            address_pair in port_db.allowed_address_pairs]
        port_res[addr_pair.ADDRESS_PAIRS] = allowed_address_pairs
        return port_res

    # Register dict extend functions for ports
    db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
        attr.PORTS, ['_extend_port_dict_allowed_address_pairs'])

    def _delete_allowed_address_pairs(self, context, id):
        query = self._model_query(context, AllowedAddressPair)
        with context.session.begin(subtransactions=True):
            query.filter(AllowedAddressPair.port_id == id).delete()

    def _make_allowed_address_pairs_dict(self, allowed_address_pairs,
                                         fields=None):
        res = {'mac_address': allowed_address_pairs['mac_address'],
               'ip_address': allowed_address_pairs['ip_address']}
        return self._fields(res, fields)

    def _has_address_pairs(self, port):
        return (attr.is_attr_set(port['port'][addr_pair.ADDRESS_PAIRS])
                and port['port'][addr_pair.ADDRESS_PAIRS] != [])

    def _check_update_has_allowed_address_pairs(self, port):
        """Determine if request has an allowed address pair.

        Return True if the port parameter has a non-empty
        'allowed_address_pairs' attribute. Otherwise returns False.
        """
        return (addr_pair.ADDRESS_PAIRS in port['port'] and
                self._has_address_pairs(port))

    def _check_update_deletes_allowed_address_pairs(self, port):
        """Determine if request deletes address pair.

        Return True if the port has an allowed address pair and its value
        is either [] or not is_attr_set; otherwise return False.
        """
        return (addr_pair.ADDRESS_PAIRS in port['port'] and
                not self._has_address_pairs(port))

    def is_address_pairs_attribute_updated(self, port, update_attrs):
        """Check if the address pairs attribute is being updated.

        Returns True if there is an update. This can be used to decide
        if a port update notification should be sent to agents or third
        party controllers.
        """

        new_pairs = update_attrs.get(addr_pair.ADDRESS_PAIRS)
        if new_pairs is None:
            return False
        old_pairs = port.get(addr_pair.ADDRESS_PAIRS)

        # Missing or unchanged address pairs in attributes mean no update
        return new_pairs != old_pairs

    def update_address_pairs_on_port(self, context, port_id, port,
                                     original_port, updated_port):
        """Update allowed address pairs on port.

        Returns True if an update notification is required. Notification
        is not done here because other changes on the port may need
        notification. This method is expected to be called within
        a transaction.
        """
        new_pairs = port['port'].get(addr_pair.ADDRESS_PAIRS)

        if self.is_address_pairs_attribute_updated(original_port,
                                                   port['port']):
            updated_port[addr_pair.ADDRESS_PAIRS] = new_pairs
            self._delete_allowed_address_pairs(context, port_id)
            self._process_create_allowed_address_pairs(
                context, updated_port, new_pairs)
            return True

        return False
@ -1,84 +0,0 @@
# Copyright 2011 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo.config import cfg
from oslo.db.sqlalchemy import session
import sqlalchemy as sql

from neutron.db import model_base
from neutron.openstack.common import log as logging

LOG = logging.getLogger(__name__)

BASE = model_base.BASEV2

_FACADE = None


def _create_facade_lazily():
    global _FACADE

    if _FACADE is None:
        _FACADE = session.EngineFacade.from_config(cfg.CONF, sqlite_fk=True)

    return _FACADE


def configure_db():
    """Configure database.

    Establish the database, create an engine if needed, and register
    the models.
    """
    register_models()


def clear_db(base=BASE):
    unregister_models(base)


def get_engine():
    """Helper method to grab engine."""
    facade = _create_facade_lazily()
    return facade.get_engine()


def get_session(autocommit=True, expire_on_commit=False):
    """Helper method to grab session."""
    facade = _create_facade_lazily()
    return facade.get_session(autocommit=autocommit,
                              expire_on_commit=expire_on_commit)


def register_models(base=BASE):
    """Register Models and create properties."""
    try:
        facade = _create_facade_lazily()
        engine = facade.get_engine()
        base.metadata.create_all(engine)
    except sql.exc.OperationalError as e:
        LOG.info(_("Database registration exception: %s"), e)
        return False
    return True


def unregister_models(base=BASE):
    """Unregister Models, useful clearing out data before testing."""
    try:
        facade = _create_facade_lazily()
        engine = facade.get_engine()
        base.metadata.drop_all(engine)
    except Exception:
        LOG.exception(_("Database exception"))
File diff suppressed because it is too large
@ -1,287 +0,0 @@
# Copyright (c) 2012 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from oslo.config import cfg
from oslo.db import exception as db_exc

from neutron.api.v2 import attributes
from neutron.common import constants
from neutron.common import exceptions as n_exc
from neutron.common import utils
from neutron.extensions import portbindings
from neutron import manager
from neutron.openstack.common import excutils
from neutron.openstack.common import log as logging


LOG = logging.getLogger(__name__)


class DhcpRpcCallbackMixin(object):
    """A mix-in that enables DHCP agent support in plugin implementations."""

    def _get_active_networks(self, context, **kwargs):
        """Retrieve and return a list of the active networks."""
        host = kwargs.get('host')
        plugin = manager.NeutronManager.get_plugin()
        if utils.is_extension_supported(
            plugin, constants.DHCP_AGENT_SCHEDULER_EXT_ALIAS):
            if cfg.CONF.network_auto_schedule:
                plugin.auto_schedule_networks(context, host)
            nets = plugin.list_active_networks_on_active_dhcp_agent(
                context, host)
        else:
            filters = dict(admin_state_up=[True])
            nets = plugin.get_networks(context, filters=filters)
        return nets

    def _port_action(self, plugin, context, port, action):
        """Perform port operations taking care of concurrency issues."""
        try:
            if action == 'create_port':
                return plugin.create_port(context, port)
            elif action == 'update_port':
                return plugin.update_port(context, port['id'], port['port'])
            else:
                msg = _('Unrecognized action')
                raise n_exc.Invalid(message=msg)
        except (db_exc.DBError, n_exc.NetworkNotFound,
                n_exc.SubnetNotFound, n_exc.IpAddressGenerationFailure) as e:
            with excutils.save_and_reraise_exception(reraise=False) as ctxt:
                if isinstance(e, n_exc.IpAddressGenerationFailure):
                    # Check if the subnet still exists and if it does not,
                    # this is the reason why the ip address generation failed.
                    # In any other unlikely event re-raise
                    try:
                        subnet_id = port['port']['fixed_ips'][0]['subnet_id']
                        plugin.get_subnet(context, subnet_id)
                    except n_exc.SubnetNotFound:
                        pass
                    else:
                        ctxt.reraise = True
                net_id = port['port']['network_id']
                LOG.warn(_("Action %(action)s for network %(net_id)s "
                           "could not complete successfully: %(reason)s")
                         % {"action": action, "net_id": net_id, 'reason': e})

    def get_active_networks(self, context, **kwargs):
        """Retrieve and return a list of the active network ids."""
        # NOTE(arosen): This method is no longer used by the DHCP agent but is
        # left so that neutron-dhcp-agents will still continue to work if
        # neutron-server is upgraded and not the agent.
        host = kwargs.get('host')
        LOG.debug(_('get_active_networks requested from %s'), host)
        nets = self._get_active_networks(context, **kwargs)
        return [net['id'] for net in nets]

    def get_active_networks_info(self, context, **kwargs):
        """Returns all the networks/subnets/ports in system."""
        host = kwargs.get('host')
        LOG.debug(_('get_active_networks_info from %s'), host)
        networks = self._get_active_networks(context, **kwargs)
        plugin = manager.NeutronManager.get_plugin()
        filters = {'network_id': [network['id'] for network in networks]}
        ports = plugin.get_ports(context, filters=filters)
        filters['enable_dhcp'] = [True]
        subnets = plugin.get_subnets(context, filters=filters)

        for network in networks:
            network['subnets'] = [subnet for subnet in subnets
                                  if subnet['network_id'] == network['id']]
            network['ports'] = [port for port in ports
                                if port['network_id'] == network['id']]

        return networks

    def get_network_info(self, context, **kwargs):
        """Retrieve and return extended information about a network."""
        network_id = kwargs.get('network_id')
        host = kwargs.get('host')
        LOG.debug(_('Network %(network_id)s requested from '
                    '%(host)s'), {'network_id': network_id,
                                  'host': host})
        plugin = manager.NeutronManager.get_plugin()
        try:
            network = plugin.get_network(context, network_id)
        except n_exc.NetworkNotFound:
            LOG.warn(_("Network %s could not be found, it might have "
                       "been deleted concurrently."), network_id)
            return
        filters = dict(network_id=[network_id])
        network['subnets'] = plugin.get_subnets(context, filters=filters)
        network['ports'] = plugin.get_ports(context, filters=filters)
        return network

    def get_dhcp_port(self, context, **kwargs):
        """Allocate a DHCP port for the host and return port information.

        This method will re-use an existing port if one already exists. When a
        port is re-used, the fixed_ip allocation will be updated to the current
        network state. If an expected failure occurs, a None port is returned.

        """
        host = kwargs.get('host')
        network_id = kwargs.get('network_id')
        device_id = kwargs.get('device_id')
        # There could be more than one dhcp server per network, so create
        # a device id that combines host and network ids

        LOG.debug(_('Port %(device_id)s for %(network_id)s requested from '
                    '%(host)s'), {'device_id': device_id,
                                  'network_id': network_id,
                                  'host': host})
        plugin = manager.NeutronManager.get_plugin()
        retval = None

        filters = dict(network_id=[network_id])
        subnets = dict([(s['id'], s) for s in
                        plugin.get_subnets(context, filters=filters)])

        dhcp_enabled_subnet_ids = [s['id'] for s in
                                   subnets.values() if s['enable_dhcp']]

        try:
            filters = dict(network_id=[network_id], device_id=[device_id])
            ports = plugin.get_ports(context, filters=filters)
            if ports:
                # Ensure that fixed_ips cover all dhcp_enabled subnets.
                port = ports[0]
                for fixed_ip in port['fixed_ips']:
                    if fixed_ip['subnet_id'] in dhcp_enabled_subnet_ids:
                        dhcp_enabled_subnet_ids.remove(fixed_ip['subnet_id'])
                port['fixed_ips'].extend(
                    [dict(subnet_id=s) for s in dhcp_enabled_subnet_ids])

                retval = plugin.update_port(context, port['id'],
                                            dict(port=port))

        except n_exc.NotFound as e:
            LOG.warning(e)

        if retval is None:
            # No previous port exists, so create a new one.
            LOG.debug(_('DHCP port %(device_id)s on network %(network_id)s '
                        'does not exist on %(host)s'),
                      {'device_id': device_id,
                       'network_id': network_id,
                       'host': host})
            try:
                network = plugin.get_network(context, network_id)
            except n_exc.NetworkNotFound:
                LOG.warn(_("Network %s could not be found, it might have "
                           "been deleted concurrently."), network_id)
                return

            port_dict = dict(
                admin_state_up=True,
                device_id=device_id,
                network_id=network_id,
                tenant_id=network['tenant_id'],
                mac_address=attributes.ATTR_NOT_SPECIFIED,
                name='',
                device_owner=constants.DEVICE_OWNER_DHCP,
                fixed_ips=[dict(subnet_id=s) for s in dhcp_enabled_subnet_ids])

            retval = self._port_action(plugin, context, {'port': port_dict},
                                       'create_port')
        if not retval:
            return

        # Convert subnet_id to subnet dict
        for fixed_ip in retval['fixed_ips']:
            subnet_id = fixed_ip.pop('subnet_id')
            fixed_ip['subnet'] = subnets[subnet_id]

        return retval

    def release_dhcp_port(self, context, **kwargs):
        """Release the port currently being used by a DHCP agent."""
        host = kwargs.get('host')
        network_id = kwargs.get('network_id')
        device_id = kwargs.get('device_id')

        LOG.debug(_('DHCP port deletion for %(network_id)s request from '
                    '%(host)s'),
                  {'network_id': network_id, 'host': host})
        plugin = manager.NeutronManager.get_plugin()
        plugin.delete_ports_by_device_id(context, device_id, network_id)

    def release_port_fixed_ip(self, context, **kwargs):
        """Release the fixed_ip associated with the subnet on a port."""
        host = kwargs.get('host')
        network_id = kwargs.get('network_id')
        device_id = kwargs.get('device_id')
        subnet_id = kwargs.get('subnet_id')

        LOG.debug(_('DHCP port remove fixed_ip for %(subnet_id)s request '
                    'from %(host)s'),
                  {'subnet_id': subnet_id, 'host': host})
        plugin = manager.NeutronManager.get_plugin()
        filters = dict(network_id=[network_id], device_id=[device_id])
        ports = plugin.get_ports(context, filters=filters)

        if ports:
            port = ports[0]

            fixed_ips = port.get('fixed_ips', [])
            for i in range(len(fixed_ips)):
                if fixed_ips[i]['subnet_id'] == subnet_id:
                    del fixed_ips[i]
                    break
            plugin.update_port(context, port['id'], dict(port=port))

    def update_lease_expiration(self, context, **kwargs):
        """Deprecated no-op kept so older DHCP agents can still call it."""
        # NOTE(arosen): This method is no longer used by the DHCP agent but is
        # left so that neutron-dhcp-agents will still continue to work if
        # neutron-server is upgraded and not the agent.
        host = kwargs.get('host')

        LOG.warning(_('Updating lease expiration is now deprecated. Issued '
                      'from host %s.'), host)

    def create_dhcp_port(self, context, **kwargs):
        """Create and return dhcp port information.

        If an expected failure occurs, a None port is returned.

        """
        host = kwargs.get('host')
        port = kwargs.get('port')
        LOG.debug(_('Create dhcp port %(port)s '
                    'from %(host)s.'),
                  {'port': port,
                   'host': host})

        port['port']['device_owner'] = constants.DEVICE_OWNER_DHCP
        port['port'][portbindings.HOST_ID] = host
        if 'mac_address' not in port['port']:
            port['port']['mac_address'] = attributes.ATTR_NOT_SPECIFIED
        plugin = manager.NeutronManager.get_plugin()
        return self._port_action(plugin, context, port, 'create_port')

    def update_dhcp_port(self, context, **kwargs):
        """Update the dhcp port."""
        host = kwargs.get('host')
        port_id = kwargs.get('port_id')
        port = kwargs.get('port')
        LOG.debug(_('Update dhcp port %(port)s '
                    'from %(host)s.'),
                  {'port': port,
                   'host': host})
        plugin = manager.NeutronManager.get_plugin()
        return self._port_action(plugin, context,
                                 {'id': port_id, 'port': port},
                                 'update_port')
@ -1,156 +0,0 @@
# Copyright 2014 Hewlett Packard, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo.db import exception as db_exc

import sqlalchemy as sa

from neutron.common import exceptions as q_exc
from neutron.common import log
from neutron.common import utils
from neutron.db import model_base
from neutron.extensions import dvr as ext_dvr
from neutron import manager
from neutron.openstack.common import log as logging
from oslo.config import cfg
from sqlalchemy.orm import exc

LOG = logging.getLogger(__name__)

dvr_mac_address_opts = [
    cfg.StrOpt('dvr_base_mac',
               default="fa:16:3f:00:00:00",
               help=_('The base mac address used for unique '
                      'DVR instances by Neutron')),
]
cfg.CONF.register_opts(dvr_mac_address_opts)


class DistributedVirtualRouterMacAddress(model_base.BASEV2):
    """Represents a v2 neutron distributed virtual router mac address."""

    __tablename__ = 'dvr_host_macs'

    host = sa.Column(sa.String(255), primary_key=True, nullable=False)
    mac_address = sa.Column(sa.String(32), nullable=False, unique=True)


class DVRDbMixin(ext_dvr.DVRMacAddressPluginBase):
    """Mixin class to add dvr mac address to db_plugin_base_v2."""

    @property
    def plugin(self):
        try:
            if self._plugin is not None:
                return self._plugin
        except AttributeError:
            pass
        self._plugin = manager.NeutronManager.get_plugin()
        return self._plugin

    def _get_dvr_mac_address_by_host(self, context, host):
        try:
            query = context.session.query(DistributedVirtualRouterMacAddress)
            dvrma = query.filter(
                DistributedVirtualRouterMacAddress.host == host).one()
        except exc.NoResultFound:
            raise ext_dvr.DVRMacAddressNotFound(host=host)
        return dvrma

    def _create_dvr_mac_address(self, context, host):
        """Create dvr mac address for a given host."""
        base_mac = cfg.CONF.dvr_base_mac.split(':')
        max_retries = cfg.CONF.mac_generation_retries
        for attempt in reversed(range(max_retries)):
            try:
                with context.session.begin(subtransactions=True):
                    mac_address = utils.get_random_mac(base_mac)
                    dvr_mac_binding = DistributedVirtualRouterMacAddress(
                        host=host, mac_address=mac_address)
                    context.session.add(dvr_mac_binding)
                    LOG.debug("Generated dvr mac for host %(host)s "
                              "is %(mac_address)s",
                              {'host': host, 'mac_address': mac_address})
                dvr_macs = self.get_dvr_mac_address_list(context)
                self.notifier.dvr_mac_address_update(context, dvr_macs)
                return self._make_dvr_mac_address_dict(dvr_mac_binding)
            except db_exc.DBDuplicateEntry:
                LOG.debug("Generated dvr mac %(mac)s exists."
                          " Remaining attempts %(attempts_left)s.",
                          {'mac': mac_address, 'attempts_left': attempt})
        LOG.error(_("MAC generation error after %s attempts"), max_retries)
        raise ext_dvr.MacAddressGenerationFailure(host=host)

    def delete_dvr_mac_address(self, context, host):
        query = context.session.query(DistributedVirtualRouterMacAddress)
        query.filter(DistributedVirtualRouterMacAddress.host == host).delete()

    def get_dvr_mac_address_list(self, context):
        with context.session.begin(subtransactions=True):
            query = context.session.query(DistributedVirtualRouterMacAddress)
            dvrmacs = query.all()
        return dvrmacs

    def get_dvr_mac_address_by_host(self, context, host):
        if not host:
            LOG.warn(_("get_dvr_mac_address_by_host, host input is empty"))
            return

        try:
            return self._get_dvr_mac_address_by_host(context, host)
        except ext_dvr.DVRMacAddressNotFound:
            return self._create_dvr_mac_address(context, host)

    def _make_dvr_mac_address_dict(self, dvr_mac_entry, fields=None):
        return {'host': dvr_mac_entry['host'],
                'mac_address': dvr_mac_entry['mac_address']}

    @log.log
    def get_compute_ports_on_host_by_subnet(self, context, host, subnet):
        #FIXME(vivek): need to optimize this code to do away two-step filtering
        vm_ports_by_host = []
        filter = {'fixed_ips': {'subnet_id': [subnet]}}
        ports = self.plugin.get_ports(context, filters=filter)
        LOG.debug("List of Ports on subnet %(subnet)s received as %(ports)s",
                  {'subnet': subnet, 'ports': ports})
        for port in ports:
            if 'compute:' in port['device_owner']:
                if port['binding:host_id'] == host:
                    port_dict = self.plugin._make_port_dict(
                        port, process_extensions=False)
                    vm_ports_by_host.append(port_dict)
        LOG.debug("Returning list of VM Ports on host %(host)s for subnet "
                  "%(subnet)s ports %(ports)s",
                  {'host': host, 'subnet': subnet, 'ports': vm_ports_by_host})
        return vm_ports_by_host

    @log.log
    def get_subnet_for_dvr(self, context, subnet):
        try:
            subnet_info = self.plugin.get_subnet(context, subnet)
        except q_exc.SubnetNotFound:
            return {}
        else:
            # retrieve the gateway port on this subnet
            filter = {'fixed_ips': {'subnet_id': [subnet],
                                    'ip_address': [subnet_info['gateway_ip']]}}
            internal_gateway_ports = self.plugin.get_ports(
                context, filters=filter)
            if not internal_gateway_ports:
                LOG.error(_("Could not retrieve gateway port "
                            "for subnet %s"), subnet_info)
                return {}
            internal_port = internal_gateway_ports[0]
            subnet_info['gateway_mac'] = internal_port['mac_address']
            return subnet_info
@ -1,163 +0,0 @@
# Copyright (c) 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy.orm import exc
from sqlalchemy.sql import expression as expr

from neutron.api.v2 import attributes
from neutron.common import constants as l3_constants
from neutron.common import exceptions as n_exc
from neutron.db import db_base_plugin_v2
from neutron.db import model_base
from neutron.db import models_v2
from neutron.extensions import external_net
from neutron import manager
from neutron.plugins.common import constants as service_constants


DEVICE_OWNER_ROUTER_GW = l3_constants.DEVICE_OWNER_ROUTER_GW


class ExternalNetwork(model_base.BASEV2):
    network_id = sa.Column(sa.String(36),
                           sa.ForeignKey('networks.id', ondelete="CASCADE"),
                           primary_key=True)

    # Add a relationship to the Network model in order to instruct
    # SQLAlchemy to eagerly load this association
    network = orm.relationship(
        models_v2.Network,
        backref=orm.backref("external", lazy='joined',
                            uselist=False, cascade='delete'))


class External_net_db_mixin(object):
    """Mixin class to add external network methods to db_base_plugin_v2."""

    def _network_model_hook(self, context, original_model, query):
        query = query.outerjoin(ExternalNetwork,
                                (original_model.id ==
                                 ExternalNetwork.network_id))
        return query

    def _network_filter_hook(self, context, original_model, conditions):
        if conditions is not None and not hasattr(conditions, '__iter__'):
            conditions = (conditions, )
        # Apply the external network filter only in non-admin context
        if not context.is_admin and hasattr(original_model, 'tenant_id'):
            conditions = expr.or_(ExternalNetwork.network_id != expr.null(),
                                  *conditions)
        return conditions

    def _network_result_filter_hook(self, query, filters):
        vals = filters and filters.get(external_net.EXTERNAL, [])
        if not vals:
            return query
        if vals[0]:
            return query.filter((ExternalNetwork.network_id != expr.null()))
        return query.filter((ExternalNetwork.network_id == expr.null()))

    # TODO(salvatore-orlando): Perform this operation without explicitly
    # referring to db_base_plugin_v2, as plugins that do not extend from it
    # might exist in the future
    db_base_plugin_v2.NeutronDbPluginV2.register_model_query_hook(
        models_v2.Network,
        "external_net",
        '_network_model_hook',
        '_network_filter_hook',
        '_network_result_filter_hook')

    def _network_is_external(self, context, net_id):
        try:
            context.session.query(ExternalNetwork).filter_by(
                network_id=net_id).one()
            return True
        except exc.NoResultFound:
            return False

    def _extend_network_dict_l3(self, network_res, network_db):
        # Comparing with None for converting uuid into bool
        network_res[external_net.EXTERNAL] = network_db.external is not None
        return network_res

    # Register dict extend functions for networks
    db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
        attributes.NETWORKS, ['_extend_network_dict_l3'])

    def _process_l3_create(self, context, net_data, req_data):
        external = req_data.get(external_net.EXTERNAL)
        external_set = attributes.is_attr_set(external)

        if not external_set:
            return

        if external:
            # expects to be called within a plugin's session
            context.session.add(ExternalNetwork(network_id=net_data['id']))
        net_data[external_net.EXTERNAL] = external

    def _process_l3_update(self, context, net_data, req_data):

        new_value = req_data.get(external_net.EXTERNAL)
        net_id = net_data['id']
        if not attributes.is_attr_set(new_value):
            return

        if net_data.get(external_net.EXTERNAL) == new_value:
            return

        if new_value:
            context.session.add(ExternalNetwork(network_id=net_id))
            net_data[external_net.EXTERNAL] = True
        else:
            # must make sure we do not have any external gateway ports
            # (and thus, possible floating IPs) on this network before
            # allowing it to be updated to external=False
            port = context.session.query(models_v2.Port).filter_by(
                device_owner=DEVICE_OWNER_ROUTER_GW,
                network_id=net_data['id']).first()
            if port:
                raise external_net.ExternalNetworkInUse(net_id=net_id)

            context.session.query(ExternalNetwork).filter_by(
                network_id=net_id).delete()
            net_data[external_net.EXTERNAL] = False

    def _process_l3_delete(self, context, network_id):
        l3plugin = manager.NeutronManager.get_service_plugins().get(
            service_constants.L3_ROUTER_NAT)
        if l3plugin:
            l3plugin.delete_disassociated_floatingips(context, network_id)

    def _filter_nets_l3(self, context, nets, filters):
        vals = filters and filters.get(external_net.EXTERNAL, [])
        if not vals:
            return nets

        ext_nets = set(en['network_id']
                       for en in context.session.query(ExternalNetwork))
        if vals[0]:
            return [n for n in nets if n['id'] in ext_nets]
        else:
            return [n for n in nets if n['id'] not in ext_nets]

    def get_external_network_id(self, context):
        nets = self.get_networks(context, {external_net.EXTERNAL: [True]})
        if len(nets) > 1:
            raise n_exc.TooManyExternalNetworks()
        else:
            return nets[0]['id'] if nets else None
@ -1,185 +0,0 @@
|
|||
# Copyright 2013, Nachi Ueno, NTT MCL, Inc.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import netaddr
|
||||
from oslo.config import cfg
|
||||
import sqlalchemy as sa
|
||||
from sqlalchemy import orm
|
||||
|
||||
from neutron.common import utils
|
||||
from neutron.db import db_base_plugin_v2
|
||||
from neutron.db import l3_db
|
||||
from neutron.db import model_base
|
||||
from neutron.db import models_v2
|
||||
from neutron.extensions import extraroute
|
||||
from neutron.extensions import l3
|
||||
from neutron.openstack.common import log as logging
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
extra_route_opts = [
|
||||
#TODO(nati): use quota framework when it support quota for attributes
|
||||
cfg.IntOpt('max_routes', default=30,
|
||||
help=_("Maximum number of routes")),
|
||||
]
|
||||
|
||||
cfg.CONF.register_opts(extra_route_opts)
|
||||
|
||||
|
||||
class RouterRoute(model_base.BASEV2, models_v2.Route):
    router_id = sa.Column(sa.String(36),
                          sa.ForeignKey('routers.id',
                                        ondelete="CASCADE"),
                          primary_key=True)

    router = orm.relationship(l3_db.Router,
                              backref=orm.backref("route_list",
                                                  lazy='joined',
                                                  cascade='delete'))


class ExtraRoute_db_mixin(l3_db.L3_NAT_db_mixin):
    """Mixin class to support extra route configuration on router."""

    def _extend_router_dict_extraroute(self, router_res, router_db):
        router_res['routes'] = (ExtraRoute_db_mixin.
                                _make_extra_route_list(
                                    router_db['route_list']
                                ))

    db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
        l3.ROUTERS, ['_extend_router_dict_extraroute'])

    def update_router(self, context, id, router):
        r = router['router']
        with context.session.begin(subtransactions=True):
            #check if route exists and have permission to access
            router_db = self._get_router(context, id)
            if 'routes' in r:
                self._update_extra_routes(context, router_db, r['routes'])
            routes = self._get_extra_routes_by_router_id(context, id)
            router_updated = super(ExtraRoute_db_mixin, self).update_router(
                context, id, router)
            router_updated['routes'] = routes

        return router_updated

    def _get_subnets_by_cidr(self, context, cidr):
        query_subnets = context.session.query(models_v2.Subnet)
        return query_subnets.filter_by(cidr=cidr).all()

    def _validate_routes_nexthop(self, cidrs, ips, routes, nexthop):
        #Note(nati): Nexthop should be connected,
        # so we need to check
        # nexthop belongs to one of cidrs of the router ports
        if not netaddr.all_matching_cidrs(nexthop, cidrs):
            raise extraroute.InvalidRoutes(
                routes=routes,
                reason=_('the nexthop is not connected with router'))
        #Note(nati) nexthop should not be same as fixed_ips
        if nexthop in ips:
            raise extraroute.InvalidRoutes(
                routes=routes,
                reason=_('the nexthop is used by router'))
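    # Illustrative check: with cidrs=['10.0.0.0/24'] and ips=['10.0.0.1'],
    # a nexthop of '10.0.0.5' is accepted, '10.0.0.1' is rejected as a
    # router fixed IP, and '192.168.1.1' is rejected as not connected to
    # any router port.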
    def _validate_routes(self, context,
                         router_id, routes):
        if len(routes) > cfg.CONF.max_routes:
            raise extraroute.RoutesExhausted(
                router_id=router_id,
                quota=cfg.CONF.max_routes)

        filters = {'device_id': [router_id]}
        ports = self._core_plugin.get_ports(context, filters)
        cidrs = []
        ips = []
        for port in ports:
            for ip in port['fixed_ips']:
                cidrs.append(self._core_plugin._get_subnet(
                    context, ip['subnet_id'])['cidr'])
                ips.append(ip['ip_address'])
        for route in routes:
            self._validate_routes_nexthop(
                cidrs, ips, routes, route['nexthop'])

    def _update_extra_routes(self, context, router, routes):
        self._validate_routes(context, router['id'],
                              routes)
        old_routes, routes_dict = self._get_extra_routes_dict_by_router_id(
            context, router['id'])
        added, removed = utils.diff_list_of_dict(old_routes,
                                                 routes)
        LOG.debug(_('Added routes are %s'), added)
        for route in added:
            router_routes = RouterRoute(
                router_id=router['id'],
                destination=route['destination'],
                nexthop=route['nexthop'])
            context.session.add(router_routes)

        LOG.debug(_('Removed routes are %s'), removed)
        for route in removed:
            context.session.delete(
                routes_dict[(route['destination'], route['nexthop'])])
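    # Illustrative diff: old=[{'destination': '10.0.1.0/24',
    # 'nexthop': '10.0.0.2'}] and new=[] yield added=[] and removed=[that
    # route], so only the matching RouterRoute row is deleted.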
    @staticmethod
    def _make_extra_route_list(extra_routes):
        return [{'destination': route['destination'],
                 'nexthop': route['nexthop']}
                for route in extra_routes]

    def _get_extra_routes_by_router_id(self, context, id):
        query = context.session.query(RouterRoute)
        query = query.filter_by(router_id=id)
        return self._make_extra_route_list(query)

    def _get_extra_routes_dict_by_router_id(self, context, id):
        query = context.session.query(RouterRoute)
        query = query.filter_by(router_id=id)
        routes = []
        routes_dict = {}
        for route in query:
            routes.append({'destination': route['destination'],
                           'nexthop': route['nexthop']})
            routes_dict[(route['destination'], route['nexthop'])] = route
        return routes, routes_dict

    def get_router(self, context, id, fields=None):
        with context.session.begin(subtransactions=True):
            router = super(ExtraRoute_db_mixin, self).get_router(
                context, id, fields)
            return router

    def get_routers(self, context, filters=None, fields=None,
                    sorts=None, limit=None, marker=None,
                    page_reverse=False):
        with context.session.begin(subtransactions=True):
            routers = super(ExtraRoute_db_mixin, self).get_routers(
                context, filters, fields, sorts=sorts, limit=limit,
                marker=marker, page_reverse=page_reverse)
            return routers

    def _confirm_router_interface_not_in_use(self, context, router_id,
                                             subnet_id):
        super(ExtraRoute_db_mixin, self)._confirm_router_interface_not_in_use(
            context, router_id, subnet_id)
        subnet_db = self._core_plugin._get_subnet(context, subnet_id)
        subnet_cidr = netaddr.IPNetwork(subnet_db['cidr'])
        extra_routes = self._get_extra_routes_by_router_id(context, router_id)
        for route in extra_routes:
            if netaddr.all_matching_cidrs(route['nexthop'], [subnet_cidr]):
                raise extraroute.RouterInterfaceInUseByRoute(
                    router_id=router_id, subnet_id=subnet_id)
@@ -1,14 +0,0 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
@@ -1,479 +0,0 @@
# Copyright 2013 Big Switch Networks, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Sumit Naiksatam, sumitnaiksatam@gmail.com, Big Switch Networks, Inc.

import sqlalchemy as sa
from sqlalchemy.ext.orderinglist import ordering_list
from sqlalchemy import orm
from sqlalchemy.orm import exc

from neutron.db import db_base_plugin_v2 as base_db
from neutron.db import model_base
from neutron.db import models_v2
from neutron.extensions import firewall
from neutron import manager
from neutron.openstack.common import log as logging
from neutron.openstack.common import uuidutils
from neutron.plugins.common import constants as const


LOG = logging.getLogger(__name__)


class FirewallRule(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
    """Represents a Firewall rule."""
    __tablename__ = 'firewall_rules'
    name = sa.Column(sa.String(255))
    description = sa.Column(sa.String(1024))
    firewall_policy_id = sa.Column(sa.String(36),
                                   sa.ForeignKey('firewall_policies.id'),
                                   nullable=True)
    shared = sa.Column(sa.Boolean)
    protocol = sa.Column(sa.String(40))
    ip_version = sa.Column(sa.Integer, nullable=False)
    source_ip_address = sa.Column(sa.String(46))
    destination_ip_address = sa.Column(sa.String(46))
    source_port_range_min = sa.Column(sa.Integer)
    source_port_range_max = sa.Column(sa.Integer)
    destination_port_range_min = sa.Column(sa.Integer)
    destination_port_range_max = sa.Column(sa.Integer)
    action = sa.Column(sa.Enum('allow', 'deny', name='firewallrules_action'))
    enabled = sa.Column(sa.Boolean)
    position = sa.Column(sa.Integer)


class Firewall(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
    """Represents a Firewall resource."""
    __tablename__ = 'firewalls'
    name = sa.Column(sa.String(255))
    description = sa.Column(sa.String(1024))
    shared = sa.Column(sa.Boolean)
    admin_state_up = sa.Column(sa.Boolean)
    status = sa.Column(sa.String(16))
    firewall_policy_id = sa.Column(sa.String(36),
                                   sa.ForeignKey('firewall_policies.id'),
                                   nullable=True)


class FirewallPolicy(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
    """Represents a Firewall Policy resource."""
    __tablename__ = 'firewall_policies'
    name = sa.Column(sa.String(255))
    description = sa.Column(sa.String(1024))
    shared = sa.Column(sa.Boolean)
    firewall_rules = orm.relationship(
        FirewallRule,
        backref=orm.backref('firewall_policies', cascade='all, delete'),
        order_by='FirewallRule.position',
        collection_class=ordering_list('position', count_from=1))
    audited = sa.Column(sa.Boolean)
    firewalls = orm.relationship(Firewall, backref='firewall_policies')


class Firewall_db_mixin(firewall.FirewallPluginBase, base_db.CommonDbMixin):
    """Mixin class for Firewall DB implementation."""

    @property
    def _core_plugin(self):
        return manager.NeutronManager.get_plugin()

    def _get_firewall(self, context, id):
        try:
            return self._get_by_id(context, Firewall, id)
        except exc.NoResultFound:
            raise firewall.FirewallNotFound(firewall_id=id)

    def _get_firewall_policy(self, context, id):
        try:
            return self._get_by_id(context, FirewallPolicy, id)
        except exc.NoResultFound:
            raise firewall.FirewallPolicyNotFound(firewall_policy_id=id)

    def _get_firewall_rule(self, context, id):
        try:
            return self._get_by_id(context, FirewallRule, id)
        except exc.NoResultFound:
            raise firewall.FirewallRuleNotFound(firewall_rule_id=id)

    def _make_firewall_dict(self, fw, fields=None):
        res = {'id': fw['id'],
               'tenant_id': fw['tenant_id'],
               'name': fw['name'],
               'description': fw['description'],
               'shared': fw['shared'],
               'admin_state_up': fw['admin_state_up'],
               'status': fw['status'],
               'firewall_policy_id': fw['firewall_policy_id']}
        return self._fields(res, fields)

    def _make_firewall_policy_dict(self, firewall_policy, fields=None):
        fw_rules = [rule['id'] for rule in firewall_policy['firewall_rules']]
        firewalls = [fw['id'] for fw in firewall_policy['firewalls']]
        res = {'id': firewall_policy['id'],
               'tenant_id': firewall_policy['tenant_id'],
               'name': firewall_policy['name'],
               'description': firewall_policy['description'],
               'shared': firewall_policy['shared'],
               'audited': firewall_policy['audited'],
               'firewall_rules': fw_rules,
               'firewall_list': firewalls}
        return self._fields(res, fields)

    def _make_firewall_rule_dict(self, firewall_rule, fields=None):
        position = None
        # We return the position only if the firewall_rule is bound to a
        # firewall_policy.
        if firewall_rule['firewall_policy_id']:
            position = firewall_rule['position']
        src_port_range = self._get_port_range_from_min_max_ports(
            firewall_rule['source_port_range_min'],
            firewall_rule['source_port_range_max'])
        dst_port_range = self._get_port_range_from_min_max_ports(
            firewall_rule['destination_port_range_min'],
            firewall_rule['destination_port_range_max'])
        res = {'id': firewall_rule['id'],
               'tenant_id': firewall_rule['tenant_id'],
               'name': firewall_rule['name'],
               'description': firewall_rule['description'],
               'firewall_policy_id': firewall_rule['firewall_policy_id'],
               'shared': firewall_rule['shared'],
               'protocol': firewall_rule['protocol'],
               'ip_version': firewall_rule['ip_version'],
               'source_ip_address': firewall_rule['source_ip_address'],
               'destination_ip_address':
               firewall_rule['destination_ip_address'],
               'source_port': src_port_range,
               'destination_port': dst_port_range,
               'action': firewall_rule['action'],
               'position': position,
               'enabled': firewall_rule['enabled']}
        return self._fields(res, fields)

    def _set_rules_for_policy(self, context, firewall_policy_db, rule_id_list):
        fwp_db = firewall_policy_db
        with context.session.begin(subtransactions=True):
            if not rule_id_list:
                fwp_db.firewall_rules = []
                fwp_db.audited = False
                return
            # We will first check if the new list of rules is valid
            filters = {'id': [r_id for r_id in rule_id_list]}
            rules_in_db = self._get_collection_query(context, FirewallRule,
                                                     filters=filters)
            rules_dict = dict((fwr_db['id'], fwr_db) for fwr_db in rules_in_db)
            for fwrule_id in rule_id_list:
                if fwrule_id not in rules_dict:
                    # If we find an invalid rule in the list we
                    # do not perform the update since this breaks
                    # the integrity of this list.
                    raise firewall.FirewallRuleNotFound(firewall_rule_id=
                                                        fwrule_id)
                elif rules_dict[fwrule_id]['firewall_policy_id']:
                    if (rules_dict[fwrule_id]['firewall_policy_id'] !=
                            fwp_db['id']):
                        raise firewall.FirewallRuleInUse(
                            firewall_rule_id=fwrule_id)
            # New list of rules is valid so we will first reset the existing
            # list and then add each rule in order.
            # Note that the list could be empty in which case we interpret
            # it as clearing existing rules.
            fwp_db.firewall_rules = []
            for fwrule_id in rule_id_list:
                fwp_db.firewall_rules.append(rules_dict[fwrule_id])
            fwp_db.firewall_rules.reorder()
            fwp_db.audited = False
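    # Illustrative effect of the ordering_list collection used above:
    # appending rules [r2, r1, r3] and calling reorder() rewrites their
    # 'position' columns to 1, 2, 3 in list order, so rule ordering in a
    # policy is exactly the order of rule_id_list.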
    def _process_rule_for_policy(self, context, firewall_policy_id,
                                 firewall_rule_db, position):
        with context.session.begin(subtransactions=True):
            fwp_query = context.session.query(
                FirewallPolicy).with_lockmode('update')
            fwp_db = fwp_query.filter_by(id=firewall_policy_id).one()
            if position:
                # Note that although position numbering starts at 1,
                # internal ordering of the list starts at 0, so we compensate.
                fwp_db.firewall_rules.insert(position - 1, firewall_rule_db)
            else:
                fwp_db.firewall_rules.remove(firewall_rule_db)
            fwp_db.firewall_rules.reorder()
            fwp_db.audited = False
        return self._make_firewall_policy_dict(fwp_db)

    def _get_min_max_ports_from_range(self, port_range):
        if not port_range:
            return [None, None]
        min_port, sep, max_port = port_range.partition(":")
        if not max_port:
            max_port = min_port
        return [int(min_port), int(max_port)]

    def _get_port_range_from_min_max_ports(self, min_port, max_port):
        if not min_port:
            return None
        if min_port == max_port:
            return str(min_port)
        else:
            return '%d:%d' % (min_port, max_port)
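    # Illustrative round-trip for the two helpers above:
    #   '80'     -> [80, 80]     -> '80'
    #   '80:443' -> [80, 443]    -> '80:443'
    #   None/''  -> [None, None] -> None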
    def _validate_fwr_protocol_parameters(self, fwr):
        protocol = fwr['protocol']
        if protocol not in (const.TCP, const.UDP):
            if fwr['source_port'] or fwr['destination_port']:
                raise firewall.FirewallRuleInvalidICMPParameter(
                    param="Source, destination port")

    def create_firewall(self, context, firewall):
        LOG.debug(_("create_firewall() called"))
        fw = firewall['firewall']
        tenant_id = self._get_tenant_id_for_create(context, fw)
        with context.session.begin(subtransactions=True):
            firewall_db = Firewall(id=uuidutils.generate_uuid(),
                                   tenant_id=tenant_id,
                                   name=fw['name'],
                                   description=fw['description'],
                                   firewall_policy_id=
                                   fw['firewall_policy_id'],
                                   admin_state_up=fw['admin_state_up'],
                                   status=const.PENDING_CREATE)
            context.session.add(firewall_db)
        return self._make_firewall_dict(firewall_db)

    def update_firewall(self, context, id, firewall):
        LOG.debug(_("update_firewall() called"))
        fw = firewall['firewall']
        with context.session.begin(subtransactions=True):
            fw_query = context.session.query(
                Firewall).with_lockmode('update')
            firewall_db = fw_query.filter_by(id=id).one()
            firewall_db.update(fw)
        return self._make_firewall_dict(firewall_db)

    def delete_firewall(self, context, id):
        LOG.debug(_("delete_firewall() called"))
        with context.session.begin(subtransactions=True):
            fw_query = context.session.query(
                Firewall).with_lockmode('update')
            firewall_db = fw_query.filter_by(id=id).one()
            # Note: Plugin should ensure that it's okay to delete if the
            # firewall is active
            context.session.delete(firewall_db)

    def get_firewall(self, context, id, fields=None):
        LOG.debug(_("get_firewall() called"))
        fw = self._get_firewall(context, id)
        return self._make_firewall_dict(fw, fields)

    def get_firewalls(self, context, filters=None, fields=None):
        LOG.debug(_("get_firewalls() called"))
        return self._get_collection(context, Firewall,
                                    self._make_firewall_dict,
                                    filters=filters, fields=fields)

    def get_firewalls_count(self, context, filters=None):
        LOG.debug(_("get_firewalls_count() called"))
        return self._get_collection_count(context, Firewall,
                                          filters=filters)

    def create_firewall_policy(self, context, firewall_policy):
        LOG.debug(_("create_firewall_policy() called"))
        fwp = firewall_policy['firewall_policy']
        tenant_id = self._get_tenant_id_for_create(context, fwp)
        with context.session.begin(subtransactions=True):
            fwp_db = FirewallPolicy(id=uuidutils.generate_uuid(),
                                    tenant_id=tenant_id,
                                    name=fwp['name'],
                                    description=fwp['description'],
                                    shared=fwp['shared'])
            context.session.add(fwp_db)
            self._set_rules_for_policy(context, fwp_db,
                                       fwp['firewall_rules'])
            fwp_db.audited = fwp['audited']
        return self._make_firewall_policy_dict(fwp_db)

    def update_firewall_policy(self, context, id, firewall_policy):
        LOG.debug(_("update_firewall_policy() called"))
        fwp = firewall_policy['firewall_policy']
        with context.session.begin(subtransactions=True):
            fwp_db = self._get_firewall_policy(context, id)
            if 'firewall_rules' in fwp:
                self._set_rules_for_policy(context, fwp_db,
                                           fwp['firewall_rules'])
                del fwp['firewall_rules']
            fwp_db.update(fwp)
        return self._make_firewall_policy_dict(fwp_db)

    def delete_firewall_policy(self, context, id):
        LOG.debug(_("delete_firewall_policy() called"))
        with context.session.begin(subtransactions=True):
            fwp = self._get_firewall_policy(context, id)
            # Ensure that the firewall_policy is not
            # being used
            qry = context.session.query(Firewall)
            if qry.filter_by(firewall_policy_id=id).first():
                raise firewall.FirewallPolicyInUse(firewall_policy_id=id)
            else:
                context.session.delete(fwp)

    def get_firewall_policy(self, context, id, fields=None):
        LOG.debug(_("get_firewall_policy() called"))
        fwp = self._get_firewall_policy(context, id)
        return self._make_firewall_policy_dict(fwp, fields)

    def get_firewall_policies(self, context, filters=None, fields=None):
        LOG.debug(_("get_firewall_policies() called"))
        return self._get_collection(context, FirewallPolicy,
                                    self._make_firewall_policy_dict,
                                    filters=filters, fields=fields)

    def get_firewalls_policies_count(self, context, filters=None):
        LOG.debug(_("get_firewall_policies_count() called"))
        return self._get_collection_count(context, FirewallPolicy,
                                          filters=filters)

    def create_firewall_rule(self, context, firewall_rule):
        LOG.debug(_("create_firewall_rule() called"))
        fwr = firewall_rule['firewall_rule']
        self._validate_fwr_protocol_parameters(fwr)
        tenant_id = self._get_tenant_id_for_create(context, fwr)
        src_port_min, src_port_max = self._get_min_max_ports_from_range(
            fwr['source_port'])
        dst_port_min, dst_port_max = self._get_min_max_ports_from_range(
            fwr['destination_port'])
        with context.session.begin(subtransactions=True):
            fwr_db = FirewallRule(id=uuidutils.generate_uuid(),
                                  tenant_id=tenant_id,
                                  name=fwr['name'],
                                  description=fwr['description'],
                                  shared=fwr['shared'],
                                  protocol=fwr['protocol'],
                                  ip_version=fwr['ip_version'],
                                  source_ip_address=fwr['source_ip_address'],
                                  destination_ip_address=
                                  fwr['destination_ip_address'],
                                  source_port_range_min=src_port_min,
                                  source_port_range_max=src_port_max,
                                  destination_port_range_min=dst_port_min,
                                  destination_port_range_max=dst_port_max,
                                  action=fwr['action'],
                                  enabled=fwr['enabled'])
            context.session.add(fwr_db)
        return self._make_firewall_rule_dict(fwr_db)

    def update_firewall_rule(self, context, id, firewall_rule):
        LOG.debug(_("update_firewall_rule() called"))
        fwr = firewall_rule['firewall_rule']
        if 'source_port' in fwr:
            src_port_min, src_port_max = self._get_min_max_ports_from_range(
                fwr['source_port'])
            fwr['source_port_range_min'] = src_port_min
            fwr['source_port_range_max'] = src_port_max
            del fwr['source_port']
        if 'destination_port' in fwr:
            dst_port_min, dst_port_max = self._get_min_max_ports_from_range(
                fwr['destination_port'])
            fwr['destination_port_range_min'] = dst_port_min
            fwr['destination_port_range_max'] = dst_port_max
            del fwr['destination_port']
        with context.session.begin(subtransactions=True):
            fwr_db = self._get_firewall_rule(context, id)
            fwr_db.update(fwr)
            if fwr_db.firewall_policy_id:
                fwp_db = self._get_firewall_policy(context,
                                                   fwr_db.firewall_policy_id)
                fwp_db.audited = False
        return self._make_firewall_rule_dict(fwr_db)

    def delete_firewall_rule(self, context, id):
        LOG.debug(_("delete_firewall_rule() called"))
        with context.session.begin(subtransactions=True):
            fwr = self._get_firewall_rule(context, id)
            if fwr.firewall_policy_id:
                raise firewall.FirewallRuleInUse(firewall_rule_id=id)
            context.session.delete(fwr)

    def get_firewall_rule(self, context, id, fields=None):
        LOG.debug(_("get_firewall_rule() called"))
        fwr = self._get_firewall_rule(context, id)
        return self._make_firewall_rule_dict(fwr, fields)

    def get_firewall_rules(self, context, filters=None, fields=None):
        LOG.debug(_("get_firewall_rules() called"))
        return self._get_collection(context, FirewallRule,
                                    self._make_firewall_rule_dict,
                                    filters=filters, fields=fields)

    def get_firewalls_rules_count(self, context, filters=None):
        LOG.debug(_("get_firewall_rules_count() called"))
        return self._get_collection_count(context, FirewallRule,
                                          filters=filters)

    def _validate_insert_remove_rule_request(self, id, rule_info):
        if not rule_info or 'firewall_rule_id' not in rule_info:
            raise firewall.FirewallRuleInfoMissing()

    def insert_rule(self, context, id, rule_info):
        LOG.debug(_("insert_rule() called"))
        self._validate_insert_remove_rule_request(id, rule_info)
        firewall_rule_id = rule_info['firewall_rule_id']
        insert_before = True
        ref_firewall_rule_id = None
        if not firewall_rule_id:
            raise firewall.FirewallRuleNotFound(firewall_rule_id=None)
        if 'insert_before' in rule_info:
            ref_firewall_rule_id = rule_info['insert_before']
        if not ref_firewall_rule_id and 'insert_after' in rule_info:
            # If insert_before is set, we will ignore insert_after.
            ref_firewall_rule_id = rule_info['insert_after']
            insert_before = False
        with context.session.begin(subtransactions=True):
            fwr_db = self._get_firewall_rule(context, firewall_rule_id)
            if fwr_db.firewall_policy_id:
                raise firewall.FirewallRuleInUse(firewall_rule_id=fwr_db['id'])
            if ref_firewall_rule_id:
                # If reference_firewall_rule_id is set, the new rule
                # is inserted depending on the value of insert_before.
                # If insert_before is set, the new rule is inserted before
                # reference_firewall_rule_id, and if it is not set the new
                # rule is inserted after reference_firewall_rule_id.
                ref_fwr_db = self._get_firewall_rule(
                    context, ref_firewall_rule_id)
                if insert_before:
                    position = ref_fwr_db.position
                else:
                    position = ref_fwr_db.position + 1
            else:
                # If reference_firewall_rule_id is not set, it is assumed
                # that the new rule needs to be inserted at the top.
                # insert_before field is ignored.
                # So default insertion is always at the top.
                # Also note that position numbering starts at 1.
                position = 1
            return self._process_rule_for_policy(context, id, fwr_db,
                                                 position)
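    # Illustrative insert_rule payloads (RULE/REF are placeholder rule IDs):
    #   {'firewall_rule_id': RULE, 'insert_before': REF} -> REF's position
    #   {'firewall_rule_id': RULE, 'insert_after': REF}  -> REF's position + 1
    #   {'firewall_rule_id': RULE}                       -> position 1 (top)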
    def remove_rule(self, context, id, rule_info):
        LOG.debug(_("remove_rule() called"))
        self._validate_insert_remove_rule_request(id, rule_info)
        firewall_rule_id = rule_info['firewall_rule_id']
        if not firewall_rule_id:
            raise firewall.FirewallRuleNotFound(firewall_rule_id=None)
        with context.session.begin(subtransactions=True):
            fwr_db = self._get_firewall_rule(context, firewall_rule_id)
            if fwr_db.firewall_policy_id != id:
                raise firewall.FirewallRuleNotAssociatedWithPolicy(
                    firewall_rule_id=fwr_db['id'],
                    firewall_policy_id=id)
            return self._process_rule_for_policy(context, id, fwr_db, None)
@@ -1,398 +0,0 @@
# Copyright (c) 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo.config import cfg
import sqlalchemy as sa
from sqlalchemy import func
from sqlalchemy import orm
from sqlalchemy.orm import exc
from sqlalchemy.orm import joinedload

from neutron.common import constants
from neutron.db import agents_db
from neutron.db import agentschedulers_db
from neutron.db import model_base
from neutron.db import models_v2
from neutron.extensions import l3agentscheduler
from neutron import manager

L3_AGENTS_SCHEDULER_OPTS = [
    cfg.StrOpt('router_scheduler_driver',
               default='neutron.scheduler.l3_agent_scheduler.ChanceScheduler',
               help=_('Driver to use for scheduling '
                      'router to a default L3 agent')),
    cfg.BoolOpt('router_auto_schedule', default=True,
                help=_('Allow auto scheduling of routers to L3 agent.')),
]

cfg.CONF.register_opts(L3_AGENTS_SCHEDULER_OPTS)


class RouterL3AgentBinding(model_base.BASEV2, models_v2.HasId):
    """Represents binding between neutron routers and L3 agents."""

    router_id = sa.Column(sa.String(36),
                          sa.ForeignKey("routers.id", ondelete='CASCADE'))
    l3_agent = orm.relation(agents_db.Agent)
    l3_agent_id = sa.Column(sa.String(36),
                            sa.ForeignKey("agents.id",
                                          ondelete='CASCADE'))


class CentralizedSnatL3AgentBinding(model_base.BASEV2, models_v2.HasId):
    """Represents binding between neutron Centralized SNAT and L3 agents."""

    router_id = sa.Column(sa.String(36),
                          sa.ForeignKey("routers.id", ondelete='CASCADE'),
                          primary_key=True)
    csnat_gw_port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id'))
    host_id = sa.Column(sa.String(255))
    l3_agent = orm.relation(agents_db.Agent)
    l3_agent_id = sa.Column(sa.String(36),
                            sa.ForeignKey("agents.id",
                                          ondelete='CASCADE'))
    csnat_gw_port = orm.relationship(models_v2.Port)


class L3AgentSchedulerDbMixin(l3agentscheduler.L3AgentSchedulerPluginBase,
                              agentschedulers_db.AgentSchedulerDbMixin):
    """Mixin class to add l3 agent scheduler extension to plugins
    using the l3 agent for routing.
    """

    router_scheduler = None

    def add_router_to_l3_agent(self, context, agent_id, router_id):
        """Add a l3 agent to host a router."""
        router = self.get_router(context, router_id)
        with context.session.begin(subtransactions=True):
            agent_db = self._get_agent(context, agent_id)
            agent_conf = self.get_configuration_dict(agent_db)
            distributed_router_enable = agent_conf.get(
                'distributed_agent', False)
            distributed = router.get('distributed', False)
            if (distributed and not distributed_router_enable):
                raise l3agentscheduler.DistributedRouterNotHostedByL3Agent(
                    router_id=router_id, agent_id=agent_id)
            if (not distributed and distributed_router_enable):
                raise l3agentscheduler.RouterNotHostedByDistributedL3Agent(
                    router_id=router_id, agent_id=agent_id)
            if (agent_db['agent_type'] != constants.AGENT_TYPE_L3 or
                not agent_db['admin_state_up'] or
                not self.get_l3_agent_candidates(context,
                                                 router,
                                                 [agent_db])):
                raise l3agentscheduler.InvalidL3Agent(id=agent_id)
            query = context.session.query(RouterL3AgentBinding)
            if distributed:
                binding = query.filter_by(router_id=router_id,
                                          l3_agent_id=agent_id).first()
                if binding:
                    raise l3agentscheduler.RouterHostedByL3Agent(
                        router_id=router_id,
                        agent_id=binding.l3_agent_id)
            else:
                try:
                    binding = query.filter_by(router_id=router_id).one()

                    raise l3agentscheduler.RouterHostedByL3Agent(
                        router_id=router_id,
                        agent_id=binding.l3_agent_id)
                except exc.NoResultFound:
                    pass

        result = self.auto_schedule_routers(context,
                                            agent_db.host,
                                            [router_id])
        if not result:
            raise l3agentscheduler.RouterSchedulingFailed(
                router_id=router_id, agent_id=agent_id)

        l3_notifier = self.agent_notifiers.get(constants.AGENT_TYPE_L3)
        if l3_notifier:
            l3_notifier.router_added_to_agent(
                context, [router_id], agent_db.host)

    def remove_router_from_l3_agent(self, context, agent_id, router_id):
        """Remove the router from l3 agent.

        After removal, the router will be non-hosted until there is update
        which leads to re-schedule or be added to another agent manually.
        """
        agent = self._get_agent(context, agent_id)
        floating_ips = self._get_sync_floating_ips(context, [router_id])
        if floating_ips:
            raise l3agentscheduler.RemoveFloatingIPforRouter(
                router_id=router_id, agent_id=agent_id)
        self._unbind_router(context, router_id, agent_id)
        l3_notifier = self.agent_notifiers.get(constants.AGENT_TYPE_L3)
        if l3_notifier:
            l3_notifier.router_removed_from_agent(
                context, router_id, agent.host)

    def _unbind_router(self, context, router_id, agent_id):
        with context.session.begin(subtransactions=True):
            query = context.session.query(RouterL3AgentBinding)
            query = query.filter(
                RouterL3AgentBinding.router_id == router_id,
                RouterL3AgentBinding.l3_agent_id == agent_id)
            try:
                binding = query.one()
            except exc.NoResultFound:
                raise l3agentscheduler.RouterNotHostedByL3Agent(
                    router_id=router_id, agent_id=agent_id)
            context.session.delete(binding)

    def reschedule_router(self, context, router_id, candidates=None):
        """Reschedule router to a new l3 agent

        Remove the router from the agent(s) currently hosting it and
        schedule it again
        """
        cur_agents = self.list_l3_agents_hosting_router(
            context, router_id)['agents']
        with context.session.begin(subtransactions=True):
            for agent in cur_agents:
                self._unbind_router(context, router_id, agent['id'])

            new_agent = self.schedule_router(context, router_id,
                                             candidates=candidates)
            if not new_agent:
                raise l3agentscheduler.RouterReschedulingFailed(
                    router_id=router_id)

        l3_notifier = self.agent_notifiers.get(constants.AGENT_TYPE_L3)
        if l3_notifier:
            for agent in cur_agents:
                l3_notifier.router_removed_from_agent(
                    context, router_id, agent['host'])
            l3_notifier.router_added_to_agent(
                context, [router_id], new_agent.host)

    def list_routers_on_l3_agent(self, context, agent_id):
        query = context.session.query(RouterL3AgentBinding.router_id)
        query = query.filter(RouterL3AgentBinding.l3_agent_id == agent_id)

        router_ids = [item[0] for item in query]
        if router_ids:
            return {'routers':
                    self.get_routers(context, filters={'id': router_ids})}
        else:
            return {'routers': []}

    def list_active_sync_routers_on_active_l3_agent(
            self, context, host, router_ids):
        agent = self._get_agent_by_type_and_host(
            context, constants.AGENT_TYPE_L3, host)
        if not agent.admin_state_up:
            return []
        query = context.session.query(RouterL3AgentBinding.router_id)
        query = query.filter(
            RouterL3AgentBinding.l3_agent_id == agent.id)

        if not router_ids:
            pass
        else:
            query = query.filter(
                RouterL3AgentBinding.router_id.in_(router_ids))
        router_ids = [item[0] for item in query]
        if router_ids:
            return self.get_sync_data(context, router_ids=router_ids,
                                      active=True)
        else:
            return []

    def get_l3_agents_hosting_routers(self, context, router_ids,
                                      admin_state_up=None,
                                      active=None):
        if not router_ids:
            return []
        query = context.session.query(RouterL3AgentBinding)
        if len(router_ids) > 1:
            query = query.options(joinedload('l3_agent')).filter(
                RouterL3AgentBinding.router_id.in_(router_ids))
        else:
            query = query.options(joinedload('l3_agent')).filter(
                RouterL3AgentBinding.router_id == router_ids[0])
        if admin_state_up is not None:
            query = (query.filter(agents_db.Agent.admin_state_up ==
                                  admin_state_up))
        l3_agents = [binding.l3_agent for binding in query]
        if active is not None:
            l3_agents = [l3_agent for l3_agent in
                         l3_agents if not
                         agents_db.AgentDbMixin.is_agent_down(
                             l3_agent['heartbeat_timestamp'])]
        return l3_agents

    def _get_l3_bindings_hosting_routers(self, context, router_ids):
        if not router_ids:
            return []
        query = context.session.query(RouterL3AgentBinding)
        if len(router_ids) > 1:
            query = query.options(joinedload('l3_agent')).filter(
                RouterL3AgentBinding.router_id.in_(router_ids))
        else:
            query = query.options(joinedload('l3_agent')).filter(
                RouterL3AgentBinding.router_id == router_ids[0])
        return query.all()

    def list_l3_agents_hosting_router(self, context, router_id):
        with context.session.begin(subtransactions=True):
            bindings = self._get_l3_bindings_hosting_routers(
                context, [router_id])
            results = []
            for binding in bindings:
                l3_agent_dict = self._make_agent_dict(binding.l3_agent)
                results.append(l3_agent_dict)
            if results:
                return {'agents': results}
            else:
                return {'agents': []}

    def get_l3_agents(self, context, active=None, filters=None):
        query = context.session.query(agents_db.Agent)
        query = query.filter(
            agents_db.Agent.agent_type == constants.AGENT_TYPE_L3)
        if active is not None:
            query = (query.filter(agents_db.Agent.admin_state_up == active))
        if filters:
            for key, value in filters.iteritems():
                column = getattr(agents_db.Agent, key, None)
                if column:
                    query = query.filter(column.in_(value))

        return [l3_agent
                for l3_agent in query
                if agentschedulers_db.AgentSchedulerDbMixin.is_eligible_agent(
                    active, l3_agent)]

    def list_interfaces_onrouter(self, context, router_id):
        if router_id:
            return (context.session.query(models_v2.Port).
                    filter_by(device_id=router_id).all())

    def check_vmexists_on_l3agent(self, context, l3_agent, router_id,
                                  subnet_id):
        core_plugin = manager.NeutronManager.get_plugin()
        if not subnet_id:
            return True
        filter = {'fixed_ips': {'subnet_id': [subnet_id]}}
        ports = core_plugin.get_ports(context, filters=filter)
        for port in ports:
            if "compute:" in port['device_owner']:
                if l3_agent['host'] == port['binding:host_id']:
                    return True
        return False

    def get_snat_candidates(self, sync_router, l3_agents):
        """Get the valid snat enabled l3 agents for the distributed router."""
        candidates = []
        if not sync_router.get('distributed', False):
            return []
        for l3_agent in l3_agents:
            if not l3_agent.admin_state_up:
                continue
            agent_conf = self.get_configuration_dict(l3_agent)
            router_id = agent_conf.get('router_id', None)
            use_namespaces = agent_conf.get('use_namespaces', True)
            handle_internal_only_routers = agent_conf.get(
                'handle_internal_only_routers', True)
            gateway_external_network_id = agent_conf.get(
                'gateway_external_network_id', None)
            distributed_router_enable = agent_conf.get(
                'distributed_agent', False)
            centralized_snat_enable = agent_conf.get(
                'centralized_snat', False)
            if (not distributed_router_enable or
                    not centralized_snat_enable):
                continue
            if not use_namespaces and router_id != sync_router['id']:
                continue
            ex_net_id = (sync_router['external_gateway_info'] or {}).get(
                'network_id')
            if ((not ex_net_id and not handle_internal_only_routers) or
                (ex_net_id and gateway_external_network_id and
                 ex_net_id != gateway_external_network_id)):
                continue
            if (sync_router.get('distributed', False) and
                    distributed_router_enable and
                    centralized_snat_enable):
                candidates.append(l3_agent)
        return candidates
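    # An agent survives the SNAT-candidate filter above only when its
    # reported configuration has both 'distributed_agent' and
    # 'centralized_snat' set true and its namespace/gateway settings do not
    # exclude the router; e.g. a config of {'distributed_agent': True,
    # 'centralized_snat': True, 'use_namespaces': True} passes.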
    def get_l3_agent_candidates(self, context, sync_router, l3_agents,
                                subnet_id=None):
        """Get the valid l3 agents for the router from a list of l3_agents."""
        candidates = []
        for l3_agent in l3_agents:
            if not l3_agent.admin_state_up:
                continue
            agent_conf = self.get_configuration_dict(l3_agent)
            router_id = agent_conf.get('router_id', None)
            use_namespaces = agent_conf.get('use_namespaces', True)
            handle_internal_only_routers = agent_conf.get(
                'handle_internal_only_routers', True)
            gateway_external_network_id = agent_conf.get(
                'gateway_external_network_id', None)
            distributed_router_enable = agent_conf.get(
                'distributed_agent', False)
            centralized = agent_conf.get(
                'centralized_router', True)
            if not use_namespaces and router_id != sync_router['id']:
                continue
            ex_net_id = (sync_router['external_gateway_info'] or {}).get(
                'network_id')
            if ((not ex_net_id and not handle_internal_only_routers) or
                (ex_net_id and gateway_external_network_id and
                 ex_net_id != gateway_external_network_id)):
                continue
            if not sync_router.get('distributed', False):
                if (not distributed_router_enable) or centralized:
                    candidates.append(l3_agent)
            else:
                if (distributed_router_enable and
                    self.check_vmexists_on_l3agent(context,
                                                   l3_agent,
                                                   sync_router['id'],
                                                   subnet_id)):
                    candidates.append(l3_agent)
        return candidates

    def auto_schedule_routers(self, context, host, router_ids):
        if self.router_scheduler:
            return self.router_scheduler.auto_schedule_routers(
                self, context, host, router_ids)

    def schedule_router(self, context, router, candidates=None, hints=None):
        if self.router_scheduler:
            return self.router_scheduler.schedule(
                self, context, router, candidates=candidates, hints=hints)

    def schedule_routers(self, context, routers, hints=None):
        """Schedule the routers to l3 agents."""
        for router in routers:
            self.schedule_router(context, router, candidates=None, hints=hints)

    def get_l3_agent_with_min_routers(self, context, agent_ids):
        """Return l3 agent with the least number of routers."""
        query = context.session.query(
            agents_db.Agent,
            func.count(
                RouterL3AgentBinding.router_id
            ).label('count')).outerjoin(RouterL3AgentBinding).group_by(
            RouterL3AgentBinding.l3_agent_id).order_by('count')
        res = query.filter(agents_db.Agent.id.in_(agent_ids)).first()
        return res[0]
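    # The query above outer-joins agents to their router bindings, groups
    # by agent, and orders by the per-agent binding count; e.g. with agents
    # A (2 routers), B (0) and C (5), filtering on [A, B, C] returns B.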
@@ -1,76 +0,0 @@
# Copyright (c) 2014 OpenStack Foundation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import sqlalchemy as sa
from sqlalchemy import orm

from neutron.db import db_base_plugin_v2
from neutron.db import l3_db
from neutron.db import model_base
from neutron.extensions import l3


class RouterExtraAttributes(model_base.BASEV2):
    """Additional attributes for a Virtual Router."""

    # NOTE(armando-migliaccio): this model can be a good place to
    # add extension attributes to a Router model. Each case needs
    # to be individually examined, however 'distributed' and other
    # simple ones fit the pattern well.
    __tablename__ = "router_extra_attributes"
    router_id = sa.Column(sa.String(36),
                          sa.ForeignKey('routers.id', ondelete="CASCADE"),
                          primary_key=True)
    # Whether the router is a legacy (centralized) or a distributed one
    distributed = sa.Column(sa.Boolean, default=False, nullable=False)
    router = orm.relationship(
        l3_db.Router,
        backref=orm.backref("extra_attributes", lazy='joined',
                            uselist=False, cascade='delete'))


class ExtraAttributesMixin(object):
    """Mixin class to enable router's extra attributes."""

    extra_attributes = []

    def _extend_extra_router_dict(self, router_res, router_db):
        extra_attrs = router_db['extra_attributes']
        for attr in self.extra_attributes:
            name = attr['name']
            default = attr['default']
            router_res[name] = (
                extra_attrs and extra_attrs[name] or default)

    def _get_extra_attributes(self, router, extra_attributes):
        return (dict((attr['name'],
                      router.get(attr['name'], attr['default']))
                     for attr in extra_attributes))
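    # Each extra_attributes entry is a dict like
    # {'name': 'distributed', 'default': False}; subclasses extend the list
    # and the two helpers above read the attribute from the DB row, falling
    # back to the declared default when no row value is present.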
    def _process_extra_attr_router_create(
        self, context, router_db, router_req):
        kwargs = self._get_extra_attributes(router_req, self.extra_attributes)
        # extra_attributes reference is populated via backref
        if not router_db['extra_attributes']:
            attributes_db = RouterExtraAttributes(
                router_id=router_db['id'], **kwargs)
            context.session.add(attributes_db)
            router_db['extra_attributes'] = attributes_db
        else:
            # The record will exist if RouterExtraAttributes model's
            # attributes are added with db migrations over time
            router_db['extra_attributes'].update(kwargs)

    db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
        l3.ROUTERS, ['_extend_extra_router_dict'])
File diff suppressed because it is too large
@@ -1,434 +0,0 @@
# Copyright (c) 2014 OpenStack Foundation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo.config import cfg

from neutron.api.v2 import attributes
from neutron.common import constants as l3_const
from neutron.common import exceptions as n_exc
from neutron.db import l3_attrs_db
from neutron.db import l3_db
from neutron.db import models_v2
from neutron.extensions import portbindings
from neutron.openstack.common import log as logging


LOG = logging.getLogger(__name__)

DEVICE_OWNER_DVR_INTERFACE = l3_const.DEVICE_OWNER_DVR_INTERFACE
DEVICE_OWNER_DVR_SNAT = l3_const.DEVICE_OWNER_ROUTER_SNAT
FLOATINGIP_AGENT_INTF_KEY = l3_const.FLOATINGIP_AGENT_INTF_KEY
DEVICE_OWNER_AGENT_GW = l3_const.DEVICE_OWNER_AGENT_GW
SNAT_ROUTER_INTF_KEY = l3_const.SNAT_ROUTER_INTF_KEY


router_distributed_opts = [
    cfg.BoolOpt('router_distributed',
                default=False,
                help=_("System-wide flag to determine the type of router "
                       "that tenants can create. Only admin can override.")),
]
cfg.CONF.register_opts(router_distributed_opts)


class L3_NAT_with_dvr_db_mixin(l3_db.L3_NAT_db_mixin,
                               l3_attrs_db.ExtraAttributesMixin):
    """Mixin class to enable DVR support."""

    router_device_owners = (
        l3_db.L3_NAT_db_mixin.
        router_device_owners + (DEVICE_OWNER_DVR_INTERFACE,))

    extra_attributes = (
        l3_attrs_db.ExtraAttributesMixin.extra_attributes + [{
            'name': "distributed",
            'default': cfg.CONF.router_distributed
        }])

    def _create_router_db(self, context, router, tenant_id):
        """Create a router db object with dvr additions."""
        router['distributed'] = _is_distributed_router(router)
        with context.session.begin(subtransactions=True):
            router_db = super(
                L3_NAT_with_dvr_db_mixin, self)._create_router_db(
                    context, router, tenant_id)
            self._process_extra_attr_router_create(context, router_db, router)
            return router_db

    def _validate_router_migration(self, router_db, router_res):
        """Allow centralized -> distributed state transition only."""
        if (router_db.extra_attributes.distributed and
                not router_res.get('distributed')):
            # NOTE(armando-migliaccio): tell the client that there is no
            # going back for a distributed router, at least for now.
            raise NotImplementedError()
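    # Migration matrix enforced above (illustrative): centralized ->
    # distributed is accepted; distributed -> centralized raises
    # NotImplementedError; updates that leave 'distributed' unchanged pass
    # through.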
    def _update_distributed_attr(
        self, context, router_id, router_db, data, gw_info):
        """Update the model to support the dvr case of a router."""
        if not gw_info and 'distributed' in data and data['distributed']:
            admin_ctx = context.elevated()
            filters = {'device_id': [router_id],
                       'device_owner': [l3_const.DEVICE_OWNER_ROUTER_INTF]}
            ports = self._core_plugin.get_ports(admin_ctx, filters=filters)
            for p in ports:
                port_db = self._core_plugin._get_port(admin_ctx, p['id'])
                port_db.update({'device_owner': DEVICE_OWNER_DVR_INTERFACE})

    def _update_router_db(self, context, router_id, data, gw_info):
        with context.session.begin(subtransactions=True):
            router_db = super(
                L3_NAT_with_dvr_db_mixin, self)._update_router_db(
                    context, router_id, data, gw_info)
            self._validate_router_migration(router_db, data)
            router_db.extra_attributes.update(data)
            self._update_distributed_attr(
                context, router_id, router_db, data, gw_info)
            return router_db

    def _delete_current_gw_port(self, context, router_id, router, new_network):
        super(L3_NAT_with_dvr_db_mixin,
              self)._delete_current_gw_port(context, router_id,
                                            router, new_network)
        if router.extra_attributes.distributed:
            self.delete_csnat_router_interface_ports(context, router)

    def _create_gw_port(self, context, router_id, router, new_network):
        super(L3_NAT_with_dvr_db_mixin,
              self)._create_gw_port(context, router_id,
                                    router, new_network)
        if router.extra_attributes.distributed:
            snat_p_list = self.create_snat_intf_ports_if_not_exists(
                context.elevated(), router['id'])
            if not snat_p_list:
                LOG.debug(_("SNAT interface ports not created: %s"),
                          snat_p_list)

    def _get_device_owner(self, context, router=None):
        """Get device_owner for the specified router."""
        router_is_uuid = isinstance(router, basestring)
        if router_is_uuid:
            router = self._get_router(context, router)
        if _is_distributed_router(router):
            return DEVICE_OWNER_DVR_INTERFACE
        return super(L3_NAT_with_dvr_db_mixin,
                     self)._get_device_owner(context, router)

    def _get_interface_ports_for_network(self, context, network_id):
        router_intf_qry = (context.session.query(models_v2.Port).
                           filter_by(network_id=network_id))
        return (router_intf_qry.
                filter(models_v2.Port.device_owner.in_(
                    [l3_const.DEVICE_OWNER_ROUTER_INTF,
                     DEVICE_OWNER_DVR_INTERFACE])))

    def get_snat_sync_interfaces(self, context, router_ids):
        """Query router interfaces that relate to list of router_ids."""
        if not router_ids:
            return []
        filters = {'device_id': router_ids,
                   'device_owner': [DEVICE_OWNER_DVR_SNAT]}
        interfaces = self._core_plugin.get_ports(context, filters)
        LOG.debug("Return the SNAT ports: %s ", interfaces)
        if interfaces:
            self._populate_subnet_for_ports(context, interfaces)
        return interfaces

    def _process_routers(self, context, routers):
        routers_dict = {}
        for router in routers:
            routers_dict[router['id']] = router
            router_ids = [router['id']]
            if router['gw_port_id']:
                snat_router_intfs = self.get_snat_sync_interfaces(context,
                                                                  router_ids)
                LOG.info(_("SNAT ports returned : %s "), snat_router_intfs)
                router[SNAT_ROUTER_INTF_KEY] = snat_router_intfs
        return routers_dict

    def _process_floating_ips(self, context, routers_dict, floating_ips):
        for floating_ip in floating_ips:
            router = routers_dict.get(floating_ip['router_id'])
            if router:
                router_floatingips = router.get(l3_const.FLOATINGIP_KEY, [])
                floatingip_agent_intfs = []
                if router['distributed']:
                    floating_ip['host'] = self.get_vm_port_hostid(
                        context, floating_ip['port_id'])
                    LOG.debug("Floating IP host: %s", floating_ip['host'])
                    fip_agent = self._get_agent_by_type_and_host(
                        context, l3_const.AGENT_TYPE_L3,
                        floating_ip['host'])
                    LOG.debug("FIP Agent : %s ", fip_agent['id'])
                    floatingip_agent_intfs = self.get_fip_sync_interfaces(
                        context, fip_agent['id'])
                    LOG.debug("FIP Agent ports: %s", floatingip_agent_intfs)
                router_floatingips.append(floating_ip)
                #router_floatingip_agent_intfs.append(floatingip_agent_intfs)
                router[l3_const.FLOATINGIP_KEY] = router_floatingips
                router[l3_const.FLOATINGIP_AGENT_INTF_KEY] = (
                    floatingip_agent_intfs)

    def get_sync_data(self, context, router_ids=None, active=None):
        routers, interfaces, floating_ips = self._get_router_info_list(
            context, router_ids=router_ids, active=active,
            device_owners=[l3_const.DEVICE_OWNER_ROUTER_INTF,
                           DEVICE_OWNER_DVR_INTERFACE])
        # Add the port binding host to the floatingip dictionary
        for fip in floating_ips:
            fip['host'] = self.get_vm_port_hostid(context, fip['port_id'])
        routers_dict = self._process_routers(context, routers)
        self._process_floating_ips(context, routers_dict, floating_ips)
        self._process_interfaces(routers_dict, interfaces)
        return routers_dict.values()

    def get_vm_port_hostid(self, context, port_id, port=None):
        """Return the portbinding host_id."""
        vm_port_db = port or self._core_plugin.get_port(context, port_id)
        if vm_port_db and (
            "compute:" in vm_port_db['device_owner'] or
            DEVICE_OWNER_AGENT_GW in vm_port_db['device_owner'] or
            "neutron:LOADBALANCER" in vm_port_db['device_owner']):
            return vm_port_db[portbindings.HOST_ID]

    def get_agent_gw_ports_exist_for_network(
            self, context, network_id, host, agent_id):
        """Return agent gw port if exist, or None otherwise."""
        if not network_id:
            LOG.debug("No Network id exists")
            return
        filters = {'network_id': network_id,
                   'device_owner': DEVICE_OWNER_AGENT_GW}
        ports = self._core_plugin.get_ports(context.elevated(), filters)
        for p in ports:
            if(self.get_vm_port_hostid(context, p['id'], p) == host and
               p['device_id'] == agent_id):
                return p

    def check_for_floatingip_and_return_with_hostid(self, context, router_id):
        """Helper function to check for FIP and return Host id."""
        # FIXME(swami): what is the "break" condition for fp_host to be
        # returned to the caller?
        fp = self._get_sync_floating_ips(context, [router_id])
        fp_host = None
        for fid in fp:
            port_db = self._core_plugin._get_port(context, fid['port_id'])
            fp_host = self.get_vm_port_hostid(context, port_db['id'], port_db)
        return fp_host

    def check_fips_availability_on_host(self, context, fip_id, host_id):
        """Query all floating_ips and filter by particular host."""
        fip_count_on_host = 0
        admin_ctx = context.elevated()
        with context.session.begin(subtransactions=True):
            routers = self._get_sync_routers(admin_ctx, router_ids=None)
            router_ids = [router['id'] for router in routers]
            floating_ips = self._get_sync_floating_ips(admin_ctx, router_ids)
            # Check for the active floatingip in the host
            for fip in floating_ips:
                f_host = self.get_vm_port_hostid(admin_ctx, fip['port_id'])
                if f_host == host_id:
                    fip_count_on_host += 1
            # If fip_count greater than 1 or equal to zero no action taken
            # if the fip_count is equal to 1, then this would be last active
            # fip in the host, so the agent gateway port can be deleted.
            if fip_count_on_host == 1:
                return True
            return False
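    # check_fips_availability_on_host returning True (exactly one active
    # floating IP left on the host) is the cue for callers to run the
    # cleanup below and remove that host's FIP agent gateway port.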
    def delete_floatingip_agent_gateway_port(self, context, host_id):
        """Function to delete the FIP agent gateway port on host."""
        # delete any fip agent gw port
        device_filter = {'device_owner': [DEVICE_OWNER_AGENT_GW]}
        ports = self._core_plugin.get_ports(context.elevated(),
                                            filters=device_filter)
        for p in ports:
            if self.get_vm_port_hostid(context, p['id'], p) == host_id:
                self._core_plugin._delete_port(context.elevated(),
                                               p['id'])
                return

    def create_fip_agent_gw_port_if_not_exists(
            self, context, network_id, host):
        """Function to return the FIP Agent GW port.

        This function will create a FIP Agent GW port
        if required. If the port already exists, it
        will return the existing port and will not
        create a new one.
        """
        l3_agent_db = self._get_agent_by_type_and_host(
            context, l3_const.AGENT_TYPE_L3, host)
        if l3_agent_db:
            LOG.info(_("Agent ID exists: %s"), l3_agent_db['id'])
            # TODO(Swami): is this call still valid for external agent gw port?
            f_port = self.get_agent_gw_ports_exist_for_network(
                context, network_id, host, l3_agent_db['id'])
            if not f_port:
                LOG.info(_('Agent Gateway port does not exist,'
                           ' so create one: %s'), f_port)
                agent_port = self._core_plugin.create_port(
                    context.elevated(),
                    {'port': {'tenant_id': '',
                              'network_id': network_id,
                              'mac_address': attributes.ATTR_NOT_SPECIFIED,
                              'fixed_ips': attributes.ATTR_NOT_SPECIFIED,
                              'device_id': l3_agent_db['id'],
                              'device_owner': DEVICE_OWNER_AGENT_GW,
                              'admin_state_up': True,
                              'name': ''}})
                if agent_port:
                    self._populate_subnet_for_ports(context, [agent_port])
                    return agent_port
                msg = _("Unable to create the Agent Gateway Port")
                raise n_exc.BadRequest(resource='router', msg=msg)
            else:
                self._populate_subnet_for_ports(context, [f_port])
                return f_port

    def get_snat_interface_ports_for_router(self, context, router_id):
        """Return all existing snat_router_interface ports."""
        filters = {'device_id': [router_id],
                   'device_owner': [DEVICE_OWNER_DVR_SNAT]}
        return self._core_plugin.get_ports(context.elevated(), filters)

    def add_csnat_router_interface_port(
            self, context, router_id, network_id, subnet_payload):
        """Function to create SNAT router interface ports."""
        snat_port = self._core_plugin.create_port(
            context.elevated(),
            {'port': {'tenant_id': '',
                      'network_id': network_id,
                      'mac_address': attributes.ATTR_NOT_SPECIFIED,
                      'fixed_ips': [subnet_payload],
                      'device_id': router_id,
                      'device_owner': DEVICE_OWNER_DVR_SNAT,
                      'admin_state_up': True,
                      'name': ''}})
        if snat_port:
            return self._populate_subnet_for_ports(context, [snat_port])
        msg = _("Unable to create the SNAT Interface Port")
        raise n_exc.BadRequest(resource='router', msg=msg)

    def create_snat_intf_ports_if_not_exists(
            self, context, router_id):
        """Function to return the snat interface port list.

        This function will return the snat interface port list
        if it exists. If the port does not exist it will create
        new ports and then return the list.
        """
        port_list = self.get_snat_interface_ports_for_router(
            context, router_id)
        if port_list:
            self._populate_subnet_for_ports(context, port_list)
            return port_list
        admin_ctx = context.elevated()
        port_list = []
        filters = {
            'device_id': [router_id],
|
||||
'device_owner': [DEVICE_OWNER_DVR_INTERFACE]}
|
||||
int_ports = self._core_plugin.get_ports(admin_ctx, filters)
|
||||
LOG.info(_('SNAT interface port list does not exist,'
|
||||
' so create one: %s'), port_list)
|
||||
for intf in int_ports:
|
||||
if 'fixed_ips' in intf and intf['fixed_ips']:
|
||||
# Passing the subnet for the port to make sure the IP's
|
||||
# are assigned on the right subnet if multiple subnet
|
||||
# exists
|
||||
intf_subnet = intf['fixed_ips'][0]['subnet_id']
|
||||
port_data = {
|
||||
'tenant_id': '',
|
||||
'network_id': intf['network_id'],
|
||||
'mac_address': attributes.ATTR_NOT_SPECIFIED,
|
||||
'fixed_ips': [{'subnet_id': intf_subnet}],
|
||||
'device_id': router_id,
|
||||
'device_owner': DEVICE_OWNER_DVR_SNAT,
|
||||
'admin_state_up': True,
|
||||
'name': ''
|
||||
}
|
||||
snat_port = self._core_plugin.create_port(
|
||||
admin_ctx, {'port': port_data})
|
||||
if not snat_port:
|
||||
msg = _("Unable to create the SNAT Interface Port")
|
||||
raise n_exc.BadRequest(resource='router', msg=msg)
|
||||
port_list.append(snat_port)
|
||||
if port_list:
|
||||
self._populate_subnet_for_ports(context, port_list)
|
||||
return port_list
|
||||
|
||||
def l3_agent_notify_for_vmarp_table(self, context, port_id, action):
|
||||
"""Function provides notification to L3 agent.
|
||||
|
||||
Function provides the details of the VM ARP to the
|
||||
L3 agent when a Nova instance gets created or deleted.
|
||||
"""
|
||||
port_dict = self._core_plugin._get_port(context, port_id)
|
||||
if "compute:" not in port_dict['device_owner']:
|
||||
return
|
||||
ip_address = port_dict['fixed_ips'][0]['ip_address']
|
||||
subnet = port_dict['fixed_ips'][0]['subnet_id']
|
||||
filters = {'fixed_ips': {'subnet_id': [subnet]}}
|
||||
ports = self._core_plugin.get_ports(context, filters=filters)
|
||||
for port in ports:
|
||||
if port['device_owner'] == DEVICE_OWNER_DVR_INTERFACE:
|
||||
router_id = port['device_id']
|
||||
router_dict = self._get_router(context, router_id)
|
||||
if router_dict.extra_attributes.distributed:
|
||||
arp_table = {'ip_address': ip_address,
|
||||
'mac_address': port_dict['mac_address'],
|
||||
'subnet_id': subnet}
|
||||
if action == "add":
|
||||
notify_action = self.l3_rpc_notifier.add_arp_entry
|
||||
elif action == "del":
|
||||
notify_action = self.l3_rpc_notifier.del_arp_entry
|
||||
notify_action(context, router_id, arp_table)
|
||||
return
|
||||
|
||||
def delete_csnat_router_interface_ports(self, context,
|
||||
router, subnet_id=None):
|
||||
# Each csnat router interface port is associated
|
||||
# with a subnet, so we need to pass the subnet id to
|
||||
# delete the right ports.
|
||||
admin_ctx = context.elevated()
|
||||
device_filter = {
|
||||
'device_id': [router['id']],
|
||||
'device_owner': [DEVICE_OWNER_DVR_SNAT]}
|
||||
c_snat_ports = self._core_plugin.get_ports(
|
||||
admin_ctx, filters=device_filter)
|
||||
for p in c_snat_ports:
|
||||
if subnet_id is None:
|
||||
self._core_plugin.delete_port(admin_ctx,
|
||||
p['id'],
|
||||
l3_port_check=False)
|
||||
else:
|
||||
if p['fixed_ips'][0]['subnet_id'] == subnet_id:
|
||||
LOG.info(_("Subnet matches: %s"), subnet_id)
|
||||
self._core_plugin.delete_port(admin_ctx,
|
||||
p['id'],
|
||||
l3_port_check=False)
|
||||
break
|
||||
|
||||
|
||||
def _is_distributed_router(router):
|
||||
"""Return True if router to be handled is distributed."""
|
||||
try:
|
||||
# See if router is a DB object first
|
||||
requested_router_type = router.extra_attributes.distributed
|
||||
except AttributeError:
|
||||
# if not, try to see if it is a request body
|
||||
requested_router_type = router.get('distributed')
|
||||
if attributes.is_attr_set(requested_router_type):
|
||||
return requested_router_type
|
||||
return cfg.CONF.router_distributed
|
|
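
# Editor's sketch (not part of the commit): the resolution order that
# _is_distributed_router() implements, restated standalone. The default
# below stands in for cfg.CONF.router_distributed, and the sample request
# bodies are hypothetical.
ROUTER_DISTRIBUTED_DEFAULT = False

def resolve_router_type(body, default=ROUTER_DISTRIBUTED_DEFAULT):
    # an explicit 'distributed' attribute wins over the global default
    value = body.get('distributed')
    return default if value is None else value

print(resolve_router_type({'distributed': True}))  # True
print(resolve_router_type({}))                     # the deployment default
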
@@ -1,276 +0,0 @@
#
# (c) Copyright 2014 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import random

from sqlalchemy.orm import exc

from neutron.common import constants as q_const
from neutron.db import l3_agentschedulers_db as l3agent_sch_db
from neutron.db import l3_db
from neutron.db import l3_gwmode_db  # noqa
from neutron.extensions import l3agentscheduler
from neutron.openstack.common import log as logging

LOG = logging.getLogger(__name__)


class L3_DVRsch_db_mixin(l3_db.L3_NAT_db_mixin,
                         l3agent_sch_db.L3AgentSchedulerDbMixin):
    """Mixin class for L3 DVR scheduler.

    @l3_db.L3_NAT_db_mixin db mixin class for L3
    @l3agent_sch_db.L3AgentSchedulerDbMixin
    """
    def dvr_update_router_addvm(self, context, port):
        ips = port['fixed_ips']
        for ip in ips:
            subnet = ip['subnet_id']
            filter_sub = {'fixed_ips': {'subnet_id': [subnet]},
                          'device_owner':
                          [q_const.DEVICE_OWNER_DVR_INTERFACE]}
            router_id = None
            ports = self._core_plugin.get_ports(context,
                                                filters=filter_sub)
            for port in ports:
                router_id = port['device_id']
                router_dict = self._get_router(context, router_id)
                if router_dict.get('distributed', False):
                    payload = {'subnet_id': subnet}
                    self.l3_rpc_notifier.routers_updated(
                        context, [router_id], None, payload)
                    break
            LOG.debug('DVR: dvr_update_router_addvm %s ', router_id)

    def get_dvrrouters_by_vmportid(self, context, port_id):
        """Gets the dvr routers on vmport subnets."""
        router_ids = set()
        port_dict = self._core_plugin._get_port(context, port_id)
        fixed_ips = port_dict['fixed_ips']
        for fixedip in fixed_ips:
            vm_subnet = fixedip['subnet_id']
            filter_sub = {'fixed_ips': {'subnet_id': [vm_subnet]},
                          'device_owner':
                          [q_const.DEVICE_OWNER_DVR_INTERFACE]}
            subnetports = self._core_plugin.get_ports(context,
                                                      filters=filter_sub)
            for subnetport in subnetports:
                routerid = subnetport['device_id']
                router_ids.add(routerid)
        return router_ids

    def get_subnetids_on_router(self, context, router_id):
        """Only get subnet IDs for interfaces that are
        attached to the given router.
        """
        subnet_ids = set()
        filter_rtr = {'device_id': [router_id]}
        int_ports = self._core_plugin.get_ports(context,
                                                filters=filter_rtr)
        for int_port in int_ports:
            int_ips = int_port['fixed_ips']
            int_subnet = int_ips[0]['subnet_id']
            subnet_ids.add(int_subnet)
        return subnet_ids

    def check_vm_exists_onsubnet(self, context, host, port_id, subnet_id):
"""Check if there is any vm exists on the subnet_id."""
|
||||
        filter_sub = {'fixed_ips': {'subnet_id': [subnet_id]}}
        ports = self._core_plugin.get_ports(context,
                                            filters=filter_sub)
        for port in ports:
            if ("compute:" in port['device_owner']
                and port['status'] == 'ACTIVE'
                and port['binding:host_id'] == host
                and port['id'] != port_id
                ):
                LOG.debug('DVR- Vm exists for subnet %(subnet_id)s on host '
                          '%(host)s', {'subnet_id': subnet_id,
                                       'host': host})
                return True
        return False

    def delete_namespace_onhost(self, context, host, router_id):
        """Delete the given router namespace on the host."""
        agent = self._core_plugin._get_agent_by_type_and_host(
            context, q_const.AGENT_TYPE_L3, host)
        agent_id = str(agent.id)
        with context.session.begin(subtransactions=True):
            bindings = (context.session.
                        query(l3agent_sch_db.RouterL3AgentBinding).
                        filter_by(router_id=router_id))
            for bind in bindings:
                if bind.l3_agent_id == agent_id:
                    context.session.delete(bind)
                    break
        self.l3_rpc_notifier.router_removed_from_agent(context,
                                                       router_id,
                                                       host)
        LOG.debug('Deleted router %(router_id)s on agent.id %(id)s',
                  {'router_id': router_id,
                   'id': agent.id})

    def dvr_deletens_ifnovm(self, context, port_id):
        """Delete the DVR namespace if no VM exists."""
        router_ids = self.get_dvrrouters_by_vmportid(context, port_id)
        port_host = self._core_plugin.get_bindinghost_by_portid(port_id)
        if not router_ids:
            LOG.debug('No namespaces available for this port %(port)s '
                      'on host %(host)s', {'port': port_id,
                                           'host': port_host})
            return
        for router_id in router_ids:
            subnet_ids = self.get_subnetids_on_router(context, router_id)
            for subnet in subnet_ids:
                if self.check_vm_exists_onsubnet(context,
                                                 port_host,
                                                 port_id,
                                                 subnet):
                    return
            filter_rtr = {'device_id': [router_id],
                          'device_owner':
                          [q_const.DEVICE_OWNER_DVR_INTERFACE]}
            int_ports = self._core_plugin.get_ports(context,
                                                    filters=filter_rtr)
            for prt in int_ports:
                dvr_binding = (self._core_plugin.
                               get_dvr_port_binding_by_host(context,
                                                            prt['id'],
                                                            port_host))
                if dvr_binding:
                    # unbind this port from router
                    dvr_binding['router_id'] = None
                    dvr_binding.update(dvr_binding)
            self.delete_namespace_onhost(context, port_host, router_id)
            LOG.debug('Deleted router namespace %(router_id)s '
                      'on host %(host)s', {'router_id': router_id,
                                           'host': port_host})
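
    # Editor's sketch (not part of the commit): the two hooks above are the
    # VM-port lifecycle entry points. A core plugin would call, roughly:
    #   on compute port create -> self.dvr_update_router_addvm(context, port)
    #   on compute port delete -> self.dvr_deletens_ifnovm(context, port_id)
    # so DVR router namespaces follow VMs onto and off of each host.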

    def bind_snat_router(self, context, router_id, chosen_agent):
        """Bind the router to the chosen l3 agent."""
        with context.session.begin(subtransactions=True):
            binding = l3agent_sch_db.CentralizedSnatL3AgentBinding()
            binding.l3_agent = chosen_agent
            binding.router_id = router_id
            context.session.add(binding)
            LOG.debug('SNAT Router %(router_id)s is scheduled to L3 agent '
                      '%(agent_id)s', {'router_id': router_id,
                                       'agent_id': chosen_agent.id})

    def bind_dvrrouter_servicenode(self, context, router_id,
                                   chosen_snat_agent):
        """Bind the IR router to service node if not already hosted."""
        query = (context.session.query(l3agent_sch_db.RouterL3AgentBinding).
                 filter_by(router_id=router_id))
        for bind in query:
            if bind.l3_agent_id == chosen_snat_agent.id:
                LOG.debug('Distributed Router %(router_id)s already hosted '
                          'on snat l3_agent %(id)s',
                          {'router_id': router_id, 'id': chosen_snat_agent.id})
                return

        LOG.debug('Binding the distributed router %(router_id)s to '
                  'the snat agent %(id)s',
                  {'router_id': router_id,
                   'id': chosen_snat_agent.id})
        self.bind_router(context, router_id, chosen_snat_agent)

    def bind_snat_servicenode(self, context, router_id, snat_candidates):
        """Bind the snat router to the chosen l3 service agent."""
        chosen_snat_agent = random.choice(snat_candidates)
        self.bind_snat_router(context, router_id, chosen_snat_agent)

    def unbind_snat_servicenode(self, context, router_id):
"""Unbind the snat router to the chosen l3 service agent."""
|
||||
        vm_exists = False
        agent_id = None
        vm_ports = []
        host = None
        with context.session.begin(subtransactions=True):
            query = (context.session.
                     query(l3agent_sch_db.CentralizedSnatL3AgentBinding).
                     filter_by(router_id=router_id))
            try:
                binding = query.one()
            except exc.NoResultFound:
                LOG.debug('no snat router binding entry found for router '
                          '%(router_id)s', {'router_id': router_id})
                return

            host = binding.l3_agent.host
            subnet_ids = self.get_subnetids_on_router(context, router_id)
            for subnet in subnet_ids:
                vm_ports = (
                    self._core_plugin.get_compute_ports_on_host_by_subnet(
                        context, host, subnet))
                if vm_ports:
                    vm_exists = True
                    LOG.debug('vm exists on the snat enabled l3_agent '
                              'host %(host)s and router_id '
                              '%(router_id)s', {'host': host,
                                                'router_id':
                                                router_id})
                    break
            agent_id = binding.l3_agent_id
            LOG.debug('Delete the binding the SNAT router %(router_id)s '
                      'from agent %(id)s', {'router_id': router_id,
                                            'id': agent_id})
            context.session.delete(binding)

        if not vm_exists:
            query = (context.session.
                     query(l3agent_sch_db.RouterL3AgentBinding).
                     filter_by(router_id=router_id))
            for bind in query:
                if bind.l3_agent_id == agent_id:
                    context.session.delete(bind)
                    self.l3_rpc_notifier.router_removed_from_agent(
                        context, router_id, host)
                    LOG.debug('Removed the binding for router '
                              '%(router_id)s from agent %(id)s',
                              {'router_id': router_id, 'id': agent_id})
                    break

    def schedule_snat_router(self, plugin, context, router_id, gw_exists):
        """Schedule the snat router on l3 service agent."""
        sync_router = plugin.get_router(context, router_id)
        if gw_exists:
            query = (context.session.
                     query(l3agent_sch_db.CentralizedSnatL3AgentBinding).
                     filter_by(router_id=router_id))
            for bind in query:
                agt_id = bind.l3_agent_id
                LOG.debug('SNAT Router %(router_id)s has already been '
                          'hosted by L3 agent '
                          '%(agent_id)s', {'router_id': router_id,
                                           'agent_id': agt_id})
                self.bind_dvrrouter_servicenode(context,
                                                router_id,
                                                bind.l3_agent)
                return
            active_l3_agents = plugin.get_l3_agents(context, active=True)
            if not active_l3_agents:
                LOG.warn(_('No active L3 agents'))
                return
            snat_candidates = plugin.get_snat_candidates(sync_router,
                                                         active_l3_agents)
            if snat_candidates:
                self.bind_snat_servicenode(context, router_id, snat_candidates)
            else:
                raise (l3agentscheduler.
                       NoSnatEnabledL3Agent(router_id=router_id))
        else:
            self.unbind_snat_servicenode(context, router_id)
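
# Editor's sketch (not from the commit): bind_snat_servicenode() picks one
# agent uniformly at random from the candidate list via random.choice, as
# above. The agent names here are hypothetical.
import random

snat_candidates = ['l3-agent-node1', 'l3-agent-node2', 'l3-agent-node3']
chosen = random.choice(snat_candidates)  # any candidate, equal probability
print('SNAT router would be scheduled to', chosen)
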
@@ -1,85 +0,0 @@
# Copyright 2013 VMware, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

import sqlalchemy as sa
from sqlalchemy.orm import exc

from neutron.db import db_base_plugin_v2
from neutron.db import l3_agentschedulers_db as l3agent_sch_db
from neutron.db import l3_db
from neutron.extensions import l3
from neutron.openstack.common import log as logging


LOG = logging.getLogger(__name__)
EXTERNAL_GW_INFO = l3.EXTERNAL_GW_INFO

# Modify the Router Data Model adding the enable_snat attribute
setattr(l3_db.Router, 'enable_snat',
        sa.Column(sa.Boolean, default=True, nullable=False))


class L3_NAT_db_mixin(l3_db.L3_NAT_db_mixin):
    """Mixin class to add configurable gateway modes."""

    # Register dict extend functions for ports and networks
    db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
        l3.ROUTERS, ['_extend_router_dict_gw_mode'])

    def _extend_router_dict_gw_mode(self, router_res, router_db):
        if router_db.gw_port_id:
            nw_id = router_db.gw_port['network_id']
            router_res[EXTERNAL_GW_INFO] = {
                'network_id': nw_id,
                'enable_snat': router_db.enable_snat}

    def _update_router_gw_info(self, context, router_id, info, router=None):
        # Load the router only if necessary
        if not router:
            router = self._get_router(context, router_id)
        # if enable_snat is not specified use the value
        # stored in the database (default:True)
        enable_snat = not info or info.get('enable_snat', router.enable_snat)
        with context.session.begin(subtransactions=True):
            router.enable_snat = enable_snat

        # Calls superclass, pass router db object for avoiding re-loading
        super(L3_NAT_db_mixin, self)._update_router_gw_info(
            context, router_id, info, router=router)
        # Returning the router might come back useful if this
        # method is overridden in child classes
        return router

    def _build_routers_list(self, context, routers, gw_ports):
        gw_port_id_gw_port_dict = {}
        for gw_port in gw_ports:
            gw_port_id_gw_port_dict[gw_port['id']] = gw_port
        for rtr in routers:
            gw_port_id = rtr['gw_port_id']
            if gw_port_id:
                rtr['gw_port'] = gw_port_id_gw_port_dict[gw_port_id]
                # Add enable_snat key
                rtr['enable_snat'] = rtr[EXTERNAL_GW_INFO]['enable_snat']
                query = (context.session.
                         query(l3agent_sch_db.CentralizedSnatL3AgentBinding).
                         filter_by(router_id=rtr['id']))
                try:
                    binding = query.one()
                    rtr['gw_port_host'] = binding.l3_agent.host
                except exc.NoResultFound:
                    rtr['gw_port_host'] = None
                    LOG.debug('no snat router binding entry '
                              'found for router_id %s', rtr['id'])
        return routers
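
# Editor's sketch (not from the commit): with this mixin loaded, enable_snat
# rides along inside external_gateway_info. A hypothetical router-update
# request body for the Neutron API:
body = {
    'router': {
        'external_gateway_info': {
            'network_id': 'EXT-NET-UUID',  # placeholder id
            'enable_snat': False,          # persisted on l3_db.Router.enable_snat
        }
    }
}
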
@@ -1,198 +0,0 @@
# Copyright (c) 2012 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from oslo.config import cfg

from neutron.common import constants
from neutron.common import utils
from neutron import context as neutron_context
from neutron.extensions import l3
from neutron.extensions import portbindings
from neutron import manager
from neutron.openstack.common import jsonutils
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants as plugin_constants


LOG = logging.getLogger(__name__)


class L3RpcCallbackMixin(object):
"""A mix-in that enable L3 agent rpc support in plugin implementations."""
|
||||
|
||||
    def sync_routers(self, context, **kwargs):
        """Sync routers according to filters to a specific agent.

        @param context: contain user information
        @param kwargs: host, router_ids
        @return: a list of routers
                 with their interfaces and floating_ips
        """
        router_ids = kwargs.get('router_ids')
        host = kwargs.get('host')
        context = neutron_context.get_admin_context()
        l3plugin = manager.NeutronManager.get_service_plugins()[
            plugin_constants.L3_ROUTER_NAT]
        if not l3plugin:
            routers = {}
            LOG.error(_('No plugin for L3 routing registered! Will reply '
                        'to l3 agent with empty router dictionary.'))
        elif utils.is_extension_supported(
                l3plugin, constants.L3_AGENT_SCHEDULER_EXT_ALIAS):
            if cfg.CONF.router_auto_schedule:
                l3plugin.auto_schedule_routers(context, host, router_ids)
            routers = l3plugin.list_active_sync_routers_on_active_l3_agent(
                context, host, router_ids)
        else:
            routers = l3plugin.get_sync_data(context, router_ids)
        plugin = manager.NeutronManager.get_plugin()
        if utils.is_extension_supported(
                plugin, constants.PORT_BINDING_EXT_ALIAS):
            self._ensure_host_set_on_ports(context, plugin, host, routers)
        LOG.debug(_("Routers returned to l3 agent:\n %s"),
                  jsonutils.dumps(routers, indent=5))
        return routers

    def _ensure_host_set_on_ports(self, context, plugin, host, routers):
        for router in routers:
            LOG.debug(_("Checking router: %(id)s for host: %(host)s"),
                      {'id': router['id'], 'host': host})
            self._ensure_host_set_on_port(context, plugin, host,
                                          router.get('gw_port'),
                                          router['id'])
            for interface in router.get(constants.INTERFACE_KEY, []):
                self._ensure_host_set_on_port(context, plugin, host,
                                              interface, router['id'])

    def _ensure_host_set_on_port(self, context, plugin, host, port,
                                 router_id=None):
        if (port and
            port.get('device_owner') ==
            constants.DEVICE_OWNER_DVR_INTERFACE):
            # Ports that are DVR interfaces have multiple bindings (based on
            # the number of hosts on which DVR interfaces are spawned). Such
            # bindings are created/updated here by invoking
            # update_dvr_port_binding
            plugin.update_dvr_port_binding(context, port['id'],
                                           {'port':
                                            {portbindings.HOST_ID: host,
                                             'device_id': router_id}
                                            })
        elif (port and
              (port.get(portbindings.HOST_ID) != host or
               port.get(portbindings.VIF_TYPE) ==
               portbindings.VIF_TYPE_BINDING_FAILED)):
            # All ports, including ports created for SNAT'ing for
            # DVR are handled here
            plugin.update_port(context, port['id'],
                               {'port': {portbindings.HOST_ID: host}})

    def get_external_network_id(self, context, **kwargs):
        """Get one external network id for l3 agent.

        l3 agent expects only one external network when it performs
        this query.
"""
|
||||
context = neutron_context.get_admin_context()
|
||||
plugin = manager.NeutronManager.get_plugin()
|
||||
net_id = plugin.get_external_network_id(context)
|
||||
LOG.debug(_("External network ID returned to l3 agent: %s"),
|
||||
net_id)
|
||||
return net_id
|
||||
|
||||
def update_floatingip_statuses(self, context, router_id, fip_statuses):
|
||||
"""Update operational status for a floating IP."""
|
||||
l3_plugin = manager.NeutronManager.get_service_plugins()[
|
||||
plugin_constants.L3_ROUTER_NAT]
|
||||
with context.session.begin(subtransactions=True):
|
||||
for (floatingip_id, status) in fip_statuses.iteritems():
|
||||
LOG.debug(_("New status for floating IP %(floatingip_id)s: "
|
||||
"%(status)s"), {'floatingip_id': floatingip_id,
|
||||
'status': status})
|
||||
try:
|
||||
l3_plugin.update_floatingip_status(context,
|
||||
floatingip_id,
|
||||
status)
|
||||
except l3.FloatingIPNotFound:
|
||||
LOG.debug(_("Floating IP: %s no longer present."),
|
||||
floatingip_id)
|
||||
            # Find all floating IPs known to have been associated with the
            # given router for which an update was not received; set them
            # DOWN mercilessly.
            # This situation might occur for some asynchronous backends if
            # notifications were missed
            known_router_fips = l3_plugin.get_floatingips(
                context, {'last_known_router_id': [router_id]})
            # Consider only floating ips which were disassociated in the API
            # FIXME(salv-orlando): Filtering in code should be avoided.
            # the plugin should offer a way to specify a null filter
            fips_to_disable = (fip['id'] for fip in known_router_fips
                               if not fip['router_id'])
            for fip_id in fips_to_disable:
                l3_plugin.update_floatingip_status(
                    context, fip_id, constants.FLOATINGIP_STATUS_DOWN)

    def get_ports_by_subnet(self, context, **kwargs):
        """DVR: RPC called by dvr-agent to get all ports for subnet."""
        subnet_id = kwargs.get('subnet_id')
        LOG.debug("DVR: subnet_id: %s", subnet_id)
        filters = {'fixed_ips': {'subnet_id': [subnet_id]}}
        plugin = manager.NeutronManager.get_plugin()
        return plugin.get_ports(context, filters=filters)

    def get_agent_gateway_port(self, context, **kwargs):
        """Get Agent Gateway port for FIP.

        l3 agent expects an Agent Gateway Port to be returned
        for this query.
        """
        network_id = kwargs.get('network_id')
        host = kwargs.get('host')
        context = neutron_context.get_admin_context()
        plugin = manager.NeutronManager.get_plugin()
        l3plugin = manager.NeutronManager.get_service_plugins()[
            plugin_constants.L3_ROUTER_NAT]
        agent_port = l3plugin.create_fip_agent_gw_port_if_not_exists(
            context, network_id, host)
        self._ensure_host_set_on_port(context, plugin, host,
                                      agent_port)
        LOG.debug('Agent Gateway port returned : %(agent_port)s with '
                  'host %(host)s', {'agent_port': agent_port,
                                    'host': host})
        return agent_port

    def get_snat_router_interface_ports(self, context, **kwargs):
        """Get SNAT serviced Router Port List.

        The Service Node that hosts the SNAT service requires
        the ports to service the router interfaces.
        This function checks whether any ports are available; if not,
        it creates ports on the router's interfaces and sends the
        resulting list to the L3 agent.
"""
|
||||
router_id = kwargs.get('router_id')
|
||||
host = kwargs.get('host')
|
||||
context = neutron_context.get_admin_context()
|
||||
plugin = manager.NeutronManager.get_plugin()
|
||||
l3plugin = manager.NeutronManager.get_service_plugins()[
|
||||
plugin_constants.L3_ROUTER_NAT]
|
||||
snat_port_list = l3plugin.create_snat_intf_port_list_if_not_exists(
|
||||
context, router_id)
|
||||
for p in snat_port_list:
|
||||
self._ensure_host_set_on_port(context, plugin, host, p)
|
||||
LOG.debug('SNAT interface ports returned : %(snat_port_list)s '
|
||||
'and on host %(host)s', {'snat_port_list': snat_port_list,
|
||||
'host': host})
|
||||
return snat_port_list
|
|
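
# Editor's sketch (not from the commit): the kwargs shape sync_routers()
# consumes; the host name and router id are placeholders. The callback
# replies with the routers plus their interfaces and floating ips for
# that host.
kwargs = {'host': 'compute-1', 'router_ids': ['ROUTER-UUID']}
# routers = callbacks.sync_routers(admin_context, **kwargs)
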
@@ -1,13 +0,0 @@
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
@@ -1,800 +0,0 @@
# Copyright 2013 OpenStack Foundation. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from oslo.db import exception
import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy.orm import exc
from sqlalchemy.orm import validates

from neutron.api.v2 import attributes
from neutron.common import exceptions as n_exc
from neutron.db import db_base_plugin_v2 as base_db
from neutron.db import model_base
from neutron.db import models_v2
from neutron.db import servicetype_db as st_db
from neutron.extensions import loadbalancer
from neutron import manager
from neutron.openstack.common import excutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import uuidutils
from neutron.plugins.common import constants
from neutron.services.loadbalancer import constants as lb_const


LOG = logging.getLogger(__name__)


class SessionPersistence(model_base.BASEV2):

    vip_id = sa.Column(sa.String(36),
                       sa.ForeignKey("vips.id"),
                       primary_key=True)
    type = sa.Column(sa.Enum("SOURCE_IP",
                             "HTTP_COOKIE",
                             "APP_COOKIE",
                             name="sesssionpersistences_type"),
                     nullable=False)
    cookie_name = sa.Column(sa.String(1024))


class PoolStatistics(model_base.BASEV2):
    """Represents pool statistics."""

    pool_id = sa.Column(sa.String(36), sa.ForeignKey("pools.id"),
                        primary_key=True)
    bytes_in = sa.Column(sa.BigInteger, nullable=False)
    bytes_out = sa.Column(sa.BigInteger, nullable=False)
    active_connections = sa.Column(sa.BigInteger, nullable=False)
    total_connections = sa.Column(sa.BigInteger, nullable=False)

    @validates('bytes_in', 'bytes_out',
               'active_connections', 'total_connections')
    def validate_non_negative_int(self, key, value):
        if value < 0:
            data = {'key': key, 'value': value}
            raise ValueError(_('The %(key)s field can not have '
                               'negative value. '
                               'Current value is %(value)d.') % data)
        return value


class Vip(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant,
          models_v2.HasStatusDescription):
    """Represents a v2 neutron loadbalancer vip."""

    name = sa.Column(sa.String(255))
    description = sa.Column(sa.String(255))
    port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id'))
    protocol_port = sa.Column(sa.Integer, nullable=False)
    protocol = sa.Column(sa.Enum("HTTP", "HTTPS", "TCP", name="lb_protocols"),
                         nullable=False)
    pool_id = sa.Column(sa.String(36), nullable=False, unique=True)
    session_persistence = orm.relationship(SessionPersistence,
                                           uselist=False,
                                           backref="vips",
                                           cascade="all, delete-orphan")
    admin_state_up = sa.Column(sa.Boolean(), nullable=False)
    connection_limit = sa.Column(sa.Integer)
    port = orm.relationship(models_v2.Port)


class Member(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant,
             models_v2.HasStatusDescription):
    """Represents a v2 neutron loadbalancer member."""

    __table_args__ = (
        sa.schema.UniqueConstraint('pool_id', 'address', 'protocol_port',
                                   name='uniq_member0pool_id0address0port'),
    )
    pool_id = sa.Column(sa.String(36), sa.ForeignKey("pools.id"),
                        nullable=False)
    address = sa.Column(sa.String(64), nullable=False)
    protocol_port = sa.Column(sa.Integer, nullable=False)
    weight = sa.Column(sa.Integer, nullable=False)
    admin_state_up = sa.Column(sa.Boolean(), nullable=False)


class Pool(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant,
           models_v2.HasStatusDescription):
    """Represents a v2 neutron loadbalancer pool."""

    vip_id = sa.Column(sa.String(36), sa.ForeignKey("vips.id"))
    name = sa.Column(sa.String(255))
    description = sa.Column(sa.String(255))
    subnet_id = sa.Column(sa.String(36), nullable=False)
    protocol = sa.Column(sa.Enum("HTTP", "HTTPS", "TCP", name="lb_protocols"),
                         nullable=False)
    lb_method = sa.Column(sa.Enum("ROUND_ROBIN",
                                  "LEAST_CONNECTIONS",
                                  "SOURCE_IP",
                                  name="pools_lb_method"),
                          nullable=False)
    admin_state_up = sa.Column(sa.Boolean(), nullable=False)
    stats = orm.relationship(PoolStatistics,
                             uselist=False,
                             backref="pools",
                             cascade="all, delete-orphan")
    members = orm.relationship(Member, backref="pools",
                               cascade="all, delete-orphan")
    monitors = orm.relationship("PoolMonitorAssociation", backref="pools",
                                cascade="all, delete-orphan")
    vip = orm.relationship(Vip, backref='pool')

    provider = orm.relationship(
        st_db.ProviderResourceAssociation,
        uselist=False,
        lazy="joined",
        primaryjoin="Pool.id==ProviderResourceAssociation.resource_id",
        foreign_keys=[st_db.ProviderResourceAssociation.resource_id]
    )


class HealthMonitor(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
    """Represents a v2 neutron loadbalancer healthmonitor."""

    type = sa.Column(sa.Enum("PING", "TCP", "HTTP", "HTTPS",
                             name="healthmontiors_type"),
                     nullable=False)
    delay = sa.Column(sa.Integer, nullable=False)
    timeout = sa.Column(sa.Integer, nullable=False)
    max_retries = sa.Column(sa.Integer, nullable=False)
    http_method = sa.Column(sa.String(16))
    url_path = sa.Column(sa.String(255))
    expected_codes = sa.Column(sa.String(64))
    admin_state_up = sa.Column(sa.Boolean(), nullable=False)

    pools = orm.relationship(
        "PoolMonitorAssociation", backref="healthmonitor",
        cascade="all", lazy="joined"
    )


class PoolMonitorAssociation(model_base.BASEV2,
                             models_v2.HasStatusDescription):
    """Many-to-many association between pool and healthMonitor classes."""

    pool_id = sa.Column(sa.String(36),
                        sa.ForeignKey("pools.id"),
                        primary_key=True)
    monitor_id = sa.Column(sa.String(36),
                           sa.ForeignKey("healthmonitors.id"),
                           primary_key=True)


class LoadBalancerPluginDb(loadbalancer.LoadBalancerPluginBase,
                           base_db.CommonDbMixin):
    """Wraps loadbalancer with SQLAlchemy models.

    A class that wraps the implementation of the Neutron loadbalancer
    plugin database access interface using SQLAlchemy models.
    """

    @property
    def _core_plugin(self):
        return manager.NeutronManager.get_plugin()

    def update_status(self, context, model, id, status,
                      status_description=None):
        with context.session.begin(subtransactions=True):
            if issubclass(model, Vip):
                try:
                    v_db = (self._model_query(context, model).
                            filter(model.id == id).
                            options(orm.noload('port')).
                            one())
                except exc.NoResultFound:
                    raise loadbalancer.VipNotFound(vip_id=id)
            else:
                v_db = self._get_resource(context, model, id)
            if v_db.status != status:
                v_db.status = status
                # update status_description in two cases:
                # - new value is passed
                # - old value is not None (needs to be updated anyway)
                if status_description or v_db['status_description']:
                    v_db.status_description = status_description

    def _get_resource(self, context, model, id):
        try:
            r = self._get_by_id(context, model, id)
        except exc.NoResultFound:
            with excutils.save_and_reraise_exception(reraise=False) as ctx:
                if issubclass(model, Vip):
                    raise loadbalancer.VipNotFound(vip_id=id)
                elif issubclass(model, Pool):
                    raise loadbalancer.PoolNotFound(pool_id=id)
                elif issubclass(model, Member):
                    raise loadbalancer.MemberNotFound(member_id=id)
                elif issubclass(model, HealthMonitor):
                    raise loadbalancer.HealthMonitorNotFound(monitor_id=id)
                ctx.reraise = True
        return r

    def assert_modification_allowed(self, obj):
        status = getattr(obj, 'status', None)

        if status == constants.PENDING_DELETE:
            raise loadbalancer.StateInvalid(id=id, state=status)

    ########################################################
    # VIP DB access
    def _make_vip_dict(self, vip, fields=None):
        fixed_ip = (vip.port.fixed_ips or [{}])[0]

        res = {'id': vip['id'],
               'tenant_id': vip['tenant_id'],
               'name': vip['name'],
               'description': vip['description'],
               'subnet_id': fixed_ip.get('subnet_id'),
               'address': fixed_ip.get('ip_address'),
               'port_id': vip['port_id'],
               'protocol_port': vip['protocol_port'],
               'protocol': vip['protocol'],
               'pool_id': vip['pool_id'],
               'session_persistence': None,
               'connection_limit': vip['connection_limit'],
               'admin_state_up': vip['admin_state_up'],
               'status': vip['status'],
               'status_description': vip['status_description']}

        if vip['session_persistence']:
            s_p = {
                'type': vip['session_persistence']['type']
            }

            if vip['session_persistence']['type'] == 'APP_COOKIE':
                s_p['cookie_name'] = vip['session_persistence']['cookie_name']

            res['session_persistence'] = s_p

        return self._fields(res, fields)

    def _check_session_persistence_info(self, info):
        """Performs sanity check on session persistence info.

        :param info: Session persistence info
        """
        if info['type'] == 'APP_COOKIE':
            if not info.get('cookie_name'):
                raise ValueError(_("'cookie_name' should be specified for this"
                                   " type of session persistence."))
        else:
            if 'cookie_name' in info:
                raise ValueError(_("'cookie_name' is not allowed for this type"
                                   " of session persistence"))
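
    # Editor's sketch (not part of the commit): inputs the check above
    # accepts or rejects; the dicts are hypothetical.
    #   {'type': 'APP_COOKIE', 'cookie_name': 'JSESSIONID'}  -> accepted
    #   {'type': 'SOURCE_IP', 'cookie_name': 'JSESSIONID'}   -> ValueError
    #   {'type': 'APP_COOKIE'}                               -> ValueError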

    def _create_session_persistence_db(self, session_info, vip_id):
        self._check_session_persistence_info(session_info)

        sesspersist_db = SessionPersistence(
            type=session_info['type'],
            cookie_name=session_info.get('cookie_name'),
            vip_id=vip_id)
        return sesspersist_db

    def _update_vip_session_persistence(self, context, vip_id, info):
        self._check_session_persistence_info(info)

        vip = self._get_resource(context, Vip, vip_id)

        with context.session.begin(subtransactions=True):
            # Update sessionPersistence table
            sess_qry = context.session.query(SessionPersistence)
            sesspersist_db = sess_qry.filter_by(vip_id=vip_id).first()

            # Insert a None cookie_name if it is not present, to overwrite
            # an existing value in the database.
            if 'cookie_name' not in info:
                info['cookie_name'] = None

            if sesspersist_db:
                sesspersist_db.update(info)
            else:
                sesspersist_db = SessionPersistence(
                    type=info['type'],
                    cookie_name=info['cookie_name'],
                    vip_id=vip_id)
                context.session.add(sesspersist_db)
                # Update vip table
                vip.session_persistence = sesspersist_db
            context.session.add(vip)

    def _delete_session_persistence(self, context, vip_id):
        with context.session.begin(subtransactions=True):
            sess_qry = context.session.query(SessionPersistence)
            sess_qry.filter_by(vip_id=vip_id).delete()

    def _create_port_for_vip(self, context, vip_db, subnet_id, ip_address):
        # resolve subnet and create port
        subnet = self._core_plugin.get_subnet(context, subnet_id)
        fixed_ip = {'subnet_id': subnet['id']}
        if ip_address and ip_address != attributes.ATTR_NOT_SPECIFIED:
            fixed_ip['ip_address'] = ip_address

        port_data = {
            'tenant_id': vip_db.tenant_id,
            'name': 'vip-' + vip_db.id,
            'network_id': subnet['network_id'],
            'mac_address': attributes.ATTR_NOT_SPECIFIED,
            'admin_state_up': False,
            'device_id': '',
            'device_owner': '',
            'fixed_ips': [fixed_ip]
        }

        port = self._core_plugin.create_port(context, {'port': port_data})
        vip_db.port_id = port['id']

    def create_vip(self, context, vip):
        v = vip['vip']
        tenant_id = self._get_tenant_id_for_create(context, v)

        with context.session.begin(subtransactions=True):
            if v['pool_id']:
                pool = self._get_resource(context, Pool, v['pool_id'])
                # validate that the pool has same tenant
                if pool['tenant_id'] != tenant_id:
                    raise n_exc.NotAuthorized()
                # validate that the pool has same protocol
                if pool['protocol'] != v['protocol']:
                    raise loadbalancer.ProtocolMismatch(
                        vip_proto=v['protocol'],
                        pool_proto=pool['protocol'])
                if pool['status'] == constants.PENDING_DELETE:
                    raise loadbalancer.StateInvalid(state=pool['status'],
                                                    id=pool['id'])
            else:
                pool = None

            vip_db = Vip(id=uuidutils.generate_uuid(),
                         tenant_id=tenant_id,
                         name=v['name'],
                         description=v['description'],
                         port_id=None,
                         protocol_port=v['protocol_port'],
                         protocol=v['protocol'],
                         pool_id=v['pool_id'],
                         connection_limit=v['connection_limit'],
                         admin_state_up=v['admin_state_up'],
                         status=constants.PENDING_CREATE)

            session_info = v['session_persistence']

            if session_info:
                s_p = self._create_session_persistence_db(
                    session_info,
                    vip_db['id'])
                vip_db.session_persistence = s_p

            try:
                context.session.add(vip_db)
                context.session.flush()
            except exception.DBDuplicateEntry:
                raise loadbalancer.VipExists(pool_id=v['pool_id'])

        # create a port to reserve address for IPAM
        self._create_port_for_vip(
            context,
            vip_db,
            v['subnet_id'],
            v.get('address')
        )

        if pool:
            pool['vip_id'] = vip_db['id']

        return self._make_vip_dict(vip_db)

    def update_vip(self, context, id, vip):
        v = vip['vip']

        sess_persist = v.pop('session_persistence', None)
        with context.session.begin(subtransactions=True):
            vip_db = self._get_resource(context, Vip, id)

            self.assert_modification_allowed(vip_db)

            if sess_persist:
                self._update_vip_session_persistence(context, id, sess_persist)
            else:
                self._delete_session_persistence(context, id)

            if v:
                try:
                    # in case the new pool already has a vip, the update
                    # will raise an integrity error at the first query
                    old_pool_id = vip_db['pool_id']
                    vip_db.update(v)
                    # If the pool_id is changed, we need to update
                    # the associated pools
                    if 'pool_id' in v:
                        new_pool = self._get_resource(context, Pool,
                                                      v['pool_id'])
                        self.assert_modification_allowed(new_pool)

                        # check that the pool matches the tenant_id
                        if new_pool['tenant_id'] != vip_db['tenant_id']:
                            raise n_exc.NotAuthorized()
                        # validate that the pool has same protocol
                        if new_pool['protocol'] != vip_db['protocol']:
                            raise loadbalancer.ProtocolMismatch(
                                vip_proto=vip_db['protocol'],
                                pool_proto=new_pool['protocol'])
                        if new_pool['status'] == constants.PENDING_DELETE:
                            raise loadbalancer.StateInvalid(
                                state=new_pool['status'],
                                id=new_pool['id'])

                        if old_pool_id:
                            old_pool = self._get_resource(
                                context,
                                Pool,
                                old_pool_id
                            )
                            old_pool['vip_id'] = None

                        new_pool['vip_id'] = vip_db['id']
                except exception.DBDuplicateEntry:
                    raise loadbalancer.VipExists(pool_id=v['pool_id'])

        return self._make_vip_dict(vip_db)

    def delete_vip(self, context, id):
        with context.session.begin(subtransactions=True):
            vip = self._get_resource(context, Vip, id)
            qry = context.session.query(Pool)
            for pool in qry.filter_by(vip_id=id):
                pool.update({"vip_id": None})

            context.session.delete(vip)
            if vip.port:  # this is a Neutron port
                self._core_plugin.delete_port(context, vip.port.id)

    def get_vip(self, context, id, fields=None):
        vip = self._get_resource(context, Vip, id)
        return self._make_vip_dict(vip, fields)

    def get_vips(self, context, filters=None, fields=None):
        return self._get_collection(context, Vip,
                                    self._make_vip_dict,
                                    filters=filters, fields=fields)

    ########################################################
    # Pool DB access
    def _make_pool_dict(self, pool, fields=None):
        res = {'id': pool['id'],
               'tenant_id': pool['tenant_id'],
               'name': pool['name'],
               'description': pool['description'],
               'subnet_id': pool['subnet_id'],
               'protocol': pool['protocol'],
               'vip_id': pool['vip_id'],
               'lb_method': pool['lb_method'],
               'admin_state_up': pool['admin_state_up'],
               'status': pool['status'],
               'status_description': pool['status_description'],
               'provider': ''
               }

        if pool.provider:
            res['provider'] = pool.provider.provider_name

        # Get the associated members
        res['members'] = [member['id'] for member in pool['members']]

        # Get the associated health_monitors
        res['health_monitors'] = [
            monitor['monitor_id'] for monitor in pool['monitors']]
        res['health_monitors_status'] = [
            {'monitor_id': monitor['monitor_id'],
             'status': monitor['status'],
             'status_description': monitor['status_description']}
            for monitor in pool['monitors']]
        return self._fields(res, fields)

    def update_pool_stats(self, context, pool_id, data=None):
        """Update a pool with new stats structure."""
        data = data or {}
        with context.session.begin(subtransactions=True):
            pool_db = self._get_resource(context, Pool, pool_id)
            self.assert_modification_allowed(pool_db)
            pool_db.stats = self._create_pool_stats(context, pool_id, data)

            for member, stats in data.get('members', {}).items():
                stats_status = stats.get(lb_const.STATS_STATUS)
                if stats_status:
                    self.update_status(context, Member, member, stats_status)

    def _create_pool_stats(self, context, pool_id, data=None):
        # This is an internal method to add pool statistics; it is not
        # exposed to the API.
        if not data:
            data = {}
        stats_db = PoolStatistics(
            pool_id=pool_id,
            bytes_in=data.get(lb_const.STATS_IN_BYTES, 0),
            bytes_out=data.get(lb_const.STATS_OUT_BYTES, 0),
            active_connections=data.get(lb_const.STATS_ACTIVE_CONNECTIONS, 0),
            total_connections=data.get(lb_const.STATS_TOTAL_CONNECTIONS, 0)
        )
        return stats_db

    def _delete_pool_stats(self, context, pool_id):
        # This is an internal method to delete pool statistics; it is not
        # exposed to the API.
        with context.session.begin(subtransactions=True):
            stats_qry = context.session.query(PoolStatistics)
            try:
                stats = stats_qry.filter_by(pool_id=pool_id).one()
            except exc.NoResultFound:
                raise loadbalancer.PoolStatsNotFound(pool_id=pool_id)
            context.session.delete(stats)

    def create_pool(self, context, pool):
        v = pool['pool']

        tenant_id = self._get_tenant_id_for_create(context, v)
        with context.session.begin(subtransactions=True):
            pool_db = Pool(id=uuidutils.generate_uuid(),
                           tenant_id=tenant_id,
                           name=v['name'],
                           description=v['description'],
                           subnet_id=v['subnet_id'],
                           protocol=v['protocol'],
                           lb_method=v['lb_method'],
                           admin_state_up=v['admin_state_up'],
                           status=constants.PENDING_CREATE)
            pool_db.stats = self._create_pool_stats(context, pool_db['id'])
            context.session.add(pool_db)

        return self._make_pool_dict(pool_db)

    def update_pool(self, context, id, pool):
        p = pool['pool']
        with context.session.begin(subtransactions=True):
            pool_db = self._get_resource(context, Pool, id)
            self.assert_modification_allowed(pool_db)
            if p:
                pool_db.update(p)

        return self._make_pool_dict(pool_db)

    def _ensure_pool_delete_conditions(self, context, pool_id):
        if context.session.query(Vip).filter_by(pool_id=pool_id).first():
            raise loadbalancer.PoolInUse(pool_id=pool_id)

    def delete_pool(self, context, pool_id):
        # Check if the pool is in use
        self._ensure_pool_delete_conditions(context, pool_id)

        with context.session.begin(subtransactions=True):
            self._delete_pool_stats(context, pool_id)
            pool_db = self._get_resource(context, Pool, pool_id)
            context.session.delete(pool_db)

    def get_pool(self, context, id, fields=None):
        pool = self._get_resource(context, Pool, id)
        return self._make_pool_dict(pool, fields)

    def get_pools(self, context, filters=None, fields=None):
        collection = self._model_query(context, Pool)
        collection = self._apply_filters_to_query(collection, Pool, filters)
        return [self._make_pool_dict(c, fields)
                for c in collection]

    def stats(self, context, pool_id):
        with context.session.begin(subtransactions=True):
            pool = self._get_resource(context, Pool, pool_id)
            stats = pool['stats']

        res = {lb_const.STATS_IN_BYTES: stats['bytes_in'],
               lb_const.STATS_OUT_BYTES: stats['bytes_out'],
               lb_const.STATS_ACTIVE_CONNECTIONS: stats['active_connections'],
               lb_const.STATS_TOTAL_CONNECTIONS: stats['total_connections']}
        return {'stats': res}
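
    # Editor's sketch (not part of the commit): the return shape of stats()
    # above, with made-up numbers:
    #   {'stats': {'bytes_in': 0, 'bytes_out': 0,
    #              'active_connections': 0, 'total_connections': 0}}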

    def create_pool_health_monitor(self, context, health_monitor, pool_id):
        monitor_id = health_monitor['health_monitor']['id']
        with context.session.begin(subtransactions=True):
            assoc_qry = context.session.query(PoolMonitorAssociation)
            assoc = assoc_qry.filter_by(pool_id=pool_id,
                                        monitor_id=monitor_id).first()
            if assoc:
                raise loadbalancer.PoolMonitorAssociationExists(
                    monitor_id=monitor_id, pool_id=pool_id)

            pool = self._get_resource(context, Pool, pool_id)

            assoc = PoolMonitorAssociation(pool_id=pool_id,
                                           monitor_id=monitor_id,
                                           status=constants.PENDING_CREATE)
            pool.monitors.append(assoc)
            monitors = [monitor['monitor_id'] for monitor in pool['monitors']]

        res = {"health_monitor": monitors}
        return res

    def delete_pool_health_monitor(self, context, id, pool_id):
        with context.session.begin(subtransactions=True):
            assoc = self._get_pool_health_monitor(context, id, pool_id)
            pool = self._get_resource(context, Pool, pool_id)
            pool.monitors.remove(assoc)

    def _get_pool_health_monitor(self, context, id, pool_id):
        try:
            assoc_qry = context.session.query(PoolMonitorAssociation)
            return assoc_qry.filter_by(monitor_id=id, pool_id=pool_id).one()
        except exc.NoResultFound:
            raise loadbalancer.PoolMonitorAssociationNotFound(
                monitor_id=id, pool_id=pool_id)

    def get_pool_health_monitor(self, context, id, pool_id, fields=None):
        pool_hm = self._get_pool_health_monitor(context, id, pool_id)
        # need to add tenant_id for admin_or_owner policy check to pass
        hm = self.get_health_monitor(context, id)
        res = {'pool_id': pool_id,
               'monitor_id': id,
               'status': pool_hm['status'],
               'status_description': pool_hm['status_description'],
               'tenant_id': hm['tenant_id']}
        return self._fields(res, fields)

    def update_pool_health_monitor(self, context, id, pool_id,
                                   status, status_description=None):
        with context.session.begin(subtransactions=True):
            assoc = self._get_pool_health_monitor(context, id, pool_id)
            self.assert_modification_allowed(assoc)
            assoc.status = status
            assoc.status_description = status_description

    ########################################################
    # Member DB access
    def _make_member_dict(self, member, fields=None):
        res = {'id': member['id'],
               'tenant_id': member['tenant_id'],
               'pool_id': member['pool_id'],
               'address': member['address'],
               'protocol_port': member['protocol_port'],
               'weight': member['weight'],
               'admin_state_up': member['admin_state_up'],
               'status': member['status'],
               'status_description': member['status_description']}

        return self._fields(res, fields)

    def create_member(self, context, member):
        v = member['member']
        tenant_id = self._get_tenant_id_for_create(context, v)

        try:
            with context.session.begin(subtransactions=True):
                # ensuring that pool exists
                self._get_resource(context, Pool, v['pool_id'])
                member_db = Member(id=uuidutils.generate_uuid(),
                                   tenant_id=tenant_id,
                                   pool_id=v['pool_id'],
                                   address=v['address'],
                                   protocol_port=v['protocol_port'],
                                   weight=v['weight'],
                                   admin_state_up=v['admin_state_up'],
                                   status=constants.PENDING_CREATE)
                context.session.add(member_db)
                return self._make_member_dict(member_db)
        except exception.DBDuplicateEntry:
            raise loadbalancer.MemberExists(
                address=v['address'],
                port=v['protocol_port'],
                pool=v['pool_id'])

    def update_member(self, context, id, member):
        v = member['member']
        try:
            with context.session.begin(subtransactions=True):
                member_db = self._get_resource(context, Member, id)
                self.assert_modification_allowed(member_db)
                if v:
                    member_db.update(v)
                return self._make_member_dict(member_db)
        except exception.DBDuplicateEntry:
            raise loadbalancer.MemberExists(
                address=member_db['address'],
                port=member_db['protocol_port'],
                pool=member_db['pool_id'])

    def delete_member(self, context, id):
        with context.session.begin(subtransactions=True):
            member_db = self._get_resource(context, Member, id)
            context.session.delete(member_db)

    def get_member(self, context, id, fields=None):
        member = self._get_resource(context, Member, id)
        return self._make_member_dict(member, fields)

    def get_members(self, context, filters=None, fields=None):
        return self._get_collection(context, Member,
                                    self._make_member_dict,
                                    filters=filters, fields=fields)

    ########################################################
    # HealthMonitor DB access
    def _make_health_monitor_dict(self, health_monitor, fields=None):
        res = {'id': health_monitor['id'],
               'tenant_id': health_monitor['tenant_id'],
               'type': health_monitor['type'],
               'delay': health_monitor['delay'],
               'timeout': health_monitor['timeout'],
               'max_retries': health_monitor['max_retries'],
               'admin_state_up': health_monitor['admin_state_up']}
# no point to add the values below to
|
||||
# the result if the 'type' is not HTTP/S
|
||||
if res['type'] in ['HTTP', 'HTTPS']:
|
||||
for attr in ['url_path', 'http_method', 'expected_codes']:
|
||||
res[attr] = health_monitor[attr]
|
||||
res['pools'] = [{'pool_id': p['pool_id'],
|
||||
'status': p['status'],
|
||||
'status_description': p['status_description']}
|
||||
for p in health_monitor.pools]
|
||||
return self._fields(res, fields)
|
||||
|
||||
def create_health_monitor(self, context, health_monitor):
|
||||
v = health_monitor['health_monitor']
|
||||
tenant_id = self._get_tenant_id_for_create(context, v)
|
||||
with context.session.begin(subtransactions=True):
|
||||
# setting ACTIVE status since healthmon is shared DB object
|
||||
monitor_db = HealthMonitor(id=uuidutils.generate_uuid(),
|
||||
tenant_id=tenant_id,
|
||||
type=v['type'],
|
||||
delay=v['delay'],
|
||||
timeout=v['timeout'],
|
||||
max_retries=v['max_retries'],
|
||||
http_method=v['http_method'],
|
||||
url_path=v['url_path'],
|
||||
expected_codes=v['expected_codes'],
|
||||
admin_state_up=v['admin_state_up'])
|
||||
context.session.add(monitor_db)
|
||||
return self._make_health_monitor_dict(monitor_db)
|
||||
|
||||
def update_health_monitor(self, context, id, health_monitor):
|
||||
v = health_monitor['health_monitor']
|
||||
with context.session.begin(subtransactions=True):
|
||||
monitor_db = self._get_resource(context, HealthMonitor, id)
|
||||
self.assert_modification_allowed(monitor_db)
|
||||
if v:
|
||||
monitor_db.update(v)
|
||||
return self._make_health_monitor_dict(monitor_db)
|
||||
|
||||
def delete_health_monitor(self, context, id):
|
||||
"""Delete health monitor object from DB
|
||||
|
||||
Raises an error if the monitor has associations with pools
|
||||
"""
|
||||
query = self._model_query(context, PoolMonitorAssociation)
|
||||
has_associations = query.filter_by(monitor_id=id).first()
|
||||
if has_associations:
|
||||
raise loadbalancer.HealthMonitorInUse(monitor_id=id)
|
||||
|
||||
with context.session.begin(subtransactions=True):
|
||||
monitor_db = self._get_resource(context, HealthMonitor, id)
|
||||
context.session.delete(monitor_db)
|
||||
|
||||
def get_health_monitor(self, context, id, fields=None):
|
||||
healthmonitor = self._get_resource(context, HealthMonitor, id)
|
||||
return self._make_health_monitor_dict(healthmonitor, fields)
|
||||
|
||||
def get_health_monitors(self, context, filters=None, fields=None):
|
||||
return self._get_collection(context, HealthMonitor,
|
||||
self._make_health_monitor_dict,
|
||||
filters=filters, fields=fields)
|
|
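Editor's note: create_pool_health_monitor above enforces at most one association per (pool, monitor) pair with a first()-then-raise check inside one transaction, and returns the full list of monitor ids now attached to the pool. A minimal standalone sketch of that guard follows; the dict-backed store and the locally defined exception are illustrative stand-ins for the SQLAlchemy session and the real Neutron exception, not the plugin's API.

# Illustrative only: mimics the duplicate-association guard with a set.
class PoolMonitorAssociationExists(Exception):
    """Stand-in for the Neutron loadbalancer exception of the same name."""

_associations = set()  # {(pool_id, monitor_id)}

def associate(pool_id, monitor_id):
    if (pool_id, monitor_id) in _associations:
        raise PoolMonitorAssociationExists(
            "monitor %s already on pool %s" % (monitor_id, pool_id))
    _associations.add((pool_id, monitor_id))
    # Mirror the real return shape: every monitor id now on the pool.
    return {"health_monitor": [m for p, m in _associations if p == pool_id]}

print(associate("pool-1", "hm-1"))   # {'health_monitor': ['hm-1']}
# associate("pool-1", "hm-1")        # would raise PoolMonitorAssociationExists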
@@ -1,239 +0,0 @@
# Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
#
# Author: Sylvain Afchain <sylvain.afchain@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import netaddr
import sqlalchemy as sa
from sqlalchemy import orm

from neutron.api.rpc.agentnotifiers import metering_rpc_agent_api
from neutron.common import constants
from neutron.db import api as dbapi
from neutron.db import db_base_plugin_v2 as base_db
from neutron.db import l3_db
from neutron.db import model_base
from neutron.db import models_v2
from neutron.extensions import metering
from neutron.openstack.common import log as logging
from neutron.openstack.common import uuidutils


LOG = logging.getLogger(__name__)


class MeteringLabelRule(model_base.BASEV2, models_v2.HasId):
    direction = sa.Column(sa.Enum('ingress', 'egress',
                                  name='meteringlabels_direction'))
    remote_ip_prefix = sa.Column(sa.String(64))
    metering_label_id = sa.Column(sa.String(36),
                                  sa.ForeignKey("meteringlabels.id",
                                                ondelete="CASCADE"),
                                  nullable=False)
    excluded = sa.Column(sa.Boolean, default=False)


class MeteringLabel(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
    name = sa.Column(sa.String(255))
    description = sa.Column(sa.String(1024))
    rules = orm.relationship(MeteringLabelRule, backref="label",
                             cascade="delete", lazy="joined")
    routers = orm.relationship(
        l3_db.Router,
        primaryjoin="MeteringLabel.tenant_id==Router.tenant_id",
        foreign_keys='MeteringLabel.tenant_id',
        uselist=True)


class MeteringDbMixin(metering.MeteringPluginBase,
                      base_db.CommonDbMixin):

    def __init__(self):
        dbapi.register_models()

        self.meter_rpc = metering_rpc_agent_api.MeteringAgentNotifyAPI()

    def _make_metering_label_dict(self, metering_label, fields=None):
        res = {'id': metering_label['id'],
               'name': metering_label['name'],
               'description': metering_label['description'],
               'tenant_id': metering_label['tenant_id']}
        return self._fields(res, fields)

    def create_metering_label(self, context, metering_label):
        m = metering_label['metering_label']
        tenant_id = self._get_tenant_id_for_create(context, m)

        with context.session.begin(subtransactions=True):
            metering_db = MeteringLabel(id=uuidutils.generate_uuid(),
                                        description=m['description'],
                                        tenant_id=tenant_id,
                                        name=m['name'])
            context.session.add(metering_db)

        return self._make_metering_label_dict(metering_db)

    def delete_metering_label(self, context, label_id):
        with context.session.begin(subtransactions=True):
            try:
                label = self._get_by_id(context, MeteringLabel, label_id)
            except orm.exc.NoResultFound:
                raise metering.MeteringLabelNotFound(label_id=label_id)

            context.session.delete(label)

    def get_metering_label(self, context, label_id, fields=None):
        try:
            metering_label = self._get_by_id(context, MeteringLabel, label_id)
        except orm.exc.NoResultFound:
            raise metering.MeteringLabelNotFound(label_id=label_id)

        return self._make_metering_label_dict(metering_label, fields)

    def get_metering_labels(self, context, filters=None, fields=None,
                            sorts=None, limit=None, marker=None,
                            page_reverse=False):
        marker_obj = self._get_marker_obj(context, 'metering_labels', limit,
                                          marker)
        return self._get_collection(context, MeteringLabel,
                                    self._make_metering_label_dict,
                                    filters=filters, fields=fields,
                                    sorts=sorts,
                                    limit=limit,
                                    marker_obj=marker_obj,
                                    page_reverse=page_reverse)

    def _make_metering_label_rule_dict(self, metering_label_rule, fields=None):
        res = {'id': metering_label_rule['id'],
               'metering_label_id': metering_label_rule['metering_label_id'],
               'direction': metering_label_rule['direction'],
               'remote_ip_prefix': metering_label_rule['remote_ip_prefix'],
               'excluded': metering_label_rule['excluded']}
        return self._fields(res, fields)

    def get_metering_label_rules(self, context, filters=None, fields=None,
                                 sorts=None, limit=None, marker=None,
                                 page_reverse=False):
        marker_obj = self._get_marker_obj(context, 'metering_label_rules',
                                          limit, marker)

        return self._get_collection(context, MeteringLabelRule,
                                    self._make_metering_label_rule_dict,
                                    filters=filters, fields=fields,
                                    sorts=sorts,
                                    limit=limit,
                                    marker_obj=marker_obj,
                                    page_reverse=page_reverse)

    def get_metering_label_rule(self, context, rule_id, fields=None):
        try:
            metering_label_rule = self._get_by_id(context,
                                                  MeteringLabelRule, rule_id)
        except orm.exc.NoResultFound:
            raise metering.MeteringLabelRuleNotFound(rule_id=rule_id)

        return self._make_metering_label_rule_dict(metering_label_rule, fields)

    def _validate_cidr(self, context, label_id, remote_ip_prefix,
                       direction, excluded):
        r_ips = self.get_metering_label_rules(context,
                                              filters={'metering_label_id':
                                                       label_id,
                                                       'direction':
                                                       [direction],
                                                       'excluded':
                                                       [excluded]},
                                              fields=['remote_ip_prefix'])

        cidrs = [r['remote_ip_prefix'] for r in r_ips]
        new_cidr_ipset = netaddr.IPSet([remote_ip_prefix])
        if (netaddr.IPSet(cidrs) & new_cidr_ipset):
            raise metering.MeteringLabelRuleOverlaps(
                remote_ip_prefix=remote_ip_prefix)

    def create_metering_label_rule(self, context, metering_label_rule):
        m = metering_label_rule['metering_label_rule']
        with context.session.begin(subtransactions=True):
            label_id = m['metering_label_id']
            ip_prefix = m['remote_ip_prefix']
            direction = m['direction']
            excluded = m['excluded']

            self._validate_cidr(context, label_id, ip_prefix, direction,
                                excluded)
            metering_db = MeteringLabelRule(id=uuidutils.generate_uuid(),
                                            metering_label_id=label_id,
                                            direction=direction,
                                            excluded=m['excluded'],
                                            remote_ip_prefix=ip_prefix)
            context.session.add(metering_db)

        return self._make_metering_label_rule_dict(metering_db)

    def delete_metering_label_rule(self, context, rule_id):
        with context.session.begin(subtransactions=True):
            try:
                rule = self._get_by_id(context, MeteringLabelRule, rule_id)
            except orm.exc.NoResultFound:
                raise metering.MeteringLabelRuleNotFound(rule_id=rule_id)

            context.session.delete(rule)

    def _get_metering_rules_dict(self, metering_label):
        rules = []
        for rule in metering_label.rules:
            rule_dict = self._make_metering_label_rule_dict(rule)
            rules.append(rule_dict)

        return rules

    def _make_router_dict(self, router):
        res = {'id': router['id'],
               'name': router['name'],
               'tenant_id': router['tenant_id'],
               'admin_state_up': router['admin_state_up'],
               'status': router['status'],
               'gw_port_id': router['gw_port_id'],
               constants.METERING_LABEL_KEY: []}

        return res

    def _process_sync_metering_data(self, labels):
        routers_dict = {}
        for label in labels:
            routers = label.routers
            for router in routers:
                router_dict = routers_dict.get(
                    router['id'],
                    self._make_router_dict(router))

                rules = self._get_metering_rules_dict(label)

                data = {'id': label['id'], 'rules': rules}
                router_dict[constants.METERING_LABEL_KEY].append(data)

                routers_dict[router['id']] = router_dict

        return routers_dict.values()

    def get_sync_data_metering(self, context, label_id=None, router_ids=None):
        labels = context.session.query(MeteringLabel)

        if label_id:
            labels = labels.filter(MeteringLabel.id == label_id)
        elif router_ids:
            labels = (labels.join(MeteringLabel.routers).
                      filter(l3_db.Router.id.in_(router_ids)))

        return self._process_sync_metering_data(labels)
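Editor's note: _validate_cidr above reduces overlap detection to a netaddr.IPSet intersection over the existing rules that share the new rule's direction and excluded flag. A runnable sketch of just that core test (the prefixes are made-up values):

import netaddr

existing = ['10.0.0.0/24', '192.168.1.0/24']   # remote_ip_prefix of current rules
candidate = '10.0.0.128/25'                    # new rule to validate

# A non-empty intersection means the new prefix overlaps an existing rule,
# which is exactly the condition _validate_cidr raises on.
overlaps = bool(netaddr.IPSet(existing) & netaddr.IPSet([candidate]))
print(overlaps)  # True: 10.0.0.128/25 sits inside 10.0.0.0/24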
@@ -1,55 +0,0 @@
# Copyright (C) 2014 eNovance SAS <licensing@enovance.com>
#
# Author: Sylvain Afchain <sylvain.afchain@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from neutron.common import constants as consts
from neutron.common import utils
from neutron import manager
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants as service_constants

LOG = logging.getLogger(__name__)


class MeteringRpcCallbacks(object):

    RPC_API_VERSION = '1.0'

    def __init__(self, meter_plugin):
        self.meter_plugin = meter_plugin

    def get_sync_data_metering(self, context, **kwargs):
        l3_plugin = manager.NeutronManager.get_service_plugins().get(
            service_constants.L3_ROUTER_NAT)
        if not l3_plugin:
            return

        host = kwargs.get('host')
        if not utils.is_extension_supported(
            l3_plugin, consts.L3_AGENT_SCHEDULER_EXT_ALIAS) or not host:
            return self.meter_plugin.get_sync_data_metering(context)
        else:
            agents = l3_plugin.get_l3_agents(context, filters={'host': [host]})
            if not agents:
                LOG.error(_('Unable to find agent %s.'), host)
                return

            routers = l3_plugin.list_routers_on_l3_agent(context, agents[0].id)
            router_ids = [router['id'] for router in routers['routers']]
            if not router_ids:
                return

            return self.meter_plugin.get_sync_data_metering(
                context, router_ids=router_ids)
@@ -1,53 +0,0 @@
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mark McClain, DreamHost

from alembic import op
import sqlalchemy as sa

OVS_PLUGIN = ('neutron.plugins.openvswitch.ovs_neutron_plugin'
              '.OVSNeutronPluginV2')
CISCO_PLUGIN = 'neutron.plugins.cisco.network_plugin.PluginV2'


def should_run(active_plugins, migrate_plugins):
    if '*' in migrate_plugins:
        return True
    else:
        if (CISCO_PLUGIN not in migrate_plugins and
            OVS_PLUGIN in migrate_plugins):
            migrate_plugins.append(CISCO_PLUGIN)
        return set(active_plugins) & set(migrate_plugins)


def alter_enum(table, column, enum_type, nullable):
    bind = op.get_bind()
    engine = bind.engine
    if engine.name == 'postgresql':
        values = {'table': table,
                  'column': column,
                  'name': enum_type.name}
        op.execute("ALTER TYPE %(name)s RENAME TO old_%(name)s" % values)
        enum_type.create(bind, checkfirst=False)
        op.execute("ALTER TABLE %(table)s RENAME COLUMN %(column)s TO "
                   "old_%(column)s" % values)
        op.add_column(table, sa.Column(column, enum_type, nullable=nullable))
        op.execute("UPDATE %(table)s SET %(column)s = "
                   "old_%(column)s::text::%(name)s" % values)
        op.execute("ALTER TABLE %(table)s DROP COLUMN old_%(column)s" % values)
        op.execute("DROP TYPE old_%(name)s" % values)
    else:
        op.alter_column(table, column, type_=enum_type,
                        existing_nullable=nullable)
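Editor's note: should_run gates every migration that follows in this diff. It returns a truthy value when the migration's plugin list matches the deployment's active plugins, with '*' acting as a wildcard, and the Cisco plugin implicitly piggybacking on any migration targeted at the OVS plugin (the Cisco plugin wraps OVS). A quick illustration of the semantics, calling the function defined just above with the same plugin paths the migrations use:

OVS = 'neutron.plugins.openvswitch.ovs_neutron_plugin.OVSNeutronPluginV2'
CISCO = 'neutron.plugins.cisco.network_plugin.PluginV2'

print(should_run([OVS], ['*']))      # True: wildcard matches any deployment
print(should_run([OVS], [OVS]))      # truthy: non-empty set intersection
print(should_run([CISCO], [OVS]))    # truthy: CISCO is appended because the
                                     # migration targets OVS and Cisco wraps it
print(should_run([CISCO], []))       # falsy: empty intersection, skip migration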
@@ -1,106 +0,0 @@
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mark McClain, DreamHost

from logging import config as logging_config

from alembic import context
from sqlalchemy import create_engine, pool

from neutron.db import model_base
from neutron.openstack.common import importutils


DATABASE_QUOTA_DRIVER = 'neutron.extensions._quotav2_driver.DbQuotaDriver'

# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
neutron_config = config.neutron_config

# Interpret the config file for Python logging.
# This line sets up loggers basically.
logging_config.fileConfig(config.config_file_name)

plugin_class_path = neutron_config.core_plugin
active_plugins = [plugin_class_path]
active_plugins += neutron_config.service_plugins

for class_path in active_plugins:
    importutils.import_class(class_path)

# set the target for 'autogenerate' support
target_metadata = model_base.BASEV2.metadata


def run_migrations_offline():
    """Run migrations in 'offline' mode.

    This configures the context with either a URL
    or an Engine.

    Calls to context.execute() here emit the given string to the
    script output.

    """
    kwargs = dict()
    if neutron_config.database.connection:
        kwargs['url'] = neutron_config.database.connection
    else:
        kwargs['dialect_name'] = neutron_config.database.engine
    context.configure(**kwargs)

    with context.begin_transaction():
        context.run_migrations(active_plugins=active_plugins,
                               options=build_options())


def run_migrations_online():
    """Run migrations in 'online' mode.

    In this scenario we need to create an Engine
    and associate a connection with the context.

    """
    engine = create_engine(
        neutron_config.database.connection,
        poolclass=pool.NullPool)

    connection = engine.connect()
    context.configure(
        connection=connection,
        target_metadata=target_metadata
    )

    try:
        with context.begin_transaction():
            context.run_migrations(active_plugins=active_plugins,
                                   options=build_options())
    finally:
        connection.close()


def build_options():
    return {'folsom_quota_db_enabled': is_db_quota_enabled()}


def is_db_quota_enabled():
    return neutron_config.QUOTAS.quota_driver == DATABASE_QUOTA_DRIVER


if context.is_offline_mode():
    run_migrations_offline()
else:
    run_migrations_online()
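Editor's note: run_migrations_online above builds its engine with poolclass=pool.NullPool, so a one-shot migration run does not keep a pooled connection open after it finishes. A minimal sketch of the same construction against a throwaway in-memory SQLite URL (illustrative; the real env.py uses neutron_config.database.connection, and the engine.execute call style assumes the SQLAlchemy of this era):

from sqlalchemy import create_engine, pool

# NullPool closes each connection as soon as it is released instead of
# returning it to a pool, which matches the migration's run-once lifecycle.
engine = create_engine('sqlite://', poolclass=pool.NullPool)
conn = engine.connect()
print(conn.execute('SELECT 1').scalar())  # 1
conn.close()  # the underlying DBAPI connection is really closed here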
@@ -1,61 +0,0 @@
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

"""nec-pf-port-del

Revision ID: 1064e98b7917
Revises: 3d6fae8b70b0
Create Date: 2013-09-24 05:33:54.602618

"""

# revision identifiers, used by Alembic.
revision = '1064e98b7917'
down_revision = '3d6fae8b70b0'

# Change to ['*'] if this migration applies to all plugins

migration_for_plugins = [
    'neutron.plugins.nec.nec_plugin.NECPluginV2'
]

from alembic import op
import sqlalchemy as sa

from neutron.db import migration


def upgrade(active_plugins=None, options=None):
    if not migration.should_run(active_plugins, migration_for_plugins):
        return

    op.alter_column('packetfilters', 'in_port',
                    existing_type=sa.String(length=36),
                    nullable=True)
    op.create_foreign_key(
        'packetfilters_ibfk_2',
        source='packetfilters', referent='ports',
        local_cols=['in_port'], remote_cols=['id'],
        ondelete='CASCADE')


def downgrade(active_plugins=None, options=None):
    if not migration.should_run(active_plugins, migration_for_plugins):
        return

    op.drop_constraint('packetfilters_ibfk_2', 'packetfilters', 'foreignkey')
    op.alter_column('packetfilters', 'in_port',
                    existing_type=sa.String(length=36),
                    nullable=False)
@@ -1,68 +0,0 @@
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

"""nuage_extraroute

Revision ID: 10cd28e692e9
Revises: 1b837a7125a9
Create Date: 2014-05-14 14:47:53.148132

"""

# revision identifiers, used by Alembic.
revision = '10cd28e692e9'
down_revision = '1b837a7125a9'

# Change to ['*'] if this migration applies to all plugins

migration_for_plugins = [
    'neutron.plugins.nuage.plugin.NuagePlugin'
]

from alembic import op
import sqlalchemy as sa

from neutron.db import migration


def upgrade(active_plugins=None, options=None):
    if not migration.should_run(active_plugins, migration_for_plugins):
        return

    op.create_table(
        'routerroutes_mapping',
        sa.Column('router_id', sa.String(length=36), nullable=False),
        sa.Column('nuage_route_id', sa.String(length=36), nullable=True),
        sa.ForeignKeyConstraint(['router_id'], ['routers.id'],
                                ondelete='CASCADE'),
    )
    op.create_table(
        'routerroutes',
        sa.Column('destination', sa.String(length=64), nullable=False),
        sa.Column('nexthop', sa.String(length=64), nullable=False),
        sa.Column('router_id', sa.String(length=36), nullable=False),
        sa.ForeignKeyConstraint(['router_id'], ['routers.id'],
                                ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('destination', 'nexthop',
                                'router_id'),
    )


def downgrade(active_plugins=None, options=None):
    if not migration.should_run(active_plugins, migration_for_plugins):
        return

    op.drop_table('routerroutes')
    op.drop_table('routerroutes_mapping')
@@ -1,82 +0,0 @@
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

"""initial port security

Revision ID: 1149d7de0cfa
Revises: 1b693c095aa3
Create Date: 2013-01-22 14:05:20.696502

"""

# revision identifiers, used by Alembic.
revision = '1149d7de0cfa'
down_revision = '1b693c095aa3'

# Change to ['*'] if this migration applies to all plugins

migration_for_plugins = [
    'neutron.plugins.nicira.NeutronPlugin.NvpPluginV2',
    'neutron.plugins.nicira.NeutronServicePlugin.NvpAdvancedPlugin',
    'neutron.plugins.vmware.plugin.NsxPlugin',
    'neutron.plugins.vmware.plugin.NsxServicePlugin'
]

from alembic import op
import sqlalchemy as sa

from neutron.db import migration


def upgrade(active_plugins=None, options=None):
    if not migration.should_run(active_plugins, migration_for_plugins):
        return

    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('networksecuritybindings',
                    sa.Column('network_id', sa.String(length=36),
                              nullable=False),
                    sa.Column('port_security_enabled', sa.Boolean(),
                              nullable=False),
                    sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
                                            ondelete='CASCADE'),
                    sa.PrimaryKeyConstraint('network_id'))
    op.create_table('portsecuritybindings',
                    sa.Column('port_id', sa.String(length=36),
                              nullable=False),
                    sa.Column('port_security_enabled', sa.Boolean(),
                              nullable=False),
                    sa.ForeignKeyConstraint(['port_id'], ['ports.id'],
                                            ondelete='CASCADE'),
                    sa.PrimaryKeyConstraint('port_id'))
    ### end Alembic commands ###

    # Copy network and port ids over to network|port(securitybindings) table
    # and set port_security_enabled to false as ip address pairs were not
    # configured in NVP/NSX originally.
    op.execute("INSERT INTO networksecuritybindings SELECT id as "
               "network_id, False as port_security_enabled from networks")
    op.execute("INSERT INTO portsecuritybindings SELECT id as port_id, "
               "False as port_security_enabled from ports")


def downgrade(active_plugins=None, options=None):
    if not migration.should_run(active_plugins, migration_for_plugins):
        return

    ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('portsecuritybindings')
    op.drop_table('networksecuritybindings')
    ### end Alembic commands ###
@@ -1,60 +0,0 @@
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

"""Pool Monitor status field

Revision ID: 11c6e18605c8
Revises: 52ff27f7567a
Create Date: 2013-07-10 06:07:20.878520

"""

# revision identifiers, used by Alembic.
revision = '11c6e18605c8'
down_revision = '52ff27f7567a'

# Change to ['*'] if this migration applies to all plugins

migration_for_plugins = [
    'neutron.services.loadbalancer.plugin.LoadBalancerPlugin',
]

from alembic import op
import sqlalchemy as sa

from neutron.db import migration


def upgrade(active_plugins=None, options=None):
    if not migration.should_run(active_plugins, migration_for_plugins):
        return

    op.add_column('poolmonitorassociations', sa.Column('status',
                                                       sa.String(16),
                                                       server_default='',
                                                       nullable=False))
    op.add_column('poolmonitorassociations', sa.Column('status_description',
                                                       sa.String(255)))

    # Set status to ACTIVE for existing associations
    op.execute("UPDATE poolmonitorassociations SET status='ACTIVE'")


def downgrade(active_plugins=None, options=None):
    if not migration.should_run(active_plugins, migration_for_plugins):
        return

    op.drop_column('poolmonitorassociations', 'status')
    op.drop_column('poolmonitorassociations', 'status_description')
@@ -1,69 +0,0 @@
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

"""ext_gw_mode

Revision ID: 128e042a2b68
Revises: 32b517556ec9
Create Date: 2013-03-27 00:35:17.323280

"""

# revision identifiers, used by Alembic.
revision = '128e042a2b68'
down_revision = '32b517556ec9'

# Change to ['*'] if this migration applies to all plugins

migration_for_plugins = [
    'neutron.plugins.hyperv.hyperv_neutron_plugin.HyperVNeutronPlugin',
    'neutron.plugins.linuxbridge.lb_neutron_plugin.LinuxBridgePluginV2',
    'neutron.plugins.metaplugin.meta_neutron_plugin.MetaPluginV2',
    'neutron.plugins.ml2.plugin.Ml2Plugin',
    'neutron.plugins.nec.nec_plugin.NECPluginV2',
    'neutron.plugins.nicira.NeutronPlugin.NvpPluginV2',
    'neutron.plugins.nicira.NeutronServicePlugin.NvpAdvancedPlugin',
    'neutron.plugins.openvswitch.ovs_neutron_plugin.OVSNeutronPluginV2',
    'neutron.plugins.ryu.ryu_neutron_plugin.RyuNeutronPluginV2',
    'neutron.plugins.vmware.plugin.NsxPlugin',
    'neutron.plugins.vmware.plugin.NsxServicePlugin',
    'neutron.plugins.embrane.plugins.embrane_ovs_plugin.EmbraneOvsPlugin',
    'neutron.plugins.ibm.sdnve_neutron_plugin.SdnvePluginV2',
    'neutron.plugins.oneconvergence.plugin.OneConvergencePluginV2',
    'neutron.plugins.cisco.network_plugin.PluginV2',
]

from alembic import op
import sqlalchemy as sa


from neutron.db import migration


def upgrade(active_plugins=None, options=None):
    if not migration.should_run(active_plugins, migration_for_plugins):
        return

    op.add_column('routers', sa.Column('enable_snat', sa.Boolean(),
                                       nullable=False, default=True))
    # Set enable_snat to True for existing routers
    op.execute("UPDATE routers SET enable_snat=True")


def downgrade(active_plugins=None, options=None):
    if not migration.should_run(active_plugins, migration_for_plugins):
        return

    op.drop_column('routers', 'enable_snat')
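Editor's note: the ext_gw_mode migration above pairs sa.Column(default=True) with an explicit UPDATE. That pairing is the point: `default=` is only a client-side insert default, so it does nothing for rows that already exist, whereas a `server_default` becomes part of the DDL and lets the database fill pre-existing rows itself (the poolmonitorassociations migration earlier uses server_default='' for exactly that reason). A throwaway sketch of the server-side behavior, using in-memory SQLite and the engine.execute style of this era's SQLAlchemy:

import sqlalchemy as sa

engine = sa.create_engine('sqlite://')
engine.execute("CREATE TABLE routers (id TEXT PRIMARY KEY)")
engine.execute("INSERT INTO routers VALUES ('r1')")  # pre-existing row

# ADD COLUMN ... DEFAULT (what server_default emits) backfills existing rows,
# so no follow-up UPDATE is needed on this path.
engine.execute("ALTER TABLE routers ADD COLUMN enable_snat BOOLEAN "
               "NOT NULL DEFAULT 1")
print(engine.execute("SELECT enable_snat FROM routers").scalar())  # 1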
@@ -1,68 +0,0 @@
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

"""nvp_net_binding

Revision ID: 1341ed32cc1e
Revises: 4692d074d587
Create Date: 2013-02-26 01:28:29.182195

"""

# revision identifiers, used by Alembic.
revision = '1341ed32cc1e'
down_revision = '4692d074d587'

# Change to ['*'] if this migration applies to all plugins

migration_for_plugins = [
    'neutron.plugins.nicira.NeutronPlugin.NvpPluginV2',
    'neutron.plugins.nicira.NeutronServicePlugin.NvpAdvancedPlugin',
    'neutron.plugins.vmware.plugin.NsxPlugin',
    'neutron.plugins.vmware.plugin.NsxServicePlugin'
]

from alembic import op
import sqlalchemy as sa


from neutron.db import migration

new_type = sa.Enum('flat', 'vlan', 'stt', 'gre', 'l3_ext',
                   name='nvp_network_bindings_binding_type')
old_type = sa.Enum('flat', 'vlan', 'stt', 'gre',
                   name='nvp_network_bindings_binding_type')


def upgrade(active_plugins=None, options=None):
    if not migration.should_run(active_plugins, migration_for_plugins):
        return
    op.alter_column('nvp_network_bindings', 'tz_uuid',
                    name='phy_uuid',
                    existing_type=sa.String(36),
                    existing_nullable=True)
    migration.alter_enum('nvp_network_bindings', 'binding_type', new_type,
                         nullable=False)


def downgrade(active_plugins=None, options=None):
    if not migration.should_run(active_plugins, migration_for_plugins):
        return
    op.alter_column('nvp_network_bindings', 'phy_uuid',
                    name='tz_uuid',
                    existing_type=sa.String(36),
                    existing_nullable=True)
    migration.alter_enum('nvp_network_bindings', 'binding_type', old_type,
                         nullable=False)
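Editor's note: this migration leans on the alter_enum helper shown earlier in this diff because PostgreSQL enums of this vintage cannot simply gain a value in place (older releases have no ALTER TYPE ... ADD VALUE, and where it exists it cannot run inside the migration's transaction). The helper therefore does a rename-recreate-copy dance. A comment walkthrough of what the upgrade() call above expands to on PostgreSQL, with names taken from the code (other backends get a single op.alter_column):

# migration.alter_enum('nvp_network_bindings', 'binding_type', new_type,
#                      nullable=False) on PostgreSQL, step by step:
#   1. ALTER TYPE nvp_network_bindings_binding_type
#          RENAME TO old_nvp_network_bindings_binding_type
#   2. CREATE TYPE nvp_network_bindings_binding_type
#          AS ENUM ('flat', 'vlan', 'stt', 'gre', 'l3_ext')
#   3. rename column binding_type -> old_binding_type
#   4. add a fresh binding_type column of the new enum type
#   5. UPDATE ... SET binding_type =
#          old_binding_type::text::nvp_network_bindings_binding_type
#   6. drop the old column, then DROP TYPE the old enum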
@@ -1,53 +0,0 @@
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

"""nec_add_pf_name

Revision ID: 13de305df56e
Revises: b7a8863760e
Create Date: 2013-07-06 00:42:26.991175

"""

# revision identifiers, used by Alembic.
revision = '13de305df56e'
down_revision = 'b7a8863760e'

# Change to ['*'] if this migration applies to all plugins

migration_for_plugins = [
    'neutron.plugins.nec.nec_plugin.NECPluginV2'
]

from alembic import op
import sqlalchemy as sa


from neutron.db import migration


def upgrade(active_plugins=None, options=None):
    if not migration.should_run(active_plugins, migration_for_plugins):
        return

    op.add_column('packetfilters',
                  sa.Column('name', sa.String(length=255), nullable=True))


def downgrade(active_plugins=None, options=None):
    if not migration.should_run(active_plugins, migration_for_plugins):
        return

    op.drop_column('packetfilters', 'name')
@@ -1,76 +0,0 @@
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

"""DB Migration for Arista ml2 mechanism driver

Revision ID: 14f24494ca31
Revises: 2a3bae1ceb8
Create Date: 2013-08-15 18:54:16.083640

"""

# revision identifiers, used by Alembic.
revision = '14f24494ca31'
down_revision = '2a3bae1ceb8'

# Change to ['*'] if this migration applies to all plugins

migration_for_plugins = [
    'neutron.plugins.ml2.plugin.Ml2Plugin'
]

from alembic import op
import sqlalchemy as sa

from neutron.db import migration


def upgrade(active_plugins=None, options=None):
    if not migration.should_run(active_plugins, migration_for_plugins):
        return

    op.create_table(
        'arista_provisioned_nets',
        sa.Column('tenant_id', sa.String(length=255), nullable=True),
        sa.Column('id', sa.String(length=36), nullable=False),
        sa.Column('network_id', sa.String(length=36), nullable=True),
        sa.Column('segmentation_id', sa.Integer(),
                  autoincrement=False, nullable=True),
        sa.PrimaryKeyConstraint('id'))

    op.create_table(
        'arista_provisioned_vms',
        sa.Column('tenant_id', sa.String(length=255), nullable=True),
        sa.Column('id', sa.String(length=36), nullable=False),
        sa.Column('vm_id', sa.String(length=255), nullable=True),
        sa.Column('host_id', sa.String(length=255), nullable=True),
        sa.Column('port_id', sa.String(length=36), nullable=True),
        sa.Column('network_id', sa.String(length=36), nullable=True),
        sa.PrimaryKeyConstraint('id'))

    op.create_table(
        'arista_provisioned_tenants',
        sa.Column('tenant_id', sa.String(length=255), nullable=True),
        sa.Column('id', sa.String(length=36), nullable=False),
        sa.PrimaryKeyConstraint('id'))


def downgrade(active_plugins=None, options=None):
    if not migration.should_run(active_plugins, migration_for_plugins):
        return

    op.drop_table('arista_provisioned_tenants')
    op.drop_table('arista_provisioned_vms')
    op.drop_table('arista_provisioned_nets')
@@ -1,53 +0,0 @@
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

"""ml2 binding:profile

Revision ID: 157a5d299379
Revises: 50d5ba354c23
Create Date: 2014-02-13 23:48:25.147279

"""

# revision identifiers, used by Alembic.
revision = '157a5d299379'
down_revision = '50d5ba354c23'

# Change to ['*'] if this migration applies to all plugins

migration_for_plugins = [
    'neutron.plugins.ml2.plugin.Ml2Plugin'
]

from alembic import op
import sqlalchemy as sa

from neutron.db import migration


def upgrade(active_plugins=None, options=None):
    if not migration.should_run(active_plugins, migration_for_plugins):
        return

    op.add_column('ml2_port_bindings',
                  sa.Column('profile', sa.String(length=4095),
                            nullable=False, server_default=''))


def downgrade(active_plugins=None, options=None):
    if not migration.should_run(active_plugins, migration_for_plugins):
        return

    op.drop_column('ml2_port_bindings', 'profile')
@@ -1,64 +0,0 @@
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

"""Add portbindings db

Revision ID: 176a85fc7d79
Revises: f489cf14a79c
Create Date: 2013-03-21 14:59:53.052600

"""

# revision identifiers, used by Alembic.
revision = '176a85fc7d79'
down_revision = 'f489cf14a79c'

# Change to ['*'] if this migration applies to all plugins

migration_for_plugins = [
    'neutron.plugins.bigswitch.plugin.NeutronRestProxyV2',
    'neutron.plugins.linuxbridge.lb_neutron_plugin.LinuxBridgePluginV2',
    'neutron.plugins.openvswitch.ovs_neutron_plugin.OVSNeutronPluginV2',
    'neutron.plugins.nicira.NeutronPlugin.NvpPluginV2',
    'neutron.plugins.nicira.NeutronServicePlugin.NvpAdvancedPlugin',
    'neutron.plugins.vmware.plugin.NsxPlugin',
    'neutron.plugins.vmware.plugin.NsxServicePlugin',
    'neutron.plugins.ibm.sdnve_neutron_plugin.SdnvePluginV2',
    'neutron.plugins.oneconvergence.plugin.OneConvergencePluginV2',
]

from alembic import op
import sqlalchemy as sa

from neutron.db import migration


def upgrade(active_plugins=None, options=None):
    if not migration.should_run(active_plugins, migration_for_plugins):
        return

    op.create_table(
        'portbindingports',
        sa.Column('port_id', sa.String(length=36), nullable=False),
        sa.Column('host', sa.String(length=255), nullable=False),
        sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('port_id')
    )


def downgrade(active_plugins=None, options=None):
    if not migration.should_run(active_plugins, migration_for_plugins):
        return
    op.drop_table('portbindingports')
@@ -1,62 +0,0 @@
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

"""Quota ext support added in Grizzly

Revision ID: 1b693c095aa3
Revises: 2a6d0b51f4bb
Create Date: 2013-01-19 02:58:17.667524

"""

# revision identifiers, used by Alembic.
revision = '1b693c095aa3'
down_revision = '2a6d0b51f4bb'

# Change to ['*'] if this migration applies to all plugins

migration_for_plugins = [
    'neutron.plugins.nec.nec_plugin.NECPluginV2'
]

from alembic import op
import sqlalchemy as sa

from neutron.db import migration


def upgrade(active_plugins=None, options=None):
    if not migration.should_run(active_plugins, migration_for_plugins):
        return

    ### commands auto generated by Alembic - please adjust! ###
    op.create_table(
        'quotas',
        sa.Column('id', sa.String(length=36), nullable=False),
        sa.Column('tenant_id', sa.String(length=255), nullable=True),
        sa.Column('resource', sa.String(length=255), nullable=True),
        sa.Column('limit', sa.Integer(), nullable=True),
        sa.PrimaryKeyConstraint('id')
    )
    ### end Alembic commands ###


def downgrade(active_plugins=None, options=None):
    if not migration.should_run(active_plugins, migration_for_plugins):
        return

    ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('quotas')
    ### end Alembic commands ###
@@ -1,74 +0,0 @@
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

"""Cisco APIC Mechanism Driver

Revision ID: 1b837a7125a9
Revises: 6be312499f9
Create Date: 2014-02-13 09:35:19.147619

"""

# revision identifiers, used by Alembic.
revision = '1b837a7125a9'
down_revision = '6be312499f9'

migration_for_plugins = [
    'neutron.plugins.ml2.plugin.Ml2Plugin'
]

from alembic import op
import sqlalchemy as sa

from neutron.db import migration


def upgrade(active_plugins=None, options=None):
    if not migration.should_run(active_plugins, migration_for_plugins):
        return

    op.create_table(
        'cisco_ml2_apic_epgs',
        sa.Column('network_id', sa.String(length=255), nullable=False),
        sa.Column('epg_id', sa.String(length=64), nullable=False),
        sa.Column('segmentation_id', sa.String(length=64), nullable=False),
        sa.Column('provider', sa.Boolean(), default=False, nullable=False),
        sa.PrimaryKeyConstraint('network_id'))

    op.create_table(
        'cisco_ml2_apic_port_profiles',
        sa.Column('node_id', sa.String(length=255), nullable=False),
        sa.Column('profile_id', sa.String(length=64), nullable=False),
        sa.Column('hpselc_id', sa.String(length=64), nullable=False),
        sa.Column('module', sa.String(length=10), nullable=False),
        sa.Column('from_port', sa.Integer(), nullable=False),
        sa.Column('to_port', sa.Integer(), nullable=False),
        sa.PrimaryKeyConstraint('node_id'))

    op.create_table(
        'cisco_ml2_apic_contracts',
        sa.Column('tenant_id', sa.String(length=255), nullable=False),
        sa.Column('contract_id', sa.String(length=64), nullable=False),
        sa.Column('filter_id', sa.String(length=64), nullable=False),
        sa.PrimaryKeyConstraint('tenant_id'))


def downgrade(active_plugins=None, options=None):
    if not migration.should_run(active_plugins, migration_for_plugins):
        return

    op.drop_table('cisco_ml2_apic_contracts')
    op.drop_table('cisco_ml2_apic_port_profiles')
    op.drop_table('cisco_ml2_apic_epgs')
@@ -1,80 +0,0 @@
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

"""Support routing table configuration on Router

Revision ID: 1c33fa3cd1a1
Revises: 45680af419f9
Create Date: 2013-01-17 14:35:09.386975

"""

# revision identifiers, used by Alembic.
revision = '1c33fa3cd1a1'
down_revision = '45680af419f9'

# Change to ['*'] if this migration applies to all plugins

migration_for_plugins = [
    'neutron.plugins.openvswitch.ovs_neutron_plugin.OVSNeutronPluginV2',
    'neutron.plugins.linuxbridge.lb_neutron_plugin.LinuxBridgePluginV2',
    'neutron.plugins.metaplugin.meta_neutron_plugin.MetaPluginV2',
    'neutron.plugins.ml2.plugin.Ml2Plugin',
    'neutron.plugins.nec.nec_plugin.NECPluginV2',
    'neutron.plugins.nicira.NeutronPlugin.NvpPluginV2',
    'neutron.plugins.nicira.NeutronServicePlugin.NvpAdvancedPlugin',
    'neutron.plugins.ryu.ryu_neutron_plugin.RyuNeutronPluginV2',
    'neutron.plugins.vmware.plugin.NsxPlugin',
    'neutron.plugins.vmware.plugin.NsxServicePlugin',
    'neutron.plugins.oneconvergence.plugin.OneConvergencePluginV2',
    'neutron.plugins.cisco.network_plugin.PluginV2',
    'neutron.plugins.bigswitch.plugin.NeutronRestProxyV2'
]

from alembic import op
import sqlalchemy as sa

from neutron.db import migration


def upgrade(active_plugins=None, options=None):
    if not migration.should_run(active_plugins, migration_for_plugins):
        return

    op.rename_table(
        'routes',
        'subnetroutes',
    )
    op.create_table(
        'routerroutes',
        sa.Column('destination', sa.String(length=64), nullable=False),
        sa.Column(
            'nexthop', sa.String(length=64), nullable=False),
        sa.Column('router_id', sa.String(length=36), nullable=False),
        sa.ForeignKeyConstraint(
            ['router_id'], ['routers.id'], ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('destination', 'nexthop', 'router_id')
    )


def downgrade(active_plugins=None, options=None):
    if not migration.should_run(active_plugins, migration_for_plugins):
        return

    op.rename_table(
        'subnetroutes',
        'routes',
    )
    op.drop_table('routerroutes')
@@ -1,65 +0,0 @@
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

"""nvp_netbinding

Revision ID: 1d76643bcec4
Revises: 3cb5d900c5de
Create Date: 2013-01-15 07:36:10.024346

"""

# revision identifiers, used by Alembic.
revision = '1d76643bcec4'
down_revision = '3cb5d900c5de'

# Change to ['*'] if this migration applies to all plugins

migration_for_plugins = [
    'neutron.plugins.nicira.NeutronPlugin.NvpPluginV2',
    'neutron.plugins.nicira.NeutronServicePlugin.NvpAdvancedPlugin',
    'neutron.plugins.vmware.plugin.NsxPlugin',
    'neutron.plugins.vmware.plugin.NsxServicePlugin'
]

from alembic import op
import sqlalchemy as sa

from neutron.db import migration


def upgrade(active_plugins=None, options=None):
    if not migration.should_run(active_plugins, migration_for_plugins):
        return

    op.create_table(
        'nvp_network_bindings',
        sa.Column('network_id', sa.String(length=36), nullable=False),
        sa.Column('binding_type',
                  sa.Enum('flat', 'vlan', 'stt', 'gre',
                          name='nvp_network_bindings_binding_type'),
                  nullable=False),
        sa.Column('tz_uuid', sa.String(length=36), nullable=True),
        sa.Column('vlan_id', sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
                                ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('network_id'))


def downgrade(active_plugins=None, options=None):
    if not migration.should_run(active_plugins, migration_for_plugins):
        return

    op.drop_table('nvp_network_bindings')
Some files were not shown because too many files have changed in this diff.