Merge remote-tracking branch 'origin/master' into merge-branch

Change-Id: I7bd5d0568e773365eaab1ebd99dcd7186da22dae
Commit: d7e60d59a3
@@ -69,7 +69,7 @@ do whatever they are supposed to do. In a callback-less world this would work li
     C->my_random_very_difficult_to_remember_method_about_router_created()

 If B and/or C change, things become sour. In a callback-based world, things become a lot
-more uniform and straightward:
+more uniform and straightforward:

 ::
@@ -319,7 +319,7 @@ Is the registry thread-safe?

 Short answer is no: it is not safe to make mutations while callbacks are being called (more
 details as to why can be found `here <https://hg.python.org/releasing/2.7.9/file/753a8f457ddc/Objects/dictobject.c#l937>`_).
-A mutation could happen if a 'subscribe'/'unsuscribe' operation interleaves with the execution
+A mutation could happen if a 'subscribe'/'unsubscribe' operation interleaves with the execution
 of the notify loop. Albeit there is a possibility that things may end up in a bad state, the
 registry works correctly under the assumption that subscriptions happen at the very beginning
 of the life of the process and that the unsubscriptions (if any) take place at the very end.
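The safe-usage pattern that paragraph prescribes looks roughly like this (a sketch against the neutron.callbacks API of this era, not part of the commit; the callback body and trigger name are illustrative):

    from neutron.callbacks import events
    from neutron.callbacks import registry
    from neutron.callbacks import resources


    def router_created(resource, event, trigger, **kwargs):
        # React to ROUTER/AFTER_CREATE; never touch the registry here.
        print('router created, notified by %s' % trigger)


    # Subscribe once, at the very beginning of the process life...
    registry.subscribe(router_created, resources.ROUTER, events.AFTER_CREATE)

    # ...then only notify; no subscribe/unsubscribe may interleave with this.
    registry.notify(resources.ROUTER, events.AFTER_CREATE, 'l3-plugin')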
@@ -1,2 +1,27 @@
+===============================
 L2 Networking with Linux Bridge
-------------------------------
+===============================
+
+This Agent uses the `Linux Bridge
+<http://www.linuxfoundation.org/collaborate/workgroups/networking/bridge>`_ to
+provide L2 connectivity for VM instances running on the compute node to the
+public network. A graphical illustration of the deployment can be found in
+`OpenStack Admin Guide Linux Bridge
+<http://docs.openstack.org/admin-guide-cloud/content/under_the_hood_linuxbridge.html>`_
+
+In most common deployments, there is a compute and a network node. On both the
+compute and the network node, the Linux Bridge Agent will manage virtual
+switches, connectivity among them, and interaction via virtual ports with other
+network components such as namespaces and underlying interfaces. Additionally,
+on the compute node, the Linux Bridge Agent will manage security groups.
+
+Three use cases and their packet flow are documented as follows:
+
+1. `Legacy implementation with Linux Bridge
+   <http://docs.openstack.org/networking-guide/deploy_scenario1b.html>`_
+
+2. `High Availability using L3HA with Linux Bridge
+   <http://docs.openstack.org/networking-guide/deploy_scenario3b.html>`_
+
+3. `Provider networks with Linux Bridge
+   <http://docs.openstack.org/networking-guide/deploy_scenario4b.html>`_
@@ -18,7 +18,7 @@ changes to the infrastructure documentation using this url[2] (and please
 review the patches) and check this doc on a regular basis for updates.

 [1] http://ci.openstack.org/third_party.html
-[2] https://review.openstack.org/#/q/status:open+project:openstack-infra/config+branch:master+topic:third-party,n,z
+[2] https://review.openstack.org/#/q/status:open+project:openstack-infra/system-config+branch:master+topic:third-party,n,z

 What Changes to Run Against
 ---------------------------
@@ -90,3 +90,7 @@
 # Timeout for ovs-vsctl commands.
 # If the timeout expires, ovs commands will fail with ALARMCLOCK error.
 # ovs_vsctl_timeout = 10
+
+[AGENT]
+# Log agent heartbeats from this DHCP agent
+# log_agent_heartbeats = False
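The same commented-out option is mirrored into the other agent configuration files below; enabling it in a deployment is a one-line change (a hypothetical /etc/neutron/dhcp_agent.ini fragment, not part of the commit):

    [AGENT]
    # Ask the server to log a line for every heartbeat from this agent.
    log_agent_heartbeats = True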
@@ -122,3 +122,7 @@

 # The advertisement interval in seconds
 # ha_vrrp_advert_int = 2
+
+[AGENT]
+# Log agent heartbeats from this L3 agent
+# log_agent_heartbeats = False
@@ -66,3 +66,7 @@ admin_password = %SERVICE_PASSWORD%
 # Otherwise default_ttl specifies time in seconds a cache entry is valid for.
 # No cache is used in case no value is passed.
 # cache_url = memory://?default_ttl=5
+
+[AGENT]
+# Log agent heartbeats from this Metadata agent
+# log_agent_heartbeats = False
@@ -58,6 +58,9 @@
 # of_interface = ovs-ofctl

 [agent]
+# Log agent heartbeats from this OVS agent
+# log_agent_heartbeats = False
+
 # Agent's polling interval in seconds
 # polling_interval = 2
@@ -156,6 +156,9 @@
 # lock management.
 # locking_coordinator_url =

+# (Optional) DHCP lease time
+# dhcp_lease_time = 86400
+
 [nsx]
 # Maximum number of ports for each bridged logical switch
 # The recommended value for this parameter varies with NSX version
@@ -44,6 +44,8 @@ AGENT_STATE_OPTS = [
                help=_('Seconds between nodes reporting state to server; '
                       'should be less than agent_down_time, best if it '
                       'is half or less than agent_down_time.')),
+    cfg.BoolOpt('log_agent_heartbeats', default=False,
+                help=_('Log agent heartbeats')),
 ]

 INTERFACE_DRIVER_OPTS = [
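A standalone sketch (toy module, not Neutron's actual wiring) of how an oslo.config boolean option like the one added above is registered under the AGENT group and then read back:

    from oslo_config import cfg

    AGENT_STATE_OPTS = [
        cfg.BoolOpt('log_agent_heartbeats', default=False,
                    help='Log agent heartbeats'),
    ]

    cfg.CONF.register_opts(AGENT_STATE_OPTS, group='AGENT')
    cfg.CONF([])  # parse an empty command line, just for the demo
    print(cfg.CONF.AGENT.log_agent_heartbeats)  # False unless overridden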
@@ -21,6 +21,7 @@ import eventlet
 from oslo_config import cfg
 from oslo_log import log as logging
 import oslo_messaging
+from oslo_service import loopingcall
 from oslo_utils import importutils

 from neutron.agent.linux import dhcp

@@ -36,7 +37,6 @@ from neutron.common import utils
 from neutron import context
 from neutron.i18n import _LE, _LI, _LW
 from neutron import manager
-from neutron.openstack.common import loopingcall

 LOG = logging.getLogger(__name__)
@@ -548,7 +548,8 @@ class DhcpAgentWithStateReport(DhcpAgent):
             'configurations': {
                 'dhcp_driver': cfg.CONF.dhcp_driver,
                 'use_namespaces': cfg.CONF.use_namespaces,
-                'dhcp_lease_duration': cfg.CONF.dhcp_lease_duration},
+                'dhcp_lease_duration': cfg.CONF.dhcp_lease_duration,
+                'log_agent_heartbeats': cfg.CONF.AGENT.log_agent_heartbeats},
             'start_flag': True,
             'agent_type': constants.AGENT_TYPE_DHCP}
         report_interval = cfg.CONF.AGENT.report_interval
@@ -17,6 +17,7 @@
 import sys

 from oslo_config import cfg
+from oslo_service import service

 from neutron.agent.common import config
 from neutron.agent.dhcp import config as dhcp_config

@@ -24,7 +25,6 @@ from neutron.agent.linux import interface
 from neutron.agent.metadata import config as metadata_config
 from neutron.common import config as common_config
 from neutron.common import topics
-from neutron.openstack.common import service
 from neutron import service as neutron_service
@@ -49,4 +49,4 @@ def main():
             topic=topics.DHCP_AGENT,
             report_interval=cfg.CONF.AGENT.report_interval,
             manager='neutron.agent.dhcp.agent.DhcpAgentWithStateReport')
-    service.launch(server).wait()
+    service.launch(cfg.CONF, server).wait()
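The launcher change reflects the graduated oslo.service API, whose launch() takes the configuration object as its first argument; a minimal self-contained sketch (the service class is invented for illustration):

    from oslo_config import cfg
    from oslo_service import service


    class HeartbeatService(service.Service):
        def start(self):
            super(HeartbeatService, self).start()
            print('service started')


    launcher = service.launch(cfg.CONF, HeartbeatService())
    launcher.wait()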
@@ -18,6 +18,8 @@ import netaddr
 from oslo_config import cfg
 from oslo_log import log as logging
 import oslo_messaging
+from oslo_service import loopingcall
+from oslo_service import periodic_task
 from oslo_utils import excutils
 from oslo_utils import importutils
 from oslo_utils import timeutils

@@ -47,8 +49,6 @@ from neutron.common import topics
 from neutron import context as n_context
 from neutron.i18n import _LE, _LI, _LW
 from neutron import manager
-from neutron.openstack.common import loopingcall
-from neutron.openstack.common import periodic_task

 try:
     from neutron_fwaas.services.firewall.agents.l3reference \
@@ -595,7 +595,8 @@ class L3NATAgentWithStateReport(L3NATAgent):
                 'external_network_bridge': self.conf.external_network_bridge,
                 'gateway_external_network_id':
                 self.conf.gateway_external_network_id,
-                'interface_driver': self.conf.interface_driver},
+                'interface_driver': self.conf.interface_driver,
+                'log_agent_heartbeats': self.conf.AGENT.log_agent_heartbeats},
             'start_flag': True,
             'agent_type': l3_constants.AGENT_TYPE_L3}
         report_interval = self.conf.AGENT.report_interval
@@ -417,6 +417,17 @@ class DvrLocalRouter(router.RouterInfo):
         is_first = False
         if floating_ips:
             is_first = self.fip_ns.subscribe(self.router_id)
+            if is_first and not fip_agent_port:
+                LOG.debug("No FloatingIP agent gateway port possibly due to "
+                          "late binding of the private port to the host, "
+                          "requesting agent gateway port for 'network-id' :"
+                          "%s", ex_gw_port['network_id'])
+                fip_agent_port = self.agent.plugin_rpc.get_agent_gateway_port(
+                    self.agent.context, ex_gw_port['network_id'])
+                if not fip_agent_port:
+                    LOG.error(_LE("No FloatingIP agent gateway port "
+                                  "returned from server for 'network-id': "
+                                  "%s"), ex_gw_port['network_id'])
         if is_first and fip_agent_port:
             if 'subnets' not in fip_agent_port:
                 LOG.error(_LE('Missing subnet/agent_gateway_port'))
@@ -17,6 +17,7 @@
 import sys

 from oslo_config import cfg
+from oslo_service import service

 from neutron.agent.common import config
 from neutron.agent.l3 import config as l3_config

@@ -26,7 +27,6 @@ from neutron.agent.linux import interface
 from neutron.agent.metadata import config as metadata_config
 from neutron.common import config as common_config
 from neutron.common import topics
-from neutron.openstack.common import service
 from neutron import service as neutron_service

@@ -51,4 +51,4 @@ def main(manager='neutron.agent.l3.agent.L3NATAgentWithStateReport'):
             topic=topics.L3_AGENT,
             report_interval=cfg.CONF.AGENT.report_interval,
             manager=manager)
-    service.launch(server).wait()
+    service.launch(cfg.CONF, server).wait()
@@ -434,6 +434,44 @@ class Dnsmasq(DhcpLocalProcess):
         LOG.debug('Reloading allocations for network: %s', self.network.id)
         self.device_manager.update(self.network, self.interface_name)

+    def _sort_fixed_ips_for_dnsmasq(self, fixed_ips, v6_nets):
+        """Sort fixed_ips so that stateless IPv6 subnets appear first.
+
+        For example, if a port with v6 extra_dhcp_opts is on a network with
+        IPv4 and IPv6 stateless subnets, then the dhcp host file will have
+        the below 2 entries for the same MAC:
+
+        fa:16:3e:8f:9d:65,30.0.0.5,set:aabc7d33-4874-429e-9637-436e4232d2cd
+        (entry for IPv4 dhcp)
+        fa:16:3e:8f:9d:65,set:aabc7d33-4874-429e-9637-436e4232d2cd
+        (entry for stateless IPv6 for v6 options)
+
+        dnsmasq internal details for processing host file entries:
+        1) dnsmasq reads the host file from EOF.
+        2) So it first picks up the stateless IPv6 entry,
+           fa:16:3e:8f:9d:65,set:aabc7d33-4874-429e-9637-436e4232d2cd
+        3) But dnsmasq doesn't have sufficient checks to skip this entry
+           and pick the next entry, to process the dhcp IPv4 request.
+        4) So dnsmasq uses this entry to process the dhcp IPv4 request.
+        5) As there is no ip in this entry, dnsmasq logs "no address
+           available" and fails to send a DHCPOFFER message.
+
+        As we rely on internal details of dnsmasq to understand and fix the
+        issue, Ihar sent a mail to the dnsmasq-discuss mailing list:
+        http://lists.thekelleys.org.uk/pipermail/dnsmasq-discuss/2015q2/009650.html
+
+        So if we reverse the order of writing entries in the host file,
+        so that the entry for stateless IPv6 comes first,
+        then dnsmasq can correctly fetch the IPv4 address.
+        """
+        return sorted(
+            fixed_ips,
+            key=lambda fip: ((fip.subnet_id in v6_nets) and (
+                v6_nets[fip.subnet_id].ipv6_address_mode == (
+                    constants.DHCPV6_STATELESS))),
+            reverse=True)
+
     def _iter_hosts(self):
         """Iterate over hosts.
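Why the sort key above floats stateless-IPv6 allocations to the front: sorted() with a boolean key and reverse=True puts True before False and is otherwise stable. A toy reproduction with stand-in objects (not the real Neutron models):

    from collections import namedtuple

    FixedIP = namedtuple('FixedIP', ['subnet_id', 'ip_address'])
    Subnet = namedtuple('Subnet', ['ipv6_address_mode'])
    DHCPV6_STATELESS = 'dhcpv6-stateless'

    v6_nets = {'v6-subnet': Subnet(ipv6_address_mode=DHCPV6_STATELESS)}
    fixed_ips = [FixedIP('v4-subnet', '30.0.0.5'),
                 FixedIP('v6-subnet', None)]

    ordered = sorted(
        fixed_ips,
        key=lambda fip: (fip.subnet_id in v6_nets and
                         v6_nets[fip.subnet_id].ipv6_address_mode ==
                         DHCPV6_STATELESS),
        reverse=True)

    # The stateless IPv6 allocation now comes first, so its host-file line
    # is written first and dnsmasq, reading from EOF, hits the IPv4 line.
    assert ordered[0].subnet_id == 'v6-subnet'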
@@ -449,8 +487,11 @@ class Dnsmasq(DhcpLocalProcess):
         """
         v6_nets = dict((subnet.id, subnet) for subnet in
                        self.network.subnets if subnet.ip_version == 6)
+
         for port in self.network.ports:
-            for alloc in port.fixed_ips:
+            fixed_ips = self._sort_fixed_ips_for_dnsmasq(port.fixed_ips,
+                                                         v6_nets)
+            for alloc in fixed_ips:
                 # Note(scollins) Only create entries that are
                 # associated with the subnet being managed by this
                 # dhcp agent
@@ -911,7 +952,7 @@ class DeviceManager(object):
         device = ip_lib.IPDevice(device_name, namespace=network.namespace)
         gateway = device.route.get_gateway()
         if gateway:
-            gateway = gateway['gateway']
+            gateway = gateway.get('gateway')

         for subnet in network.subnets:
             skip_subnet = (
@@ -19,6 +19,7 @@ import os
 from oslo_config import cfg
 from oslo_log import log as logging
 from oslo_utils import excutils
+import re

 from neutron.agent.common import utils
 from neutron.common import exceptions

@@ -36,6 +37,8 @@ OPTS = [
 LOOPBACK_DEVNAME = 'lo'

 SYS_NET_PATH = '/sys/class/net'
+DEFAULT_GW_PATTERN = re.compile(r"via (\S+)")
+METRIC_PATTERN = re.compile(r"metric (\S+)")


 class AddressNotReady(exceptions.NeutronException):
@@ -531,12 +534,13 @@ class IpRouteCommand(IpDeviceCommandBase):
                                   route_list_lines if
                                   x.strip().startswith('default')), None)
         if default_route_line:
-            gateway_index = 2
-            parts = default_route_line.split()
-            retval = dict(gateway=parts[gateway_index])
-            if 'metric' in parts:
-                metric_index = parts.index('metric') + 1
-                retval.update(metric=int(parts[metric_index]))
+            retval = dict()
+            gateway = DEFAULT_GW_PATTERN.search(default_route_line)
+            if gateway:
+                retval.update(gateway=gateway.group(1))
+            metric = METRIC_PATTERN.search(default_route_line)
+            if metric:
+                retval.update(metric=int(metric.group(1)))

         return retval
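The regex approach tolerates `ip route` output whose fields move around, which the old fixed-index split() could not; a quick illustration on a sample line (the route text is made up):

    import re

    DEFAULT_GW_PATTERN = re.compile(r"via (\S+)")
    METRIC_PATTERN = re.compile(r"metric (\S+)")

    line = "default via 192.168.1.1 dev eth0 proto static metric 100"
    retval = {}
    gateway = DEFAULT_GW_PATTERN.search(line)
    if gateway:
        retval['gateway'] = gateway.group(1)
    metric = METRIC_PATTERN.search(line)
    if metric:
        retval['metric'] = int(metric.group(1))
    print(retval)  # {'gateway': '192.168.1.1', 'metric': 100}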
@@ -630,6 +634,17 @@ class IpNeighCommand(IpDeviceCommandBase):
                              ('show',
                               'dev', self.name))

+    def flush(self, ip_version, ip_address):
+        """Flush neighbour entries
+
+        Given address entry is removed from neighbour cache (ARP or NDP). To
+        flush all entries pass string 'all' as an address.
+
+        :param ip_version: Either 4 or 6 for IPv4 or IPv6 respectively
+        :param ip_address: The prefix selecting the neighbours to flush
+        """
+        self._as_root([ip_version], ('flush', 'to', ip_address))
+

 class IpNetnsCommand(IpCommandBase):
     COMMAND = 'netns'
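A hedged usage sketch of the new helper (device and namespace names are invented); under the hood it runs `ip -4 neigh flush to <address>` inside the device's namespace:

    from neutron.agent.linux import ip_lib

    device = ip_lib.IPDevice('qr-1234', namespace='qrouter-abcd')
    device.neigh.flush(4, '192.168.1.5')   # remove one stale ARP entry
    device.neigh.flush(4, 'all')           # or flush the whole IPv4 cache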
@@ -18,7 +18,7 @@ import eventlet

 from neutron.agent.common import base_polling
 from neutron.agent.linux import ovsdb_monitor
-from neutron.plugins.openvswitch.common import constants
+from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants


 @contextlib.contextmanager
@@ -20,6 +20,7 @@ from neutronclient.v2_0 import client
 from oslo_config import cfg
 from oslo_log import log as logging
 import oslo_messaging
+from oslo_service import loopingcall
 import six
 import six.moves.urllib.parse as urlparse
 import webob

@@ -34,7 +35,6 @@ from neutron.common import utils
 from neutron import context
 from neutron.i18n import _LE, _LW
 from neutron.openstack.common.cache import cache
-from neutron.openstack.common import loopingcall

 LOG = logging.getLogger(__name__)
@@ -289,6 +289,7 @@ class UnixDomainMetadataProxy(object):
                 'metadata_proxy_socket': cfg.CONF.metadata_proxy_socket,
                 'nova_metadata_ip': cfg.CONF.nova_metadata_ip,
                 'nova_metadata_port': cfg.CONF.nova_metadata_port,
+                'log_agent_heartbeats': cfg.CONF.AGENT.log_agent_heartbeats,
             },
             'start_flag': True,
             'agent_type': n_const.AGENT_TYPE_METADATA}
@@ -18,6 +18,7 @@ import itertools
 from oslo_log import log as logging
 import oslo_messaging
 from oslo_utils import timeutils
+from oslo_utils import uuidutils

 from neutron.common import constants
 from neutron.common import rpc as n_rpc
@@ -72,6 +73,11 @@ class PluginReportStateAPI(object):

     def report_state(self, context, agent_state, use_call=False):
         cctxt = self.client.prepare()
+        # Add a unique identifier to the report so that it can be
+        # logged on the server side.
+        # This creates a visible correspondence between events on
+        # the agent and on the server.
+        agent_state['uuid'] = uuidutils.generate_uuid()
         kwargs = {
             'agent_state': {'agent_state': agent_state},
             'time': timeutils.strtime(),
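The effect of the identifier: the same UUID appears in the agent's outgoing report and in the server's heartbeat log line, so the two can be correlated. A tiny illustration (the agent_state dict is sample data):

    from oslo_utils import uuidutils

    agent_state = {'agent_type': 'DHCP agent', 'host': 'compute-1'}
    agent_state['uuid'] = uuidutils.generate_uuid()
    print('agent sends report %s' % agent_state['uuid'])
    # ...the server can then log "Heartbeat received ... uuid <same value>"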
@@ -13,7 +13,7 @@
 # License for the specific language governing permissions and limitations
 # under the License.

-import neutron.plugins.openvswitch.agent.main as agent_main
+import neutron.plugins.ml2.drivers.openvswitch.agent.main as agent_main


 def main():
@@ -27,7 +27,8 @@ from neutron.agent.linux import utils as agent_utils
 from neutron.common import utils
 from neutron.i18n import _LE
 from neutron.plugins.common import constants as const
-from neutron.plugins.openvswitch.common import constants as ovs_const
+from neutron.plugins.ml2.drivers.openvswitch.agent.common \
+    import constants as ovs_const

 LOG = logging.getLogger(__name__)
@@ -25,9 +25,12 @@ from neutron.i18n import _LE, _LW


 LOG = logging.getLogger(__name__)
-cfg.CONF.import_group('AGENT', 'neutron.plugins.openvswitch.common.config')
-cfg.CONF.import_group('OVS', 'neutron.plugins.openvswitch.common.config')
-cfg.CONF.import_group('VXLAN', 'neutron.plugins.linuxbridge.common.config')
+cfg.CONF.import_group('AGENT', 'neutron.plugins.ml2.drivers.openvswitch.'
+                      'agent.common.config')
+cfg.CONF.import_group('OVS', 'neutron.plugins.ml2.drivers.openvswitch.'
+                      'agent.common.config')
+cfg.CONF.import_group('VXLAN', 'neutron.plugins.ml2.drivers.linuxbridge.'
+                      'agent.common.config')
 cfg.CONF.import_group('ml2', 'neutron.plugins.ml2.config')
 cfg.CONF.import_group('ml2_sriov',
                       'neutron.plugins.ml2.drivers.mech_sriov.mech_driver')
@@ -18,10 +18,10 @@ from oslo_config import cfg
 from oslo_log import log as logging
 import oslo_messaging
 from oslo_messaging import serializer as om_serializer
+from oslo_service import service

 from neutron.common import exceptions
 from neutron import context
-from neutron.openstack.common import service


 LOG = logging.getLogger(__name__)
@@ -0,0 +1,42 @@
+# Copyright (c) 2010 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License");
+#    you may not use this file except in compliance with the License.
+#    You may obtain a copy of the License at
+#
+#        http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS,
+#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#    See the License for the specific language governing permissions and
+#    limitations under the License.
+
+# Colorizer Code is borrowed from Twisted:
+# Copyright (c) 2001-2010 Twisted Matrix Laboratories.
+#
+#    Permission is hereby granted, free of charge, to any person obtaining
+#    a copy of this software and associated documentation files (the
+#    "Software"), to deal in the Software without restriction, including
+#    without limitation the rights to use, copy, modify, merge, publish,
+#    distribute, sublicense, and/or sell copies of the Software, and to
+#    permit persons to whom the Software is furnished to do so, subject to
+#    the following conditions:
+#
+#    The above copyright notice and this permission notice shall be
+#    included in all copies or substantial portions of the Software.
+#
+#    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+#    EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+#    MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+#    NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+#    LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+#    OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+#    WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+# describes parameters used by different unit/functional tests
+# a plugin-specific testing mechanism should import this dictionary
+# and override the values in it if needed (e.g., run_tests.py in
+# neutron/plugins/openvswitch/ )
+test_config = {}
@@ -29,7 +29,7 @@ from neutron.common import constants
 from neutron.db import model_base
 from neutron.db import models_v2
 from neutron.extensions import agent as ext_agent
-from neutron.i18n import _LE, _LW
+from neutron.i18n import _LE, _LI, _LW
 from neutron import manager

 LOG = logging.getLogger(__name__)
@@ -203,22 +203,33 @@ class AgentDbMixin(ext_agent.AgentPluginBase):
         agent = self._get_agent(context, id)
         return self._make_agent_dict(agent, fields)

-    def _create_or_update_agent(self, context, agent):
+    def _log_heartbeat(self, state, agent_db, agent_conf):
+        if agent_conf.get('log_agent_heartbeats'):
+            delta = timeutils.utcnow() - agent_db.heartbeat_timestamp
+            LOG.info(_LI("Heartbeat received from %(type)s agent on "
+                         "host %(host)s, uuid %(uuid)s after %(delta)s"),
+                     {'type': agent_db.agent_type,
+                      'host': agent_db.host,
+                      'uuid': state.get('uuid'),
+                      'delta': delta})
+
+    def _create_or_update_agent(self, context, agent_state):
         with context.session.begin(subtransactions=True):
             res_keys = ['agent_type', 'binary', 'host', 'topic']
-            res = dict((k, agent[k]) for k in res_keys)
+            res = dict((k, agent_state[k]) for k in res_keys)

-            configurations_dict = agent.get('configurations', {})
+            configurations_dict = agent_state.get('configurations', {})
             res['configurations'] = jsonutils.dumps(configurations_dict)
-            res['load'] = self._get_agent_load(agent)
+            res['load'] = self._get_agent_load(agent_state)
             current_time = timeutils.utcnow()
             try:
                 agent_db = self._get_agent_by_type_and_host(
-                    context, agent['agent_type'], agent['host'])
+                    context, agent_state['agent_type'], agent_state['host'])
                 res['heartbeat_timestamp'] = current_time
-                if agent.get('start_flag'):
+                if agent_state.get('start_flag'):
                     res['started_at'] = current_time
                 greenthread.sleep(0)
+                self._log_heartbeat(agent_state, agent_db, configurations_dict)
                 agent_db.update(res)
             except ext_agent.AgentNotFoundByTypeHost:
                 greenthread.sleep(0)
@@ -229,6 +240,7 @@ class AgentDbMixin(ext_agent.AgentPluginBase):
                 agent_db = Agent(**res)
                 greenthread.sleep(0)
                 context.session.add(agent_db)
+                self._log_heartbeat(agent_state, agent_db, configurations_dict)
             greenthread.sleep(0)

     def create_or_update_agent(self, context, agent):
@@ -19,6 +19,7 @@ import time

 from oslo_config import cfg
 from oslo_log import log as logging
+from oslo_service import loopingcall
 from oslo_utils import timeutils
 import sqlalchemy as sa
 from sqlalchemy import orm

@@ -32,7 +33,6 @@ from neutron.db import model_base
 from neutron.extensions import agent as ext_agent
 from neutron.extensions import dhcpagentscheduler
 from neutron.i18n import _LE, _LI, _LW
-from neutron.openstack.common import loopingcall


 LOG = logging.getLogger(__name__)
@@ -14,10 +14,13 @@
 # under the License.

 import contextlib
+import six

 from oslo_config import cfg
+from oslo_db import exception as os_db_exception
 from oslo_db.sqlalchemy import session
 from sqlalchemy import exc
+from sqlalchemy import orm


 _FACADE = None
@@ -64,3 +67,21 @@ def autonested_transaction(sess):
     finally:
         with session_context as tx:
             yield tx
+
+
+class convert_db_exception_to_retry(object):
+    """Converts other types of DB exceptions into RetryRequests."""
+
+    def __init__(self, stale_data=False):
+        self.to_catch = ()
+        if stale_data:
+            self.to_catch += (orm.exc.StaleDataError, )
+
+    def __call__(self, f):
+        @six.wraps(f)
+        def wrapper(*args, **kwargs):
+            try:
+                return f(*args, **kwargs)
+            except self.to_catch as e:
+                raise os_db_exception.RetryRequest(e)
+        return wrapper
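How the decorator is meant to be applied (the decorated function below is hypothetical): a StaleDataError escaping the wrapped call becomes an oslo.db RetryRequest, which the retry machinery wrapping the API layer uses to re-run the whole operation:

    from neutron.db import api as db_api


    @db_api.convert_db_exception_to_retry(stale_data=True)
    def update_port_status(context, port_id, status):
        # ... ORM update that may race with a concurrent writer ...
        pass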
@@ -13,8 +13,6 @@
 # License for the specific language governing permissions and limitations
 # under the License.

-import netaddr
-
 from oslo_config import cfg
 from oslo_log import log as logging
 from sqlalchemy.orm import exc

@@ -25,7 +23,6 @@ from neutron.common import exceptions as n_exc
 from neutron.common import utils
 from neutron.db import common_db_mixin
 from neutron.db import models_v2
-from neutron.ipam import utils as ipam_utils

 LOG = logging.getLogger(__name__)
@@ -75,14 +72,6 @@ class DbBasePluginCommon(common_db_mixin.CommonDbMixin):
         )
         context.session.add(allocated)

-    @classmethod
-    def _check_gateway_in_subnet(cls, cidr, gateway):
-        """Validate that the gateway is on the subnet."""
-        ip = netaddr.IPAddress(gateway)
-        if ip.version == 4 or (ip.version == 6 and not ip.is_link_local()):
-            return ipam_utils.check_subnet_ip(cidr, gateway)
-        return True
-
     def _make_subnet_dict(self, subnet, fields=None):
         res = {'id': subnet['id'],
                'name': subnet['name'],
@@ -31,7 +31,6 @@ from neutron.callbacks import resources
 from neutron.common import constants
 from neutron.common import exceptions as n_exc
 from neutron.common import ipv6_utils
-from neutron.common import utils
 from neutron import context as ctx
 from neutron.db import api as db_api
 from neutron.db import ipam_non_pluggable_backend

@@ -356,7 +355,7 @@ class NeutronDbPluginV2(ipam_non_pluggable_backend.IpamNonPluggableBackend,
         if attributes.is_attr_set(s.get('gateway_ip')):
             self._validate_ip_version(ip_ver, s['gateway_ip'], 'gateway_ip')
             if (cfg.CONF.force_gateway_on_subnet and
-                    not self._check_gateway_in_subnet(
+                    not ipam.utils.check_gateway_in_subnet(
                         s['cidr'], s['gateway_ip'])):
                 error_message = _("Gateway is not valid on subnet")
                 raise n_exc.InvalidInput(error_message=error_message)
@@ -440,32 +439,11 @@ class NeutronDbPluginV2(ipam_non_pluggable_backend.IpamNonPluggableBackend,
                     external_gateway_info}}
             l3plugin.update_router(context, id, info)

-    def _make_subnet_request(self, tenant_id, subnet, subnetpool):
-        cidr = subnet.get('cidr')
-        subnet_id = subnet.get('id', uuidutils.generate_uuid())
-        is_any_subnetpool_request = not attributes.is_attr_set(cidr)
-
-        if is_any_subnetpool_request:
-            prefixlen = subnet['prefixlen']
-            if not attributes.is_attr_set(prefixlen):
-                prefixlen = int(subnetpool['default_prefixlen'])
-
-            return ipam.AnySubnetRequest(
-                tenant_id,
-                subnet_id,
-                utils.ip_version_from_int(subnetpool['ip_version']),
-                prefixlen)
-        else:
-            return ipam.SpecificSubnetRequest(tenant_id,
-                                              subnet_id,
-                                              cidr)
-
     @oslo_db_api.wrap_db_retry(max_retries=db_api.MAX_RETRIES,
                                retry_on_request=True,
                                retry_on_deadlock=True)
     def _create_subnet_from_pool(self, context, subnet, subnetpool_id):
         s = subnet['subnet']
         tenant_id = self._get_tenant_id_for_create(context, s)
         self._validate_pools_with_subnetpool(s)

         with context.session.begin(subtransactions=True):
@@ -474,7 +452,7 @@ class NeutronDbPluginV2(ipam_non_pluggable_backend.IpamNonPluggableBackend,

             network = self._get_network(context, s["network_id"])
             allocator = subnet_alloc.SubnetAllocator(subnetpool, context)
-            req = self._make_subnet_request(tenant_id, s, subnetpool)
+            req = ipam.SubnetRequestFactory.get_request(context, s, subnetpool)

             ipam_subnet = allocator.allocate_subnet(req)
             detail = ipam_subnet.get_details()
@@ -498,9 +476,8 @@ class NeutronDbPluginV2(ipam_non_pluggable_backend.IpamNonPluggableBackend,
     def _create_subnet_from_implicit_pool(self, context, subnet):
         s = subnet['subnet']
         self._validate_subnet(context, s)
-        tenant_id = self._get_tenant_id_for_create(context, s)
         id = s.get('id', uuidutils.generate_uuid())
-        detail = ipam.SpecificSubnetRequest(tenant_id,
+        detail = ipam.SpecificSubnetRequest(s['tenant_id'],
                                             id,
                                             s['cidr'])
         with context.session.begin(subtransactions=True):
@@ -571,6 +548,7 @@ class NeutronDbPluginV2(ipam_non_pluggable_backend.IpamNonPluggableBackend,
             net = netaddr.IPNetwork(s['cidr'])
             subnet['subnet']['cidr'] = '%s/%s' % (net.network, net.prefixlen)

+        s['tenant_id'] = self._get_tenant_id_for_create(context, s)
         subnetpool_id = self._get_subnetpool_id(s)
         if not subnetpool_id:
             if not has_cidr:
@@ -608,8 +586,13 @@ class NeutronDbPluginV2(ipam_non_pluggable_backend.IpamNonPluggableBackend,
         self._validate_subnet(context, s, cur_subnet=db_subnet)

         if s.get('gateway_ip') is not None:
-            allocation_pools = [{'start': p['first_ip'], 'end': p['last_ip']}
-                                for p in db_subnet.allocation_pools]
+            if s.get('allocation_pools') is not None:
+                allocation_pools = [{'start': p['start'], 'end': p['end']}
+                                    for p in s['allocation_pools']]
+            else:
+                allocation_pools = [{'start': p['first_ip'],
+                                     'end': p['last_ip']}
+                                    for p in db_subnet.allocation_pools]
             self._validate_gw_out_of_pools(s["gateway_ip"], allocation_pools)

         with context.session.begin(subtransactions=True):
@@ -14,6 +14,7 @@
 # under the License.

 import collections
+import itertools

 import netaddr
 from oslo_config import cfg

@@ -27,6 +28,7 @@ from neutron.common import ipv6_utils
 from neutron.db import db_base_plugin_common
 from neutron.db import models_v2
 from neutron.i18n import _LI
+from neutron.ipam import utils as ipam_utils

 LOG = logging.getLogger(__name__)
@@ -174,24 +176,10 @@ class IpamBackendMixin(db_base_plugin_common.DbBasePluginCommon):
                  a list of dict objects with 'start' and 'end' keys for
                  defining the pool range.
         """
-        pools = []
-        # Auto allocate the pool around gateway_ip
-        net = netaddr.IPNetwork(subnet['cidr'])
-        first_ip = net.first + 1
-        last_ip = net.last - 1
-        gw_ip = int(netaddr.IPAddress(subnet['gateway_ip'] or net.last))
-        # Use the gw_ip to find a point for splitting allocation pools
-        # for this subnet
-        split_ip = min(max(gw_ip, net.first), net.last)
-        if split_ip > first_ip:
-            pools.append({'start': str(netaddr.IPAddress(first_ip)),
-                          'end': str(netaddr.IPAddress(split_ip - 1))})
-        if split_ip < last_ip:
-            pools.append({'start': str(netaddr.IPAddress(split_ip + 1)),
-                          'end': str(netaddr.IPAddress(last_ip))})
-        # return auto-generated pools
-        # no need to check for their validity
-        return pools
+        pools = ipam_utils.generate_pools(subnet['cidr'], subnet['gateway_ip'])
+        return [{'start': str(netaddr.IPAddress(pool.first)),
+                 'end': str(netaddr.IPAddress(pool.last))}
+                for pool in pools]

     def _validate_subnet_cidr(self, context, network, new_subnet_cidr):
         """Validate the CIDR for a subnet.
@@ -251,7 +239,8 @@ class IpamBackendMixin(db_base_plugin_common.DbBasePluginCommon):
         """
         subnet = netaddr.IPNetwork(subnet_cidr)
         subnet_first_ip = netaddr.IPAddress(subnet.first + 1)
-        subnet_last_ip = netaddr.IPAddress(subnet.last - 1)
+        # last address is broadcast in v4
+        subnet_last_ip = netaddr.IPAddress(subnet.last - (subnet.version == 4))

         LOG.debug("Performing IP validity checks on allocation pools")
         ip_sets = []
@@ -308,6 +297,16 @@ class IpamBackendMixin(db_base_plugin_common.DbBasePluginCommon):
                     pool_2=r_range,
                     subnet_cidr=subnet_cidr)

+    def _prepare_allocation_pools(self, context, allocation_pools, subnet):
+        if not attributes.is_attr_set(allocation_pools):
+            return self._allocate_pools_for_subnet(context, subnet)
+
+        self._validate_allocation_pools(allocation_pools, subnet['cidr'])
+        if subnet['gateway_ip']:
+            self._validate_gw_out_of_pools(subnet['gateway_ip'],
+                                           allocation_pools)
+        return allocation_pools
+
     def _validate_gw_out_of_pools(self, gateway_ip, pools):
         for allocation_pool in pools:
             pool_range = netaddr.IPRange(
@@ -318,6 +317,15 @@ class IpamBackendMixin(db_base_plugin_common.DbBasePluginCommon):
                 pool=pool_range,
                 ip_address=gateway_ip)

+    def _is_ip_required_by_subnet(self, context, subnet_id, device_owner):
+        # For ports that are not router ports, retain any automatic
+        # (non-optional, e.g. IPv6 SLAAC) addresses.
+        if device_owner in constants.ROUTER_INTERFACE_OWNERS:
+            return True
+
+        subnet = self._get_subnet(context, subnet_id)
+        return not ipv6_utils.is_auto_address_subnet(subnet)
+
     def _get_changed_ips_for_port(self, context, original_ips,
                                   new_ips, device_owner):
         """Calculate changes in IPs for the port."""
@@ -326,30 +334,44 @@ class IpamBackendMixin(db_base_plugin_common.DbBasePluginCommon):
             msg = _('Exceeded maximum amount of fixed ips per port')
             raise n_exc.InvalidInput(error_message=msg)

         # These ips are still on the port and haven't been removed
         prev_ips = []
+        add_ips = []
+        remove_ips = []
+        ips_map = {ip['ip_address']: ip
+                   for ip in itertools.chain(new_ips, original_ips)
+                   if 'ip_address' in ip}

-        # Remove all of the intersecting elements
-        for original_ip in original_ips[:]:
-            for new_ip in new_ips[:]:
-                if ('ip_address' in new_ip and
-                        original_ip['ip_address'] == new_ip['ip_address']):
-                    original_ips.remove(original_ip)
-                    new_ips.remove(new_ip)
-                    prev_ips.append(original_ip)
-                    break
-            else:
-                # For ports that are not router ports, retain any automatic
-                # (non-optional, e.g. IPv6 SLAAC) addresses.
-                if device_owner not in constants.ROUTER_INTERFACE_OWNERS:
-                    subnet = self._get_subnet(context,
-                                              original_ip['subnet_id'])
-                    if (ipv6_utils.is_auto_address_subnet(subnet)):
-                        original_ips.remove(original_ip)
-                        prev_ips.append(original_ip)
-        return self.Changes(add=new_ips,
+        new = set()
+        for ip in new_ips:
+            if 'ip_address' in ip:
+                new.add(ip['ip_address'])
+            else:
+                add_ips.append(ip)
+
+        # Convert original ip addresses to sets
+        orig = set(ip['ip_address'] for ip in original_ips)
+
+        add = new - orig
+        unchanged = new & orig
+        remove = orig - new
+
+        # Convert results back to list of dicts
+        add_ips += [ips_map[ip] for ip in add]
+        prev_ips = [ips_map[ip] for ip in unchanged]
+
+        # Mark ip for removing if it is not found in new_ips
+        # and subnet requires ip to be set manually.
+        # For auto addresses leave ip unchanged
+        for ip in remove:
+            subnet_id = ips_map[ip]['subnet_id']
+            if self._is_ip_required_by_subnet(context, subnet_id,
+                                              device_owner):
+                remove_ips.append(ips_map[ip])
+            else:
+                prev_ips.append(ips_map[ip])
+
+        return self.Changes(add=add_ips,
                             original=prev_ips,
-                            remove=original_ips)
+                            remove=remove_ips)

     def _delete_port(self, context, port_id):
         query = (context.session.query(models_v2.Port).
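The rewrite reduces the old nested O(n^2) loops to plain set arithmetic; a toy illustration with sample addresses (not from the source):

    orig = {'10.0.0.3', '10.0.0.4'}     # addresses currently on the port
    new = {'10.0.0.4', '10.0.0.9'}      # addresses requested in the update

    print(sorted(new - orig))    # ['10.0.0.9']  -> add
    print(sorted(new & orig))    # ['10.0.0.4']  -> unchanged
    print(sorted(orig - new))    # ['10.0.0.3']  -> candidate removals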
@@ -364,17 +386,9 @@ class IpamBackendMixin(db_base_plugin_common.DbBasePluginCommon):
                                  dns_nameservers,
                                  host_routes,
                                  allocation_pools):
-
-        if not attributes.is_attr_set(allocation_pools):
-            allocation_pools = self._allocate_pools_for_subnet(context,
-                                                               subnet_args)
-        else:
-            self._validate_allocation_pools(allocation_pools,
-                                            subnet_args['cidr'])
-            if subnet_args['gateway_ip']:
-                self._validate_gw_out_of_pools(subnet_args['gateway_ip'],
-                                               allocation_pools)
-
+        allocation_pools = self._prepare_allocation_pools(context,
+                                                          allocation_pools,
+                                                          subnet_args)
         self._validate_subnet_cidr(context, network, subnet_args['cidr'])
         self._validate_network_subnetpools(network,
                                            subnet_args['subnetpool_id'],
@@ -101,7 +101,8 @@ def check_db_schema_version(engine, metadata):
     )


-# Duplicated from neutron.plugins.linuxbridge.common.constants to
+# Duplicated from
+# neutron.plugins.ml2.drivers.linuxbridge.agent.common.constants to
 # avoid having any dependency on the linuxbridge plugin being
 # installed.
 def interpret_vlan_id(vlan_id):
@@ -50,7 +50,6 @@ from neutron.plugins.brocade.db import models as brocade_models  # noqa
 from neutron.plugins.cisco.db.l3 import l3_models  # noqa
 from neutron.plugins.cisco.db import n1kv_models_v2  # noqa
 from neutron.plugins.cisco.db import network_models_v2  # noqa
-from neutron.plugins.linuxbridge.db import l2network_models_v2  # noqa
 from neutron.plugins.metaplugin import meta_models_v2  # noqa
 from neutron.plugins.ml2.drivers.arista import db  # noqa
 from neutron.plugins.ml2.drivers.brocade.db import (  # noqa

@@ -60,6 +59,9 @@ from neutron.plugins.ml2.drivers.cisco.n1kv import n1kv_models  # noqa
 from neutron.plugins.ml2.drivers.cisco.nexus import (  # noqa
     nexus_models_v2 as ml2_nexus_models_v2)
 from neutron.plugins.ml2.drivers.cisco.ucsm import ucsm_model  # noqa
+from neutron.plugins.ml2.drivers.linuxbridge.agent import (  # noqa
+    l2network_models_v2)
+from neutron.plugins.ml2.drivers.openvswitch.agent import ovs_models_v2  # noqa
 from neutron.plugins.ml2.drivers import type_flat  # noqa
 from neutron.plugins.ml2.drivers import type_gre  # noqa
 from neutron.plugins.ml2.drivers import type_vlan  # noqa

@@ -67,7 +69,6 @@ from neutron.plugins.ml2.drivers import type_vxlan  # noqa
 from neutron.plugins.ml2 import models  # noqa
 from neutron.plugins.nec.db import models as nec_models  # noqa
 from neutron.plugins.nuage import nuage_models  # noqa
-from neutron.plugins.openvswitch import ovs_models_v2  # noqa
 from neutron.plugins.vmware.dbexts import nsx_models  # noqa
 from neutron.plugins.vmware.dbexts import nsxv_models  # noqa
 from neutron.plugins.vmware.dbexts import vcns_models  # noqa
@@ -75,7 +75,8 @@ def use_jsonutils(logical_line, filename):
     # Some files in the tree are not meant to be run from inside Neutron
     # itself, so we should not complain about them not using jsonutils
     json_check_skipped_patterns = [
-        "neutron/plugins/openvswitch/agent/xenapi/etc/xapi.d/plugins/netwrap",
+        "neutron/plugins/ml2/drivers/openvswitch/agent/xenapi/etc/xapi.d/"
+        "plugins/netwrap",
     ]

     for pattern in json_check_skipped_patterns:
@@ -14,10 +14,13 @@ import abc
 import netaddr

 from oslo_config import cfg
+from oslo_utils import uuidutils
 import six

+from neutron.api.v2 import attributes
 from neutron.common import constants
 from neutron.common import ipv6_utils
+from neutron.common import utils as common_utils
 from neutron.ipam import exceptions as ipam_exc

@@ -243,28 +246,42 @@ class RouterGatewayAddressRequest(AddressRequest):
     """Used to request allocating the special router gateway address."""


-class BaseRequestFactory(object):
-    """Factory class to create right request based on input"""
-    any_request = None
-    specific_request = None
-    address_index = 0
-
-    @classmethod
-    def get_request(cls, *args, **kwargs):
-        args_list = [a for a in args]
-        address = args_list.pop(cls.address_index)
-        if not address:
-            return cls.any_request(*args_list, **kwargs)
-        else:
-            return cls.specific_request(*args, **kwargs)
-
-
-class AddressRequestFactory(BaseRequestFactory):
-    any_request = AnyAddressRequest
-    specific_request = SpecificAddressRequest
-
-
-class SubnetRequestFactory(BaseRequestFactory):
-    any_request = AnySubnetRequest
-    specific_request = SpecificSubnetRequest
-    address_index = 2
+class AddressRequestFactory(object):
+    """Builds request using ip info
+
+    Additional parameters(port and context) are not used in default
+    implementation, but planned to be used in sub-classes
+    provided by specific ipam driver,
+    """
+
+    @classmethod
+    def get_request(cls, context, port, ip):
+        if not ip:
+            return AnyAddressRequest()
+        else:
+            return SpecificAddressRequest(ip)
+
+
+class SubnetRequestFactory(object):
+    """Builds request using subnet info"""
+
+    @classmethod
+    def get_request(cls, context, subnet, subnetpool):
+        cidr = subnet.get('cidr')
+        subnet_id = subnet.get('id', uuidutils.generate_uuid())
+        is_any_subnetpool_request = not attributes.is_attr_set(cidr)
+
+        if is_any_subnetpool_request:
+            prefixlen = subnet['prefixlen']
+            if not attributes.is_attr_set(prefixlen):
+                prefixlen = int(subnetpool['default_prefixlen'])
+
+            return AnySubnetRequest(
+                subnet['tenant_id'],
+                subnet_id,
+                common_utils.ip_version_from_int(subnetpool['ip_version']),
+                prefixlen)
+        else:
+            return SpecificSubnetRequest(subnet['tenant_id'],
+                                         subnet_id,
+                                         cidr)
@@ -21,28 +21,38 @@ def check_subnet_ip(cidr, ip_address):
     ip = netaddr.IPAddress(ip_address)
     net = netaddr.IPNetwork(cidr)
     # Check that the IP is valid on subnet. This cannot be the
-    # network or the broadcast address
-    return (ip != net.network and ip != net.broadcast
+    # network or the broadcast address (which exists only in IPv4)
+    return (ip != net.network
+            and (net.version == 6 or ip != net.broadcast)
             and net.netmask & ip == net.network)


+def check_gateway_in_subnet(cidr, gateway):
+    """Validate that the gateway is on the subnet."""
+    ip = netaddr.IPAddress(gateway)
+    if ip.version == 4 or (ip.version == 6 and not ip.is_link_local()):
+        return check_subnet_ip(cidr, gateway)
+    return True
+
+
 def generate_pools(cidr, gateway_ip):
     """Create IP allocation pools for a specified subnet

     The Neutron API defines a subnet's allocation pools as a list of
     IPRange objects for defining the pool range.
     """
-    pools = []
     # Auto allocate the pool around gateway_ip
     net = netaddr.IPNetwork(cidr)
     if net.first == net.last:
         # handle single address subnet case
         return [netaddr.IPRange(net.first, net.last)]
     first_ip = net.first + 1
-    last_ip = net.last - 1
-    gw_ip = int(netaddr.IPAddress(gateway_ip or net.last))
-    # Use the gw_ip to find a point for splitting allocation pools
-    # for this subnet
-    split_ip = min(max(gw_ip, net.first), net.last)
-    if split_ip > first_ip:
-        pools.append(netaddr.IPRange(first_ip, split_ip - 1))
-    if split_ip < last_ip:
-        pools.append(netaddr.IPRange(split_ip + 1, last_ip))
-    return pools
+    # last address is broadcast in v4
+    last_ip = net.last - (net.version == 4)
+    if first_ip >= last_ip:
+        # /31 lands here
+        return []
+    ipset = netaddr.IPSet(netaddr.IPRange(first_ip, last_ip))
+    if gateway_ip:
+        ipset.remove(netaddr.IPAddress(gateway_ip))
+    return list(ipset.iter_ipranges())
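A worked example (illustrative values) of the IPSet-based generation above: removing an in-range gateway from the usable span naturally splits the subnet into two allocation pools:

    import netaddr

    cidr, gateway_ip = '192.168.1.0/24', '192.168.1.100'
    net = netaddr.IPNetwork(cidr)
    first_ip = net.first + 1                  # skip the network address
    last_ip = net.last - (net.version == 4)   # skip the v4 broadcast address
    ipset = netaddr.IPSet(netaddr.IPRange(first_ip, last_ip))
    ipset.remove(netaddr.IPAddress(gateway_ip))
    print(list(ipset.iter_ipranges()))
    # [IPRange('192.168.1.1', '192.168.1.99'),
    #  IPRange('192.168.1.101', '192.168.1.254')]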
@@ -18,12 +18,12 @@ import weakref
 from oslo_config import cfg
 from oslo_log import log as logging
 import oslo_messaging
+from oslo_service import periodic_task
 from oslo_utils import importutils
 import six

 from neutron.common import utils
 from neutron.i18n import _LE, _LI
-from neutron.openstack.common import periodic_task
 from neutron.plugins.common import constants

 from stevedore import driver

@@ -43,7 +43,8 @@ class Manager(periodic_task.PeriodicTasks):
         if not host:
             host = cfg.CONF.host
         self.host = host
-        super(Manager, self).__init__()
+        conf = getattr(self, "conf", cfg.CONF)
+        super(Manager, self).__init__(conf)

     def periodic_tasks(self, context, raise_on_error=False):
         self.run_periodic_tasks(context, raise_on_error=raise_on_error)
@@ -1,151 +0,0 @@
-# Copyright (c) 2012 OpenStack Foundation.
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from __future__ import print_function
-
-import copy
-import errno
-import gc
-import logging
-import os
-import pprint
-import socket
-import sys
-import traceback
-
-import eventlet.backdoor
-import greenlet
-from oslo_config import cfg
-
-from neutron.openstack.common._i18n import _LI
-
-help_for_backdoor_port = (
-    "Acceptable values are 0, <port>, and <start>:<end>, where 0 results "
-    "in listening on a random tcp port number; <port> results in listening "
-    "on the specified port number (and not enabling backdoor if that port "
-    "is in use); and <start>:<end> results in listening on the smallest "
-    "unused port number within the specified range of port numbers. The "
-    "chosen port is displayed in the service's log file.")
-eventlet_backdoor_opts = [
-    cfg.StrOpt('backdoor_port',
-               help="Enable eventlet backdoor. %s" % help_for_backdoor_port)
-]
-
-CONF = cfg.CONF
-CONF.register_opts(eventlet_backdoor_opts)
-LOG = logging.getLogger(__name__)
-
-
-def list_opts():
-    """Entry point for oslo-config-generator.
-    """
-    return [(None, copy.deepcopy(eventlet_backdoor_opts))]
-
-
-class EventletBackdoorConfigValueError(Exception):
-    def __init__(self, port_range, help_msg, ex):
-        msg = ('Invalid backdoor_port configuration %(range)s: %(ex)s. '
-               '%(help)s' %
-               {'range': port_range, 'ex': ex, 'help': help_msg})
-        super(EventletBackdoorConfigValueError, self).__init__(msg)
-        self.port_range = port_range
-
-
-def _dont_use_this():
-    print("Don't use this, just disconnect instead")
-
-
-def _find_objects(t):
-    return [o for o in gc.get_objects() if isinstance(o, t)]
-
-
-def _print_greenthreads():
-    for i, gt in enumerate(_find_objects(greenlet.greenlet)):
-        print(i, gt)
-        traceback.print_stack(gt.gr_frame)
-        print()
-
-
-def _print_nativethreads():
-    for threadId, stack in sys._current_frames().items():
-        print(threadId)
-        traceback.print_stack(stack)
-        print()
-
-
-def _parse_port_range(port_range):
-    if ':' not in port_range:
-        start, end = port_range, port_range
-    else:
-        start, end = port_range.split(':', 1)
-    try:
-        start, end = int(start), int(end)
-        if end < start:
-            raise ValueError
-        return start, end
-    except ValueError as ex:
-        raise EventletBackdoorConfigValueError(port_range, ex,
-                                               help_for_backdoor_port)
-
-
-def _listen(host, start_port, end_port, listen_func):
-    try_port = start_port
-    while True:
-        try:
-            return listen_func((host, try_port))
-        except socket.error as exc:
-            if (exc.errno != errno.EADDRINUSE or
-                    try_port >= end_port):
-                raise
-            try_port += 1
-
-
-def initialize_if_enabled():
-    backdoor_locals = {
-        'exit': _dont_use_this,      # So we don't exit the entire process
-        'quit': _dont_use_this,      # So we don't exit the entire process
-        'fo': _find_objects,
-        'pgt': _print_greenthreads,
-        'pnt': _print_nativethreads,
-    }
-
-    if CONF.backdoor_port is None:
-        return None
-
-    start_port, end_port = _parse_port_range(str(CONF.backdoor_port))
-
-    # NOTE(johannes): The standard sys.displayhook will print the value of
-    # the last expression and set it to __builtin__._, which overwrites
-    # the __builtin__._ that gettext sets. Let's switch to using pprint
-    # since it won't interact poorly with gettext, and it's easier to
-    # read the output too.
-    def displayhook(val):
-        if val is not None:
-            pprint.pprint(val)
-    sys.displayhook = displayhook
-
-    sock = _listen('localhost', start_port, end_port, eventlet.listen)
-
-    # In the case of backdoor port being zero, a port number is assigned by
-    # listen().  In any case, pull the port number out here.
-    port = sock.getsockname()[1]
-    LOG.info(
-        _LI('Eventlet backdoor listening on %(port)s for process %(pid)d'),
-        {'port': port, 'pid': os.getpid()}
-    )
-    eventlet.spawn_n(eventlet.backdoor.backdoor_server, sock,
-                     locals=backdoor_locals)
-    return port
@@ -1,147 +0,0 @@
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# Copyright 2011 Justin Santa Barbara
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import logging
-import sys
-import time
-
-from eventlet import event
-from eventlet import greenthread
-
-from neutron.openstack.common._i18n import _LE, _LW
-
-LOG = logging.getLogger(__name__)
-
-# NOTE(zyluo): This lambda function was declared to avoid mocking collisions
-#              with time.time() called in the standard logging module
-#              during unittests.
-_ts = lambda: time.time()
-
-
-class LoopingCallDone(Exception):
-    """Exception to break out and stop a LoopingCallBase.
-
-    The poll-function passed to LoopingCallBase can raise this exception to
-    break out of the loop normally. This is somewhat analogous to
-    StopIteration.
-
-    An optional return-value can be included as the argument to the exception;
-    this return-value will be returned by LoopingCallBase.wait()
-
-    """
-
-    def __init__(self, retvalue=True):
-        """:param retvalue: Value that LoopingCallBase.wait() should return."""
-        self.retvalue = retvalue
-
-
-class LoopingCallBase(object):
-    def __init__(self, f=None, *args, **kw):
-        self.args = args
-        self.kw = kw
-        self.f = f
-        self._running = False
-        self.done = None
-
-    def stop(self):
-        self._running = False
-
-    def wait(self):
-        return self.done.wait()
-
-
-class FixedIntervalLoopingCall(LoopingCallBase):
-    """A fixed interval looping call."""
-
-    def start(self, interval, initial_delay=None):
-        self._running = True
-        done = event.Event()
-
-        def _inner():
-            if initial_delay:
-                greenthread.sleep(initial_delay)
-
-            try:
-                while self._running:
-                    start = _ts()
-                    self.f(*self.args, **self.kw)
-                    end = _ts()
-                    if not self._running:
-                        break
-                    delay = end - start - interval
-                    if delay > 0:
-                        LOG.warning(_LW('task %(func_name)r run outlasted '
-                                        'interval by %(delay).2f sec'),
-                                    {'func_name': self.f, 'delay': delay})
-                    greenthread.sleep(-delay if delay < 0 else 0)
-            except LoopingCallDone as e:
-                self.stop()
-                done.send(e.retvalue)
-            except Exception:
-                LOG.exception(_LE('in fixed duration looping call'))
-                done.send_exception(*sys.exc_info())
-                return
-            else:
-                done.send(True)
-
-        self.done = done
-
-        greenthread.spawn_n(_inner)
-        return self.done
-
-
-class DynamicLoopingCall(LoopingCallBase):
-    """A looping call which sleeps until the next known event.
-
-    The function called should return how long to sleep for before being
-    called again.
-    """
-
-    def start(self, initial_delay=None, periodic_interval_max=None):
-        self._running = True
-        done = event.Event()
-
-        def _inner():
-            if initial_delay:
-                greenthread.sleep(initial_delay)
-
-            try:
-                while self._running:
-                    idle = self.f(*self.args, **self.kw)
-                    if not self._running:
-                        break
-
-                    if periodic_interval_max is not None:
-                        idle = min(idle, periodic_interval_max)
-                    LOG.debug('Dynamic looping call %(func_name)r sleeping '
-                              'for %(idle).02f seconds',
-                              {'func_name': self.f, 'idle': idle})
-                    greenthread.sleep(idle)
-            except LoopingCallDone as e:
-                self.stop()
-                done.send(e.retvalue)
-            except Exception:
-                LOG.exception(_LE('in dynamic looping call'))
-                done.send_exception(*sys.exc_info())
-                return
-            else:
-                done.send(True)
-
-        self.done = done
-
-        greenthread.spawn(_inner)
-        return self.done
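The incubator modules deleted here graduated into the oslo.service library that the rest of this merge imports; a minimal sketch of the replacement fixed-interval API (assuming oslo.service is installed):

    from oslo_service import loopingcall

    count = {'n': 0}


    def report_state():
        count['n'] += 1
        print('reporting state (%d)' % count['n'])
        if count['n'] >= 3:
            # Stop the loop cleanly after three iterations (demo only).
            raise loopingcall.LoopingCallDone()


    timer = loopingcall.FixedIntervalLoopingCall(report_state)
    timer.start(interval=1).wait()  # returns once LoopingCallDone is raised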
@@ -1,232 +0,0 @@
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import copy
-import logging
-import random
-import time
-
-from oslo_config import cfg
-import six
-
-from neutron.openstack.common._i18n import _, _LE, _LI
-
-
-periodic_opts = [
-    cfg.BoolOpt('run_external_periodic_tasks',
-                default=True,
-                help='Some periodic tasks can be run in a separate process. '
-                     'Should we run them here?'),
-]
-
-CONF = cfg.CONF
-CONF.register_opts(periodic_opts)
-
-LOG = logging.getLogger(__name__)
-
-DEFAULT_INTERVAL = 60.0
-
-
-def list_opts():
-    """Entry point for oslo-config-generator."""
-    return [(None, copy.deepcopy(periodic_opts))]
-
-
-class InvalidPeriodicTaskArg(Exception):
-    message = _("Unexpected argument for periodic task creation: %(arg)s.")
-
-
-def periodic_task(*args, **kwargs):
-    """Decorator to indicate that a method is a periodic task.
-
-    This decorator can be used in two ways:
-
-        1. Without arguments '@periodic_task', this will be run on the default
-           interval of 60 seconds.
-
-        2. With arguments:
-           @periodic_task(spacing=N [, run_immediately=[True|False]]
-           [, name=[None|"string"])
-           this will be run on approximately every N seconds. If this number
-           is negative the periodic task will be disabled. If the
-           run_immediately argument is provided and has a value of 'True',
-           the first run of the task will be shortly after task scheduler
-           starts. If run_immediately is omitted or set to 'False', the first
-           time the task runs will be approximately N seconds after the task
-           scheduler starts. If name is not provided, __name__ of function is
-           used.
-    """
-    def decorator(f):
-        # Test for old style invocation
-        if 'ticks_between_runs' in kwargs:
-            raise InvalidPeriodicTaskArg(arg='ticks_between_runs')
-
-        # Control if run at all
-        f._periodic_task = True
-        f._periodic_external_ok = kwargs.pop('external_process_ok', False)
-        if f._periodic_external_ok and not CONF.run_external_periodic_tasks:
-            f._periodic_enabled = False
-        else:
-            f._periodic_enabled = kwargs.pop('enabled', True)
-        f._periodic_name = kwargs.pop('name', f.__name__)
-
-        # Control frequency
-        f._periodic_spacing = kwargs.pop('spacing', 0)
-        f._periodic_immediate = kwargs.pop('run_immediately', False)
-        if f._periodic_immediate:
-            f._periodic_last_run = None
-        else:
-            f._periodic_last_run = time.time()
-        return f
-
-    # NOTE(sirp): The `if` is necessary to allow the decorator to be used with
-    # and without parenthesis.
-    #
-    # In the 'with-parenthesis' case (with kwargs present), this function
-    # needs to return a decorator function since the interpreter will invoke
-    # it like:
-    #
-    #   periodic_task(*args, **kwargs)(f)
-    #
-    # In the 'without-parenthesis' case, the original function will be passed
-    # in as the first argument, like:
|
||||
#
|
||||
# periodic_task(f)
|
||||
if kwargs:
|
||||
return decorator
|
||||
else:
|
||||
return decorator(args[0])
|
||||
|
||||
|
||||
class _PeriodicTasksMeta(type):
|
||||
def _add_periodic_task(cls, task):
|
||||
"""Add a periodic task to the list of periodic tasks.
|
||||
|
||||
The task should already be decorated by @periodic_task.
|
||||
|
||||
:return: whether task was actually enabled
|
||||
"""
|
||||
name = task._periodic_name
|
||||
|
||||
if task._periodic_spacing < 0:
|
||||
LOG.info(_LI('Skipping periodic task %(task)s because '
|
||||
'its interval is negative'),
|
||||
{'task': name})
|
||||
return False
|
||||
if not task._periodic_enabled:
|
||||
LOG.info(_LI('Skipping periodic task %(task)s because '
|
||||
'it is disabled'),
|
||||
{'task': name})
|
||||
return False
|
||||
|
||||
# A periodic spacing of zero indicates that this task should
|
||||
# be run on the default interval to avoid running too
|
||||
# frequently.
|
||||
if task._periodic_spacing == 0:
|
||||
task._periodic_spacing = DEFAULT_INTERVAL
|
||||
|
||||
cls._periodic_tasks.append((name, task))
|
||||
cls._periodic_spacing[name] = task._periodic_spacing
|
||||
return True
|
||||
|
||||
def __init__(cls, names, bases, dict_):
|
||||
"""Metaclass that allows us to collect decorated periodic tasks."""
|
||||
super(_PeriodicTasksMeta, cls).__init__(names, bases, dict_)
|
||||
|
||||
# NOTE(sirp): if the attribute is not present then we must be the base
|
||||
# class, so, go ahead an initialize it. If the attribute is present,
|
||||
# then we're a subclass so make a copy of it so we don't step on our
|
||||
# parent's toes.
|
||||
try:
|
||||
cls._periodic_tasks = cls._periodic_tasks[:]
|
||||
except AttributeError:
|
||||
cls._periodic_tasks = []
|
||||
|
||||
try:
|
||||
cls._periodic_spacing = cls._periodic_spacing.copy()
|
||||
except AttributeError:
|
||||
cls._periodic_spacing = {}
|
||||
|
||||
for value in cls.__dict__.values():
|
||||
if getattr(value, '_periodic_task', False):
|
||||
cls._add_periodic_task(value)
|
||||
|
||||
|
||||
def _nearest_boundary(last_run, spacing):
|
||||
"""Find nearest boundary which is in the past, which is a multiple of the
|
||||
spacing with the last run as an offset.
|
||||
|
||||
Eg if last run was 10 and spacing was 7, the new last run could be: 17, 24,
|
||||
31, 38...
|
||||
|
||||
0% to 5% of the spacing value will be added to this value to ensure tasks
|
||||
do not synchronize. This jitter is rounded to the nearest second, this
|
||||
means that spacings smaller than 20 seconds will not have jitter.
|
||||
"""
|
||||
current_time = time.time()
|
||||
if last_run is None:
|
||||
return current_time
|
||||
delta = current_time - last_run
|
||||
offset = delta % spacing
|
||||
# Add up to 5% jitter
|
||||
jitter = int(spacing * (random.random() / 20))
|
||||
return current_time - offset + jitter
|
||||
|
||||
|
||||
@six.add_metaclass(_PeriodicTasksMeta)
|
||||
class PeriodicTasks(object):
|
||||
def __init__(self):
|
||||
super(PeriodicTasks, self).__init__()
|
||||
self._periodic_last_run = {}
|
||||
for name, task in self._periodic_tasks:
|
||||
self._periodic_last_run[name] = task._periodic_last_run
|
||||
|
||||
def add_periodic_task(self, task):
|
||||
"""Add a periodic task to the list of periodic tasks.
|
||||
|
||||
The task should already be decorated by @periodic_task.
|
||||
"""
|
||||
if self.__class__._add_periodic_task(task):
|
||||
self._periodic_last_run[task._periodic_name] = (
|
||||
task._periodic_last_run)
|
||||
|
||||
def run_periodic_tasks(self, context, raise_on_error=False):
|
||||
"""Tasks to be run at a periodic interval."""
|
||||
idle_for = DEFAULT_INTERVAL
|
||||
for task_name, task in self._periodic_tasks:
|
||||
full_task_name = '.'.join([self.__class__.__name__, task_name])
|
||||
|
||||
spacing = self._periodic_spacing[task_name]
|
||||
last_run = self._periodic_last_run[task_name]
|
||||
|
||||
# Check if due, if not skip
|
||||
idle_for = min(idle_for, spacing)
|
||||
if last_run is not None:
|
||||
delta = last_run + spacing - time.time()
|
||||
if delta > 0:
|
||||
idle_for = min(idle_for, delta)
|
||||
continue
|
||||
|
||||
LOG.debug("Running periodic task %(full_task_name)s",
|
||||
{"full_task_name": full_task_name})
|
||||
self._periodic_last_run[task_name] = _nearest_boundary(
|
||||
last_run, spacing)
|
||||
|
||||
try:
|
||||
task(self, context)
|
||||
except Exception:
|
||||
if raise_on_error:
|
||||
raise
|
||||
LOG.exception(_LE("Error during %(full_task_name)s"),
|
||||
{"full_task_name": full_task_name})
|
||||
time.sleep(0)
|
||||
|
||||
return idle_for
|
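A hedged sketch of how the decorator and the PeriodicTasks mixin above
cooperate (the Manager class and the task bodies are invented): tasks register
themselves through the metaclass, and the caller is expected to invoke
run_periodic_tasks() in a loop, sleeping for the idle time it returns::

    from neutron.openstack.common import periodic_task

    class Manager(periodic_task.PeriodicTasks):

        @periodic_task.periodic_task
        def sync_state(self, context):
            pass  # default spacing: DEFAULT_INTERVAL (60 seconds)

        @periodic_task.periodic_task(spacing=10, run_immediately=True)
        def report_state(self, context):
            pass  # roughly every 10 seconds, first run right away

    mgr = Manager()
    # Runs whatever is due and returns how long it is safe to sleep.
    idle = mgr.run_periodic_tasks(context=None)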
@@ -1,507 +0,0 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Generic Node base class for all workers that run on hosts."""

import errno
import io
import logging
import os
import random
import signal
import sys
import time

import eventlet
from eventlet import event
from oslo_config import cfg

from neutron.openstack.common import eventlet_backdoor
from neutron.openstack.common._i18n import _LE, _LI, _LW
from neutron.openstack.common import systemd
from neutron.openstack.common import threadgroup


CONF = cfg.CONF
LOG = logging.getLogger(__name__)


def _sighup_supported():
    return hasattr(signal, 'SIGHUP')


def _is_daemon():
    # The process group for a foreground process will match the
    # process group of the controlling terminal. If those values do
    # not match, or ioctl() fails on the stdout file handle, we assume
    # the process is running in the background as a daemon.
    # http://www.gnu.org/software/bash/manual/bashref.html#Job-Control-Basics
    try:
        is_daemon = os.getpgrp() != os.tcgetpgrp(sys.stdout.fileno())
    except io.UnsupportedOperation:
        # Could not get the fileno for stdout, so we must be a daemon.
        is_daemon = True
    except OSError as err:
        if err.errno == errno.ENOTTY:
            # Assume we are a daemon because there is no terminal.
            is_daemon = True
        else:
            raise
    return is_daemon


def _is_sighup_and_daemon(signo):
    if not (_sighup_supported() and signo == signal.SIGHUP):
        # Avoid checking if we are a daemon, because the signal isn't
        # SIGHUP.
        return False
    return _is_daemon()


def _signo_to_signame(signo):
    signals = {signal.SIGTERM: 'SIGTERM',
               signal.SIGINT: 'SIGINT'}
    if _sighup_supported():
        signals[signal.SIGHUP] = 'SIGHUP'
    return signals[signo]


def _set_signals_handler(handler):
    signal.signal(signal.SIGTERM, handler)
    signal.signal(signal.SIGINT, handler)
    if _sighup_supported():
        signal.signal(signal.SIGHUP, handler)


class Launcher(object):
    """Launch one or more services and wait for them to complete."""

    def __init__(self):
        """Initialize the service launcher.

        :returns: None

        """
        self.services = Services()
        self.backdoor_port = eventlet_backdoor.initialize_if_enabled()

    def launch_service(self, service):
        """Load and start the given service.

        :param service: The service you would like to start.
        :returns: None

        """
        service.backdoor_port = self.backdoor_port
        self.services.add(service)

    def stop(self):
        """Stop all services which are currently running.

        :returns: None

        """
        self.services.stop()

    def wait(self):
        """Waits until all services have been stopped, and then returns.

        :returns: None

        """
        self.services.wait()

    def restart(self):
        """Reload config files and restart service.

        :returns: None

        """
        cfg.CONF.reload_config_files()
        self.services.restart()


class SignalExit(SystemExit):
    def __init__(self, signo, exccode=1):
        super(SignalExit, self).__init__(exccode)
        self.signo = signo


class ServiceLauncher(Launcher):
    def _handle_signal(self, signo, frame):
        # Allow the process to be killed again and die from natural causes
        _set_signals_handler(signal.SIG_DFL)
        raise SignalExit(signo)

    def handle_signal(self):
        _set_signals_handler(self._handle_signal)

    def _wait_for_exit_or_signal(self, ready_callback=None):
        status = None
        signo = 0

        LOG.debug('Full set of CONF:')
        CONF.log_opt_values(LOG, logging.DEBUG)

        try:
            if ready_callback:
                ready_callback()
            super(ServiceLauncher, self).wait()
        except SignalExit as exc:
            signame = _signo_to_signame(exc.signo)
            LOG.info(_LI('Caught %s, exiting'), signame)
            status = exc.code
            signo = exc.signo
        except SystemExit as exc:
            status = exc.code
        finally:
            self.stop()

        return status, signo

    def wait(self, ready_callback=None):
        systemd.notify_once()
        while True:
            self.handle_signal()
            status, signo = self._wait_for_exit_or_signal(ready_callback)
            if not _is_sighup_and_daemon(signo):
                return status
            self.restart()


class ServiceWrapper(object):
    def __init__(self, service, workers):
        self.service = service
        self.workers = workers
        self.children = set()
        self.forktimes = []


class ProcessLauncher(object):
    _signal_handlers_set = set()

    @classmethod
    def _handle_class_signals(cls, *args, **kwargs):
        for handler in cls._signal_handlers_set:
            handler(*args, **kwargs)

    def __init__(self, wait_interval=0.01):
        """Constructor.

        :param wait_interval: The interval to sleep for between checks
                              of child process exit.
        """
        self.children = {}
        self.sigcaught = None
        self.running = True
        self.wait_interval = wait_interval
        rfd, self.writepipe = os.pipe()
        self.readpipe = eventlet.greenio.GreenPipe(rfd, 'r')
        self.handle_signal()

    def handle_signal(self):
        self._signal_handlers_set.add(self._handle_signal)
        _set_signals_handler(self._handle_class_signals)

    def _handle_signal(self, signo, frame):
        self.sigcaught = signo
        self.running = False

        # Allow the process to be killed again and die from natural causes
        _set_signals_handler(signal.SIG_DFL)

    def _pipe_watcher(self):
        # This will block until the write end is closed when the parent
        # dies unexpectedly
        self.readpipe.read(1)

        LOG.info(_LI('Parent process has died unexpectedly, exiting'))

        sys.exit(1)

    def _child_process_handle_signal(self):
        # Setup child signal handlers differently
        def _sighup(*args):
            signal.signal(signal.SIGHUP, signal.SIG_DFL)
            raise SignalExit(signal.SIGHUP)

        # Parent signals with SIGTERM when it wants us to go away.
        signal.signal(signal.SIGTERM, signal.SIG_DFL)
        if _sighup_supported():
            signal.signal(signal.SIGHUP, _sighup)
        # Block SIGINT and let the parent send us a SIGTERM
        signal.signal(signal.SIGINT, signal.SIG_IGN)

    def _child_wait_for_exit_or_signal(self, launcher):
        status = 0
        signo = 0

        # NOTE(johannes): All exceptions are caught to ensure this
        # doesn't fallback into the loop spawning children. It would
        # be bad for a child to spawn more children.
        try:
            launcher.wait()
        except SignalExit as exc:
            signame = _signo_to_signame(exc.signo)
            LOG.info(_LI('Child caught %s, exiting'), signame)
            status = exc.code
            signo = exc.signo
        except SystemExit as exc:
            status = exc.code
        except BaseException:
            LOG.exception(_LE('Unhandled exception'))
            status = 2
        finally:
            launcher.stop()

        return status, signo

    def _child_process(self, service):
        self._child_process_handle_signal()

        # Reopen the eventlet hub to make sure we don't share an epoll
        # fd with parent and/or siblings, which would be bad
        eventlet.hubs.use_hub()

        # Close write to ensure only parent has it open
        os.close(self.writepipe)
        # Create greenthread to watch for parent to close pipe
        eventlet.spawn_n(self._pipe_watcher)

        # Reseed random number generator
        random.seed()

        launcher = Launcher()
        launcher.launch_service(service)
        return launcher

    def _start_child(self, wrap):
        if len(wrap.forktimes) > wrap.workers:
            # Limit ourselves to one process a second (over the period of
            # number of workers * 1 second). This will allow workers to
            # start up quickly but ensure we don't fork off children that
            # die instantly too quickly.
            if time.time() - wrap.forktimes[0] < wrap.workers:
                LOG.info(_LI('Forking too fast, sleeping'))
                time.sleep(1)

            wrap.forktimes.pop(0)

        wrap.forktimes.append(time.time())

        pid = os.fork()
        if pid == 0:
            launcher = self._child_process(wrap.service)
            while True:
                self._child_process_handle_signal()
                status, signo = self._child_wait_for_exit_or_signal(launcher)
                if not _is_sighup_and_daemon(signo):
                    break
                launcher.restart()

            os._exit(status)

        LOG.info(_LI('Started child %d'), pid)

        wrap.children.add(pid)
        self.children[pid] = wrap

        return pid

    def launch_service(self, service, workers=1):
        wrap = ServiceWrapper(service, workers)

        LOG.info(_LI('Starting %d workers'), wrap.workers)
        while self.running and len(wrap.children) < wrap.workers:
            self._start_child(wrap)

    def _wait_child(self):
        try:
            # Don't block if no child processes have exited
            pid, status = os.waitpid(0, os.WNOHANG)
            if not pid:
                return None
        except OSError as exc:
            if exc.errno not in (errno.EINTR, errno.ECHILD):
                raise
            return None

        if os.WIFSIGNALED(status):
            sig = os.WTERMSIG(status)
            LOG.info(_LI('Child %(pid)d killed by signal %(sig)d'),
                     dict(pid=pid, sig=sig))
        else:
            code = os.WEXITSTATUS(status)
            LOG.info(_LI('Child %(pid)s exited with status %(code)d'),
                     dict(pid=pid, code=code))

        if pid not in self.children:
            LOG.warning(_LW('pid %d not in child list'), pid)
            return None

        wrap = self.children.pop(pid)
        wrap.children.remove(pid)
        return wrap

    def _respawn_children(self):
        while self.running:
            wrap = self._wait_child()
            if not wrap:
                # Yield to other threads if no children have exited
                # Sleep for a short time to avoid excessive CPU usage
                # (see bug #1095346)
                eventlet.greenthread.sleep(self.wait_interval)
                continue
            while self.running and len(wrap.children) < wrap.workers:
                self._start_child(wrap)

    def wait(self):
        """Loop waiting on children to die and respawning as necessary."""

        systemd.notify_once()
        LOG.debug('Full set of CONF:')
        CONF.log_opt_values(LOG, logging.DEBUG)

        try:
            while True:
                self.handle_signal()
                self._respawn_children()
                # No signal means that stop was called. Don't clean up here.
                if not self.sigcaught:
                    return

                signame = _signo_to_signame(self.sigcaught)
                LOG.info(_LI('Caught %s, stopping children'), signame)
                if not _is_sighup_and_daemon(self.sigcaught):
                    break

                cfg.CONF.reload_config_files()
                for service in set(
                        [wrap.service for wrap in self.children.values()]):
                    service.reset()

                for pid in self.children:
                    os.kill(pid, signal.SIGHUP)

                self.running = True
                self.sigcaught = None
        except eventlet.greenlet.GreenletExit:
            LOG.info(_LI("Wait called after thread killed. Cleaning up."))

        self.stop()

    def stop(self):
        """Terminate child processes and wait on each."""
        self.running = False
        for pid in self.children:
            try:
                os.kill(pid, signal.SIGTERM)
            except OSError as exc:
                if exc.errno != errno.ESRCH:
                    raise

        # Wait for children to die
        if self.children:
            LOG.info(_LI('Waiting on %d children to exit'), len(self.children))
            while self.children:
                self._wait_child()


class Service(object):
    """Service object for binaries running on hosts."""

    def __init__(self, threads=1000):
        self.tg = threadgroup.ThreadGroup(threads)

        # signal that the service is done shutting itself down:
        self._done = event.Event()

    def reset(self):
        # NOTE(Fengqian): docs for Event.reset() recommend against using it
        self._done = event.Event()

    def start(self):
        pass

    def stop(self, graceful=False):
        self.tg.stop(graceful)
        self.tg.wait()
        # Signal that service cleanup is done:
        if not self._done.ready():
            self._done.send()

    def wait(self):
        self._done.wait()


class Services(object):

    def __init__(self):
        self.services = []
        self.tg = threadgroup.ThreadGroup()
        self.done = event.Event()

    def add(self, service):
        self.services.append(service)
        self.tg.add_thread(self.run_service, service, self.done)

    def stop(self):
        # wait for graceful shutdown of services:
        for service in self.services:
            service.stop()
            service.wait()

        # Each service has performed cleanup, now signal that the run_service
        # wrapper threads can now die:
        if not self.done.ready():
            self.done.send()

        # reap threads:
        self.tg.stop()

    def wait(self):
        self.tg.wait()

    def restart(self):
        self.stop()
        self.done = event.Event()
        for restart_service in self.services:
            restart_service.reset()
            self.tg.add_thread(self.run_service, restart_service, self.done)

    @staticmethod
    def run_service(service, done):
        """Service start wrapper.

        :param service: service to run
        :param done: event to wait on until a shutdown is triggered
        :returns: None

        """
        service.start()
        done.wait()


def launch(service, workers=1):
    if workers is None or workers == 1:
        launcher = ServiceLauncher()
        launcher.launch_service(service)
    else:
        launcher = ProcessLauncher()
        launcher.launch_service(service, workers=workers)

    return launcher
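The launch() helper above picks the launcher from the worker count: a single
worker runs the service in-process under ServiceLauncher, while more workers
fork children through ProcessLauncher, respawning any that die and fanning
SIGHUP out to them for config reloads. A hedged sketch using this module's own
(pre-oslo.service) signature; WorkerService is invented::

    from neutron.openstack.common import service

    class WorkerService(service.Service):
        def start(self):
            # self.tg is the ThreadGroup owned by Service
            self.tg.add_timer(30, lambda: None)

    launcher = service.launch(WorkerService(), workers=2)  # forks 2 children
    launcher.wait()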
@@ -1,105 +0,0 @@
# Copyright 2012-2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Helper module for systemd service readiness notification.
"""

import logging
import os
import socket
import sys


LOG = logging.getLogger(__name__)


def _abstractify(socket_name):
    if socket_name.startswith('@'):
        # abstract namespace socket
        socket_name = '\0%s' % socket_name[1:]
    return socket_name


def _sd_notify(unset_env, msg):
    notify_socket = os.getenv('NOTIFY_SOCKET')
    if notify_socket:
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
        try:
            sock.connect(_abstractify(notify_socket))
            sock.sendall(msg)
            if unset_env:
                del os.environ['NOTIFY_SOCKET']
        except EnvironmentError:
            LOG.debug("Systemd notification failed", exc_info=True)
        finally:
            sock.close()


def notify():
    """Send notification to Systemd that service is ready.

    For details see
    http://www.freedesktop.org/software/systemd/man/sd_notify.html
    """
    _sd_notify(False, 'READY=1')


def notify_once():
    """Send notification once to Systemd that service is ready.

    Systemd sets NOTIFY_SOCKET environment variable with the name of the
    socket listening for notifications from services.
    This method removes the NOTIFY_SOCKET environment variable to ensure
    notification is sent only once.
    """
    _sd_notify(True, 'READY=1')


def onready(notify_socket, timeout):
    """Wait for systemd style notification on the socket.

    :param notify_socket: local socket address
    :type notify_socket: string
    :param timeout: socket timeout
    :type timeout: float
    :returns: 0 service ready
              1 service not ready
              2 timeout occurred
    """
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
    sock.settimeout(timeout)
    sock.bind(_abstractify(notify_socket))
    try:
        msg = sock.recv(512)
    except socket.timeout:
        return 2
    finally:
        sock.close()
    if 'READY=1' in msg:
        return 0
    else:
        return 1


if __name__ == '__main__':
    # simple CLI for testing
    if len(sys.argv) == 1:
        notify()
    elif len(sys.argv) >= 2:
        timeout = float(sys.argv[1])
        notify_socket = os.getenv('NOTIFY_SOCKET')
        if notify_socket:
            retval = onready(notify_socket, timeout)
            sys.exit(retval)
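The handshake above matches systemd's Type=notify contract: systemd exports
NOTIFY_SOCKET for the unit, and the service writes a READY=1 datagram to it
once it can actually serve. A hedged sketch of the service side (the explicit
guard is illustrative only; notify_once() already no-ops when the variable is
absent)::

    import os
    from neutron.openstack.common import systemd

    if os.getenv('NOTIFY_SOCKET'):
        systemd.notify_once()  # unit moves from 'activating' to 'active'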
@@ -1,150 +0,0 @@
# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import threading

import eventlet
from eventlet import greenpool

from neutron.openstack.common._i18n import _LE
from neutron.openstack.common import loopingcall


LOG = logging.getLogger(__name__)


def _thread_done(gt, *args, **kwargs):
    """Callback function to be passed to GreenThread.link() when we spawn()
    Calls the :class:`ThreadGroup` to notify if.

    """
    kwargs['group'].thread_done(kwargs['thread'])


class Thread(object):
    """Wrapper around a greenthread, that holds a reference to the
    :class:`ThreadGroup`. The Thread will notify the :class:`ThreadGroup` when
    it has done so it can be removed from the threads list.
    """
    def __init__(self, thread, group):
        self.thread = thread
        self.thread.link(_thread_done, group=group, thread=self)

    def stop(self):
        self.thread.kill()

    def wait(self):
        return self.thread.wait()

    def link(self, func, *args, **kwargs):
        self.thread.link(func, *args, **kwargs)


class ThreadGroup(object):
    """The point of the ThreadGroup class is to:

    * keep track of timers and greenthreads (making it easier to stop them
      when need be).
    * provide an easy API to add timers.
    """
    def __init__(self, thread_pool_size=10):
        self.pool = greenpool.GreenPool(thread_pool_size)
        self.threads = []
        self.timers = []

    def add_dynamic_timer(self, callback, initial_delay=None,
                          periodic_interval_max=None, *args, **kwargs):
        timer = loopingcall.DynamicLoopingCall(callback, *args, **kwargs)
        timer.start(initial_delay=initial_delay,
                    periodic_interval_max=periodic_interval_max)
        self.timers.append(timer)

    def add_timer(self, interval, callback, initial_delay=None,
                  *args, **kwargs):
        pulse = loopingcall.FixedIntervalLoopingCall(callback, *args, **kwargs)
        pulse.start(interval=interval,
                    initial_delay=initial_delay)
        self.timers.append(pulse)

    def add_thread(self, callback, *args, **kwargs):
        gt = self.pool.spawn(callback, *args, **kwargs)
        th = Thread(gt, self)
        self.threads.append(th)
        return th

    def thread_done(self, thread):
        self.threads.remove(thread)

    def _stop_threads(self):
        current = threading.current_thread()

        # Iterate over a copy of self.threads so thread_done doesn't
        # modify the list while we're iterating
        for x in self.threads[:]:
            if x is current:
                # don't kill the current thread.
                continue
            try:
                x.stop()
            except eventlet.greenlet.GreenletExit:
                pass
            except Exception:
                LOG.exception(_LE('Error stopping thread.'))

    def stop_timers(self):
        for x in self.timers:
            try:
                x.stop()
            except Exception:
                LOG.exception(_LE('Error stopping timer.'))
        self.timers = []

    def stop(self, graceful=False):
        """stop function has the option of graceful=True/False.

        * In case of graceful=True, wait for all threads to be finished.
          Never kill threads.
        * In case of graceful=False, kill threads immediately.
        """
        self.stop_timers()
        if graceful:
            # In case of graceful=True, wait for all threads to be
            # finished, never kill threads
            self.wait()
        else:
            # In case of graceful=False(Default), kill threads
            # immediately
            self._stop_threads()

    def wait(self):
        for x in self.timers:
            try:
                x.wait()
            except eventlet.greenlet.GreenletExit:
                pass
            except Exception:
                LOG.exception(_LE('Error waiting on ThreadGroup.'))
        current = threading.current_thread()

        # Iterate over a copy of self.threads so thread_done doesn't
        # modify the list while we're iterating
        for x in self.threads[:]:
            if x is current:
                continue
            try:
                x.wait()
            except eventlet.greenlet.GreenletExit:
                pass
            except Exception as ex:
                LOG.exception(ex)
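ThreadGroup above is the glue between the looping calls and the service layer:
timers are the looping-call objects, threads are pooled greenthreads, and
stop(graceful=True) drains both instead of killing them. A hedged sketch with
placeholder callables::

    from neutron.openstack.common import threadgroup

    tg = threadgroup.ThreadGroup(thread_pool_size=5)
    tg.add_thread(lambda: None)     # one-shot greenthread
    tg.add_timer(10, lambda: None)  # FixedIntervalLoopingCall every 10s
    tg.stop(graceful=True)          # stop timers, then wait for threads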
@@ -20,6 +20,7 @@ from hyperv.neutron import hyperv_neutron_agent
 from oslo_config import cfg
 from oslo_log import log as logging
 import oslo_messaging
+from oslo_service import loopingcall

 from neutron.agent import rpc as agent_rpc
 from neutron.agent import securitygroups_rpc as sg_rpc

@@ -28,7 +29,6 @@ from neutron.common import rpc as n_rpc
 from neutron.common import topics
 from neutron import context
 from neutron.i18n import _LE
-from neutron.openstack.common import loopingcall

 LOG = logging.getLogger(__name__)
 CONF = cfg.CONF

@@ -25,6 +25,7 @@ eventlet.monkey_patch()
 from oslo_config import cfg
 from oslo_log import log as logging
 import oslo_messaging
+from oslo_service import loopingcall
 import six

 from neutron.agent.common import ovs_lib

@@ -36,7 +37,6 @@ from neutron.common import topics
 from neutron.common import utils as n_utils
 from neutron.i18n import _LE, _LI
 from neutron import context
-from neutron.openstack.common import loopingcall
 from neutron.plugins.ibm.common import constants


@@ -14,12 +14,12 @@
 # under the License.

 from oslo_log import log
+from oslo_service import loopingcall

 from neutron.common import constants as n_constants
 from neutron import context
 from neutron.i18n import _LW
 from neutron import manager
-from neutron.openstack.common import loopingcall
 from neutron.plugins.ml2 import db as l2_db
 from neutron.plugins.ml2 import driver_context

@@ -23,6 +23,8 @@ from oslo_concurrency import lockutils
 from oslo_config import cfg
 from oslo_log import log as logging
 import oslo_messaging
+from oslo_service import periodic_task
+from oslo_service import service as svc

 from neutron.agent.common import config
 from neutron.agent.linux import ip_lib

@@ -33,8 +35,6 @@ from neutron.common import utils as neutron_utils
 from neutron.db import agents_db
 from neutron.i18n import _LE, _LI
 from neutron import manager
-from neutron.openstack.common import periodic_task
-from neutron.openstack.common import service as svc
 from neutron.plugins.ml2.drivers.cisco.apic import mechanism_apic as ma
 from neutron.plugins.ml2.drivers import type_vlan  # noqa

@@ -325,7 +325,7 @@ def launch(binary, manager, topic=None):
     server = service.Service.create(
         binary=binary, manager=manager, topic=topic,
         report_interval=report_period, periodic_interval=poll_period)
-    svc.launch(server).wait()
+    svc.launch(cfg.CONF, server).wait()


 def service_main():
@@ -29,7 +29,7 @@ Basic work flow
 |        | Mechanism Driver |
 +-+--------+----------------------+
   |
-  | ReST API
+  | REST API
   |
 +----------+-------------+
 |       CRD server       |
@@ -29,9 +29,10 @@ eventlet.monkey_patch()
 from oslo_config import cfg
 from oslo_log import log as logging
 import oslo_messaging
+from oslo_service import loopingcall
+from oslo_service import service
 from six import moves

-from neutron.agent import l2population_rpc as l2pop_rpc
 from neutron.agent.linux import ip_lib
 from neutron.agent.linux import utils
 from neutron.agent import rpc as agent_rpc

@@ -43,11 +44,12 @@ from neutron.common import topics
 from neutron.common import utils as q_utils
 from neutron import context
 from neutron.i18n import _LE, _LI, _LW
-from neutron.openstack.common import loopingcall
-from neutron.openstack.common import service
 from neutron.plugins.common import constants as p_const
-from neutron.plugins.linuxbridge.common import config  # noqa
-from neutron.plugins.linuxbridge.common import constants as lconst
+from neutron.plugins.ml2.drivers.l2pop.rpc_manager \
+    import l2population_rpc as l2pop_rpc
+from neutron.plugins.ml2.drivers.linuxbridge.agent.common import config  # noqa
+from neutron.plugins.ml2.drivers.linuxbridge.agent.common \
+    import constants as lconst


 LOG = logging.getLogger(__name__)

@@ -1053,7 +1055,7 @@ def main():
                             polling_interval,
                             quitting_rpc_timeout)
     LOG.info(_LI("Agent initialized successfully, now running... "))
-    launcher = service.launch(agent)
+    launcher = service.launch(cfg.CONF, agent)
     launcher.wait()
@@ -21,8 +21,9 @@ from oslo_log import log as logging
 import six

 from neutron.i18n import _LE, _LW
-from neutron.plugins.sriovnicagent.common import exceptions as exc
-from neutron.plugins.sriovnicagent import pci_lib
+from neutron.plugins.ml2.drivers.mech_sriov.agent.common \
+    import exceptions as exc
+from neutron.plugins.ml2.drivers.mech_sriov.agent import pci_lib

 LOG = logging.getLogger(__name__)

@@ -19,7 +19,8 @@ from oslo_log import log as logging

 from neutron.agent.linux import ip_lib
 from neutron.i18n import _LE, _LW
-from neutron.plugins.sriovnicagent.common import exceptions as exc
+from neutron.plugins.ml2.drivers.mech_sriov.agent.common \
+    import exceptions as exc

 LOG = logging.getLogger(__name__)

@@ -24,6 +24,7 @@ eventlet.monkey_patch()
 from oslo_config import cfg
 from oslo_log import log as logging
 import oslo_messaging
+from oslo_service import loopingcall

 from neutron.agent import rpc as agent_rpc
 from neutron.agent import securitygroups_rpc as sg_rpc

@@ -33,10 +34,10 @@ from neutron.common import topics
 from neutron.common import utils as q_utils
 from neutron import context
 from neutron.i18n import _LE, _LI
-from neutron.openstack.common import loopingcall
-from neutron.plugins.sriovnicagent.common import config  # noqa
-from neutron.plugins.sriovnicagent.common import exceptions as exc
-from neutron.plugins.sriovnicagent import eswitch_manager as esm
+from neutron.plugins.ml2.drivers.mech_sriov.agent.common import config  # noqa
+from neutron.plugins.ml2.drivers.mech_sriov.agent.common \
+    import exceptions as exc
+from neutron.plugins.ml2.drivers.mech_sriov.agent import eswitch_manager as esm


 LOG = logging.getLogger(__name__)

@@ -22,7 +22,8 @@ from neutron.extensions import portbindings
 from neutron.i18n import _LE, _LW
 from neutron.plugins.common import constants as p_const
 from neutron.plugins.ml2 import driver_api as api
-from neutron.plugins.ml2.drivers.mech_sriov import exceptions as exc
+from neutron.plugins.ml2.drivers.mech_sriov.mech_driver \
+    import exceptions as exc


 LOG = log.getLogger(__name__)

@@ -16,7 +16,8 @@ from oslo_config import cfg

 from neutron.agent.common import config
 from neutron.plugins.common import constants as p_const
-from neutron.plugins.openvswitch.common import constants
+from neutron.plugins.ml2.drivers.openvswitch.agent.common \
+    import constants


 DEFAULT_BRIDGE_MAPPINGS = []
@@ -26,11 +26,13 @@ from neutron.common import utils as n_utils


 LOG = logging.getLogger(__name__)
-cfg.CONF.import_group('OVS', 'neutron.plugins.openvswitch.common.config')
+cfg.CONF.import_group('OVS', 'neutron.plugins.ml2.drivers.openvswitch.agent.'
+                      'common.config')


 _main_modules = {
-    'ovs-ofctl': 'neutron.plugins.openvswitch.agent.openflow.ovs_ofctl.main',
+    'ovs-ofctl': 'neutron.plugins.ml2.drivers.openvswitch.agent.openflow.'
+                 'ovs_ofctl.main',
 }

@@ -20,8 +20,9 @@
 """

 from neutron.plugins.common import constants as p_const
-from neutron.plugins.openvswitch.agent.openflow.ovs_ofctl import ovs_bridge
-from neutron.plugins.openvswitch.common import constants
+from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants
+from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.ovs_ofctl \
+    import ovs_bridge


 class OVSIntegrationBridge(ovs_bridge.OVSAgentBridge):

@@ -14,9 +14,11 @@
 # License for the specific language governing permissions and limitations
 # under the License.

-from neutron.plugins.openvswitch.agent.openflow.ovs_ofctl import br_dvr_process
-from neutron.plugins.openvswitch.agent.openflow.ovs_ofctl import ovs_bridge
-from neutron.plugins.openvswitch.common import constants
+from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants
+from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.ovs_ofctl \
+    import br_dvr_process
+from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.ovs_ofctl \
+    import ovs_bridge


 class OVSPhysicalBridge(ovs_bridge.OVSAgentBridge,

@@ -34,9 +34,12 @@ import functools
 import netaddr

 from neutron.agent.common import ovs_lib
-from neutron.plugins.openvswitch.agent.openflow.ovs_ofctl import br_dvr_process
-from neutron.plugins.openvswitch.agent.openflow.ovs_ofctl import ovs_bridge
-from neutron.plugins.openvswitch.common import constants
+from neutron.plugins.ml2.drivers.openvswitch.agent.common \
+    import constants
+from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.ovs_ofctl \
+    import br_dvr_process
+from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.ovs_ofctl \
+    import ovs_bridge


 class OVSTunnelBridge(ovs_bridge.OVSAgentBridge,

@@ -14,10 +14,13 @@
 # License for the specific language governing permissions and limitations
 # under the License.

-from neutron.plugins.openvswitch.agent.openflow.ovs_ofctl import br_int
-from neutron.plugins.openvswitch.agent.openflow.ovs_ofctl import br_phys
-from neutron.plugins.openvswitch.agent.openflow.ovs_ofctl import br_tun
-from neutron.plugins.openvswitch.agent import ovs_neutron_agent
+from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.ovs_ofctl \
+    import br_int
+from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.ovs_ofctl \
+    import br_phys
+from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.ovs_ofctl \
+    import br_tun
+from neutron.plugins.ml2.drivers.openvswitch.agent import ovs_neutron_agent


 def init_config():

@@ -16,7 +16,8 @@


 from neutron.agent.common import ovs_lib
-from neutron.plugins.openvswitch.agent.openflow.ovs_ofctl import ofswitch
+from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.ovs_ofctl \
+    import ofswitch


 class OVSAgentBridge(ofswitch.OpenFlowSwitchMixin, ovs_lib.OVSBridge):

@@ -21,7 +21,7 @@ from neutron.common import constants as n_const
 from neutron.common import utils as n_utils
 from neutron.i18n import _LE, _LI, _LW
 from neutron.plugins.common import constants as p_const
-from neutron.plugins.openvswitch.common import constants
+from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants

 LOG = logging.getLogger(__name__)

@@ -23,13 +23,13 @@ import netaddr
 from oslo_config import cfg
 from oslo_log import log as logging
 import oslo_messaging
+from oslo_service import loopingcall
 import six
 from six import moves

 from neutron.agent.common import ovs_lib
 from neutron.agent.common import polling
 from neutron.agent.common import utils
-from neutron.agent import l2population_rpc
 from neutron.agent.linux import ip_lib
 from neutron.agent import rpc as agent_rpc
 from neutron.agent import securitygroups_rpc as sg_rpc

@@ -41,15 +41,19 @@ from neutron.common import topics
 from neutron.common import utils as q_utils
 from neutron import context
 from neutron.i18n import _LE, _LI, _LW
-from neutron.openstack.common import loopingcall
 from neutron.plugins.common import constants as p_const
-from neutron.plugins.openvswitch.agent import ovs_dvr_neutron_agent
-from neutron.plugins.openvswitch.common import constants
+from neutron.plugins.ml2.drivers.l2pop.rpc_manager import l2population_rpc
+from neutron.plugins.ml2.drivers.openvswitch.agent.common \
+    import constants
+from neutron.plugins.ml2.drivers.openvswitch.agent \
+    import ovs_dvr_neutron_agent


 LOG = logging.getLogger(__name__)
-cfg.CONF.import_group('AGENT', 'neutron.plugins.openvswitch.common.config')
-cfg.CONF.import_group('OVS', 'neutron.plugins.openvswitch.common.config')
+cfg.CONF.import_group('AGENT', 'neutron.plugins.ml2.drivers.openvswitch.'
+                      'agent.common.config')
+cfg.CONF.import_group('OVS', 'neutron.plugins.ml2.drivers.openvswitch.agent.'
+                      'common.config')

 # A placeholder for dead vlans.
 DEAD_VLAN_TAG = p_const.MAX_VLAN_TAG + 1

@@ -196,7 +200,9 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin,
                'arp_responder_enabled':
                self.arp_responder_enabled,
                'enable_distributed_routing':
-               self.enable_distributed_routing},
+               self.enable_distributed_routing,
+               'log_agent_heartbeats':
+               self.conf.AGENT.log_agent_heartbeats},
             'agent_type': q_const.AGENT_TYPE_OVS,
             'start_flag': True}

@@ -854,7 +860,7 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin,
         br_names = []
         for bridge in ovs_bridges:
             bridge_id = ovs.get_bridge_external_bridge_id(bridge)
-            if bridge_id and bridge_id != bridge:
+            if bridge_id != bridge:
                 br_names.append(bridge)
         ovs_bridges.difference_update(br_names)
         ancillary_bridges = []
@@ -1376,7 +1376,9 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
         return self._bind_port_if_needed(port_context)

     @oslo_db_api.wrap_db_retry(max_retries=db_api.MAX_RETRIES,
-                               retry_on_deadlock=True)
+                               retry_on_deadlock=True,
+                               retry_on_request=True)
+    @db_api.convert_db_exception_to_retry(stale_data=True)
     def update_port_status(self, context, port_id, status, host=None,
                            network=None):
         """
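The hunk above stacks a second retry layer on update_port_status: wrap_db_retry
re-invokes the method on deadlocks (and, with retry_on_request, on RetryRequest
exceptions), while the new convert_db_exception_to_retry decorator maps
stale-data errors into retries. A hedged, self-contained sketch of the oslo_db
half only; the failing function is invented, and convert_db_exception_to_retry
is a neutron helper not shown in this diff::

    from oslo_db import api as oslo_db_api
    from oslo_db import exception as db_exc

    attempts = {'n': 0}

    @oslo_db_api.wrap_db_retry(max_retries=3, retry_on_deadlock=True)
    def flaky_update():
        attempts['n'] += 1
        if attempts['n'] == 1:
            raise db_exc.DBDeadlock()  # first call deadlocks...
        return 'ok'                    # ...the retry succeeds

    assert flaky_update() == 'ok'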
@@ -22,6 +22,8 @@ from oslo_concurrency import processutils
 from oslo_config import cfg
 from oslo_log import log as logging
 from oslo_messaging import server as rpc_server
+from oslo_service import loopingcall
+from oslo_service import service as common_service
 from oslo_utils import excutils
 from oslo_utils import importutils

@@ -31,8 +33,6 @@ from neutron import context
 from neutron.db import api as session
 from neutron.i18n import _LE, _LI
 from neutron import manager
-from neutron.openstack.common import loopingcall
-from neutron.openstack.common import service as common_service
 from neutron import wsgi


@@ -111,7 +111,7 @@ def serve_wsgi(cls):
     return service


-class RpcWorker(object):
+class RpcWorker(common_service.ServiceBase):
     """Wraps a worker to be handled by ProcessLauncher"""
     def __init__(self, plugin):
         self._plugin = plugin

@@ -161,7 +161,8 @@ def serve_rpc():
         # be shared DB connections in child processes which may cause
         # DB errors.
         session.dispose()
-        launcher = common_service.ProcessLauncher(wait_interval=1.0)
+        launcher = common_service.ProcessLauncher(cfg.CONF,
+                                                  wait_interval=1.0)
         launcher.launch_service(rpc, workers=cfg.CONF.rpc_workers)
         return launcher
     except Exception:

@@ -18,6 +18,9 @@ import time
 from oslo_config import cfg
 from oslo_log import log as logging
 import oslo_messaging
+from oslo_service import loopingcall
+from oslo_service import periodic_task
+from oslo_service import service
 from oslo_utils import importutils

 from neutron.agent.common import config

@@ -30,9 +33,6 @@ from neutron.common import utils
 from neutron import context
 from neutron.i18n import _LE, _LI, _LW
 from neutron import manager
-from neutron.openstack.common import loopingcall
-from neutron.openstack.common import periodic_task
-from neutron.openstack.common import service
 from neutron import service as neutron_service


@@ -298,4 +298,4 @@ def main():
         report_interval=cfg.CONF.AGENT.report_interval,
         manager='neutron.services.metering.agents.'
                 'metering_agent.MeteringAgentWithStateReport')
-    service.launch(server).wait()
+    service.launch(cfg.CONF, server).wait()
@@ -26,7 +26,7 @@ class AllowedAddressPairTestJSON(base.BaseNetworkTest):

     """
     Tests the Neutron Allowed Address Pair API extension using the Tempest
-    ReST client. The following API operations are tested with this extension:
+    REST client. The following API operations are tested with this extension:

         create port
         list ports
@@ -154,6 +154,7 @@ class DietTestCase(testtools.TestCase):
         self.useFixture(fixtures.NestedTempfile())
         self.useFixture(fixtures.TempHomeDir())

+        self.setup_double_mock_guard()
         self.addCleanup(mock.patch.stopall)

         if bool_from_env('OS_STDOUT_CAPTURE'):

@@ -166,6 +167,34 @@ class DietTestCase(testtools.TestCase):
         self.addOnException(self.check_for_systemexit)
         self.orig_pid = os.getpid()

+    def setup_double_mock_guard(self):
+        # mock.patch.stopall() uses a set in python < 3.4 so patches may not
+        # be unwound in the same order they were applied. This can leak mocks
+        # and cause tests down the line to fail.
+        # More info: http://bugs.python.org/issue21239
+        #
+        # Use mock to patch mock.patch.start to check if a target has already
+        # been patched and fail if it has.
+        self.first_traceback = {}
+        orig_start = mock._patch.start
+
+        def new_start(mself):
+            mytarget = mself.getter()
+            myattr = mself.attribute
+            for patch in mself._active_patches:
+                if (mytarget, myattr) == (patch.target, patch.attribute):
+                    key = str((patch.target, patch.attribute))
+                    self.fail("mock.patch was setup on an already patched "
+                              "target %s.%s. Stop the original patch before "
+                              "starting a new one. Traceback of 1st patch: %s"
+                              % (mytarget, myattr,
+                                 ''.join(self.first_traceback.get(key, []))))
+            self.first_traceback[
+                str((mytarget, myattr))] = traceback.format_stack()[:-2]
+            return orig_start(mself)
+
+        mock.patch('mock._patch.start', new=new_start).start()
+
     def check_for_systemexit(self, exc_info):
         if isinstance(exc_info[1], SystemExit):
             if os.getpid() != self.orig_pid:

@@ -215,7 +244,7 @@ class DietTestCase(testtools.TestCase):
                          {'key': k, 'exp': v, 'act': actual_superset[k]})


-class ProcessMonitorFixture(fixtures.Fixture):
+class ProcessMonitorFixture(tools.SafeFixture):
     """Test fixture to capture and cleanup any spawn process monitor."""
     def setUp(self):
         super(ProcessMonitorFixture, self).setUp()

@@ -381,9 +410,10 @@ class BaseTestCase(DietTestCase):
     cfg.CONF.set_override("notification_driver", notification_driver)


-class PluginFixture(fixtures.Fixture):
+class PluginFixture(tools.SafeFixture):
+
     def __init__(self, core_plugin=None):
         super(PluginFixture, self).__init__()
         self.core_plugin = core_plugin

     def setUp(self):
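The double-patch guard added to DietTestCase above exists because stopall()
could unwind same-target patches out of order on older Pythons. A hedged
sketch of the pattern it rejects; os.path.exists is just an arbitrary target::

    import mock

    first = mock.patch('os.path.exists')
    first.start()
    second = mock.patch('os.path.exists')  # same target, still patched
    second.start()  # inside DietTestCase this now fails the test
    second.stop()
    first.stop()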