Browse Source

Merge remote-tracking branch 'origin/master' into merge-branch

Change-Id: I7bd5d0568e773365eaab1ebd99dcd7186da22dae
tags/7.0.0.0b3
Kyle Mestery 4 years ago
parent
commit
d7e60d59a3
100 changed files with 499 additions and 1516 deletions
  1. +2
    -2
      doc/source/devref/callbacks.rst
  2. +26
    -1
      doc/source/devref/linuxbridge_agent.rst
  3. +1
    -1
      doc/source/policies/thirdparty-ci.rst
  4. +4
    -0
      etc/dhcp_agent.ini
  5. +4
    -0
      etc/l3_agent.ini
  6. +4
    -0
      etc/metadata_agent.ini
  7. +0
    -0
      etc/neutron/plugins/ml2/linuxbridge_agent.ini
  8. +3
    -0
      etc/neutron/plugins/ml2/openvswitch_agent.ini
  9. +3
    -0
      etc/neutron/plugins/vmware/nsx.ini
  10. +2
    -0
      neutron/agent/common/config.py
  11. +3
    -2
      neutron/agent/dhcp/agent.py
  12. +2
    -2
      neutron/agent/dhcp_agent.py
  13. +4
    -3
      neutron/agent/l3/agent.py
  14. +11
    -0
      neutron/agent/l3/dvr_local_router.py
  15. +2
    -2
      neutron/agent/l3_agent.py
  16. +43
    -2
      neutron/agent/linux/dhcp.py
  17. +21
    -6
      neutron/agent/linux/ip_lib.py
  18. +1
    -1
      neutron/agent/linux/polling.py
  19. +2
    -1
      neutron/agent/metadata/agent.py
  20. +6
    -0
      neutron/agent/rpc.py
  21. +1
    -1
      neutron/cmd/eventlet/plugins/ovs_neutron_agent.py
  22. +2
    -1
      neutron/cmd/sanity/checks.py
  23. +6
    -3
      neutron/cmd/sanity_check.py
  24. +1
    -1
      neutron/common/rpc.py
  25. +42
    -0
      neutron/common/test_lib.py
  26. +19
    -7
      neutron/db/agents_db.py
  27. +1
    -1
      neutron/db/agentschedulers_db.py
  28. +21
    -0
      neutron/db/api.py
  29. +0
    -11
      neutron/db/db_base_plugin_common.py
  30. +11
    -28
      neutron/db/db_base_plugin_v2.py
  31. +66
    -52
      neutron/db/ipam_backend_mixin.py
  32. +2
    -1
      neutron/db/migration/migrate_to_ml2.py
  33. +3
    -2
      neutron/db/migration/models/head.py
  34. +2
    -1
      neutron/hacking/checks.py
  35. +36
    -19
      neutron/ipam/__init__.py
  36. +23
    -13
      neutron/ipam/utils.py
  37. +3
    -2
      neutron/manager.py
  38. +0
    -151
      neutron/openstack/common/eventlet_backdoor.py
  39. +0
    -147
      neutron/openstack/common/loopingcall.py
  40. +0
    -232
      neutron/openstack/common/periodic_task.py
  41. +0
    -507
      neutron/openstack/common/service.py
  42. +0
    -105
      neutron/openstack/common/systemd.py
  43. +0
    -150
      neutron/openstack/common/threadgroup.py
  44. +1
    -1
      neutron/plugins/hyperv/agent/l2_agent.py
  45. +1
    -1
      neutron/plugins/ibm/agent/sdnve_neutron_agent.py
  46. +1
    -1
      neutron/plugins/ml2/drivers/cisco/apic/apic_sync.py
  47. +3
    -3
      neutron/plugins/ml2/drivers/cisco/apic/apic_topology.py
  48. +1
    -1
      neutron/plugins/ml2/drivers/freescale/README.fslsdn
  49. +0
    -0
      neutron/plugins/ml2/drivers/l2pop/rpc_manager/__init__.py
  50. +0
    -0
      neutron/plugins/ml2/drivers/l2pop/rpc_manager/l2population_rpc.py
  51. +0
    -0
      neutron/plugins/ml2/drivers/linuxbridge/__init__.py
  52. +0
    -0
      neutron/plugins/ml2/drivers/linuxbridge/agent/README
  53. +0
    -0
      neutron/plugins/ml2/drivers/linuxbridge/agent/__init__.py
  54. +0
    -0
      neutron/plugins/ml2/drivers/linuxbridge/agent/common/__init__.py
  55. +0
    -0
      neutron/plugins/ml2/drivers/linuxbridge/agent/common/config.py
  56. +0
    -0
      neutron/plugins/ml2/drivers/linuxbridge/agent/common/constants.py
  57. +0
    -0
      neutron/plugins/ml2/drivers/linuxbridge/agent/l2network_models_v2.py
  58. +8
    -6
      neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py
  59. +0
    -0
      neutron/plugins/ml2/drivers/linuxbridge/mech_driver/__init__.py
  60. +0
    -0
      neutron/plugins/ml2/drivers/linuxbridge/mech_driver/mech_linuxbridge.py
  61. +0
    -0
      neutron/plugins/ml2/drivers/mech_sriov/agent/__init__.py
  62. +0
    -0
      neutron/plugins/ml2/drivers/mech_sriov/agent/common/__init__.py
  63. +0
    -0
      neutron/plugins/ml2/drivers/mech_sriov/agent/common/config.py
  64. +0
    -0
      neutron/plugins/ml2/drivers/mech_sriov/agent/common/exceptions.py
  65. +3
    -2
      neutron/plugins/ml2/drivers/mech_sriov/agent/eswitch_manager.py
  66. +2
    -1
      neutron/plugins/ml2/drivers/mech_sriov/agent/pci_lib.py
  67. +5
    -4
      neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py
  68. +0
    -0
      neutron/plugins/ml2/drivers/mech_sriov/mech_driver/__init__.py
  69. +0
    -0
      neutron/plugins/ml2/drivers/mech_sriov/mech_driver/exceptions.py
  70. +2
    -1
      neutron/plugins/ml2/drivers/mech_sriov/mech_driver/mech_driver.py
  71. +0
    -0
      neutron/plugins/ml2/drivers/openvswitch/__init__.py
  72. +0
    -0
      neutron/plugins/ml2/drivers/openvswitch/agent/README
  73. +0
    -0
      neutron/plugins/ml2/drivers/openvswitch/agent/__init__.py
  74. +0
    -0
      neutron/plugins/ml2/drivers/openvswitch/agent/common/__init__.py
  75. +2
    -1
      neutron/plugins/ml2/drivers/openvswitch/agent/common/config.py
  76. +0
    -0
      neutron/plugins/ml2/drivers/openvswitch/agent/common/constants.py
  77. +4
    -2
      neutron/plugins/ml2/drivers/openvswitch/agent/main.py
  78. +0
    -0
      neutron/plugins/ml2/drivers/openvswitch/agent/openflow/__init__.py
  79. +0
    -0
      neutron/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/__init__.py
  80. +0
    -0
      neutron/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/br_dvr_process.py
  81. +3
    -2
      neutron/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/br_int.py
  82. +5
    -3
      neutron/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/br_phys.py
  83. +6
    -3
      neutron/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/br_tun.py
  84. +7
    -4
      neutron/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/main.py
  85. +0
    -0
      neutron/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/ofswitch.py
  86. +2
    -1
      neutron/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/ovs_bridge.py
  87. +1
    -1
      neutron/plugins/ml2/drivers/openvswitch/agent/ovs_dvr_neutron_agent.py
  88. +0
    -0
      neutron/plugins/ml2/drivers/openvswitch/agent/ovs_models_v2.py
  89. +14
    -8
      neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py
  90. +0
    -0
      neutron/plugins/ml2/drivers/openvswitch/agent/xenapi/README
  91. +0
    -0
      neutron/plugins/ml2/drivers/openvswitch/agent/xenapi/contrib/build-rpm.sh
  92. +0
    -0
      neutron/plugins/ml2/drivers/openvswitch/agent/xenapi/contrib/rpmbuild/SPECS/openstack-quantum-xen-plugins.spec
  93. +0
    -0
      neutron/plugins/ml2/drivers/openvswitch/agent/xenapi/etc/xapi.d/plugins/netwrap
  94. +0
    -0
      neutron/plugins/ml2/drivers/openvswitch/mech_driver/__init__.py
  95. +0
    -0
      neutron/plugins/ml2/drivers/openvswitch/mech_driver/mech_openvswitch.py
  96. +3
    -1
      neutron/plugins/ml2/plugin.py
  97. +5
    -4
      neutron/service.py
  98. +4
    -4
      neutron/services/metering/agents/metering_agent.py
  99. +1
    -1
      neutron/tests/api/test_allowed_address_pair.py
  100. +32
    -2
      neutron/tests/base.py

+ 2
- 2
doc/source/devref/callbacks.rst View File

@@ -69,7 +69,7 @@ do whatever they are supposed to do. In a callback-less world this would work li
C->my_random_very_difficult_to_remember_method_about_router_created()

If B and/or C change, things become sour. In a callback-based world, things become a lot
more uniform and straightward:
more uniform and straightforward:

::

@@ -319,7 +319,7 @@ Is the registry thread-safe?

Short answer is no: it is not safe to make mutations while callbacks are being called (more
details as to why can be found `here <https://hg.python.org/releasing/2.7.9/file/753a8f457ddc/Objects/dictobject.c#l937>`_).
A mutation could happen if a 'subscribe'/'unsuscribe' operation interleaves with the execution
A mutation could happen if a 'subscribe'/'unsubscribe' operation interleaves with the execution
of the notify loop. Albeit there is a possibility that things may end up in a bad state, the
registry works correctly under the assumption that subscriptions happen at the very beginning
of the life of the process and that the unsubscriptions (if any) take place at the very end.

+ 26
- 1
doc/source/devref/linuxbridge_agent.rst View File

@@ -1,2 +1,27 @@
===============================
L2 Networking with Linux Bridge
-------------------------------
===============================

This Agent uses the `Linux Bridge
<http://www.linuxfoundation.org/collaborate/workgroups/networking/bridge>`_ to
provide L2 connectivity for VM instances running on the compute node to the
public network. A graphical illustration of the deployment can be found in
`OpenStack Admin Guide Linux Bridge
<http://docs.openstack.org/admin-guide-cloud/content/under_the_hood_linuxbridge.html>`_

In most common deployments, there is a compute and a network node. On both the
compute and the network node, the Linux Bridge Agent will manage virtual
switches, connectivity among them, and interaction via virtual ports with other
network components such as namespaces and underlying interfaces. Additionally,
on the compute node, the Linux Bridge Agent will manage security groups.

Three use cases and their packet flow are documented as follows:

1. `Legacy implementation with Linux Bridge
<http://docs.openstack.org/networking-guide/deploy_scenario1b.html>`_

2. `High Availability using L3HA with Linux Bridge
<http://docs.openstack.org/networking-guide/deploy_scenario3b.html>`_

3. `Provider networks with Linux Bridge
<http://docs.openstack.org/networking-guide/deploy_scenario4b.html>`_

+ 1
- 1
doc/source/policies/thirdparty-ci.rst View File

@@ -18,7 +18,7 @@ changes to the infrastructure documentation using this url[2] (and please
review the patches) and check this doc on a regular basis for updates.

[1] http://ci.openstack.org/third_party.html
[2] https://review.openstack.org/#/q/status:open+project:openstack-infra/config+branch:master+topic:third-party,n,z
[2] https://review.openstack.org/#/q/status:open+project:openstack-infra/system-config+branch:master+topic:third-party,n,z

What Changes to Run Against
---------------------------

+ 4
- 0
etc/dhcp_agent.ini View File

@@ -90,3 +90,7 @@
# Timeout for ovs-vsctl commands.
# If the timeout expires, ovs commands will fail with ALARMCLOCK error.
# ovs_vsctl_timeout = 10

[AGENT]
# Log agent heartbeats from this DHCP agent
# log_agent_heartbeats = False

+ 4
- 0
etc/l3_agent.ini View File

@@ -122,3 +122,7 @@

# The advertisement interval in seconds
# ha_vrrp_advert_int = 2

[AGENT]
# Log agent heartbeats from this L3 agent
# log_agent_heartbeats = False

+ 4
- 0
etc/metadata_agent.ini View File

@@ -66,3 +66,7 @@ admin_password = %SERVICE_PASSWORD%
# Otherwise default_ttl specifies time in seconds a cache entry is valid for.
# No cache is used in case no value is passed.
# cache_url = memory://?default_ttl=5

[AGENT]
# Log agent heartbeats from this Metadata agent
# log_agent_heartbeats = False

etc/neutron/plugins/linuxbridge/linuxbridge_conf.ini → etc/neutron/plugins/ml2/linuxbridge_agent.ini View File


etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini → etc/neutron/plugins/ml2/openvswitch_agent.ini View File

@@ -58,6 +58,9 @@
# of_interface = ovs-ofctl

[agent]
# Log agent heartbeats from this OVS agent
# log_agent_heartbeats = False

# Agent's polling interval in seconds
# polling_interval = 2


+ 3
- 0
etc/neutron/plugins/vmware/nsx.ini View File

@@ -156,6 +156,9 @@
# lock management.
# locking_coordinator_url =

# (Optional) DHCP lease time
# dhcp_lease_time = 86400

[nsx]
# Maximum number of ports for each bridged logical switch
# The recommended value for this parameter varies with NSX version

+ 2
- 0
neutron/agent/common/config.py View File

@@ -44,6 +44,8 @@ AGENT_STATE_OPTS = [
help=_('Seconds between nodes reporting state to server; '
'should be less than agent_down_time, best if it '
'is half or less than agent_down_time.')),
cfg.BoolOpt('log_agent_heartbeats', default=False,
help=_('Log agent heartbeats')),
]

INTERFACE_DRIVER_OPTS = [

+ 3
- 2
neutron/agent/dhcp/agent.py View File

@@ -21,6 +21,7 @@ import eventlet
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_service import loopingcall
from oslo_utils import importutils

from neutron.agent.linux import dhcp
@@ -36,7 +37,6 @@ from neutron.common import utils
from neutron import context
from neutron.i18n import _LE, _LI, _LW
from neutron import manager
from neutron.openstack.common import loopingcall

LOG = logging.getLogger(__name__)

@@ -548,7 +548,8 @@ class DhcpAgentWithStateReport(DhcpAgent):
'configurations': {
'dhcp_driver': cfg.CONF.dhcp_driver,
'use_namespaces': cfg.CONF.use_namespaces,
'dhcp_lease_duration': cfg.CONF.dhcp_lease_duration},
'dhcp_lease_duration': cfg.CONF.dhcp_lease_duration,
'log_agent_heartbeats': cfg.CONF.AGENT.log_agent_heartbeats},
'start_flag': True,
'agent_type': constants.AGENT_TYPE_DHCP}
report_interval = cfg.CONF.AGENT.report_interval

+ 2
- 2
neutron/agent/dhcp_agent.py View File

@@ -17,6 +17,7 @@
import sys

from oslo_config import cfg
from oslo_service import service

from neutron.agent.common import config
from neutron.agent.dhcp import config as dhcp_config
@@ -24,7 +25,6 @@ from neutron.agent.linux import interface
from neutron.agent.metadata import config as metadata_config
from neutron.common import config as common_config
from neutron.common import topics
from neutron.openstack.common import service
from neutron import service as neutron_service


@@ -49,4 +49,4 @@ def main():
topic=topics.DHCP_AGENT,
report_interval=cfg.CONF.AGENT.report_interval,
manager='neutron.agent.dhcp.agent.DhcpAgentWithStateReport')
service.launch(server).wait()
service.launch(cfg.CONF, server).wait()

+ 4
- 3
neutron/agent/l3/agent.py View File

@@ -18,6 +18,8 @@ import netaddr
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_service import loopingcall
from oslo_service import periodic_task
from oslo_utils import excutils
from oslo_utils import importutils
from oslo_utils import timeutils
@@ -47,8 +49,6 @@ from neutron.common import topics
from neutron import context as n_context
from neutron.i18n import _LE, _LI, _LW
from neutron import manager
from neutron.openstack.common import loopingcall
from neutron.openstack.common import periodic_task

try:
from neutron_fwaas.services.firewall.agents.l3reference \
@@ -595,7 +595,8 @@ class L3NATAgentWithStateReport(L3NATAgent):
'external_network_bridge': self.conf.external_network_bridge,
'gateway_external_network_id':
self.conf.gateway_external_network_id,
'interface_driver': self.conf.interface_driver},
'interface_driver': self.conf.interface_driver,
'log_agent_heartbeats': self.conf.AGENT.log_agent_heartbeats},
'start_flag': True,
'agent_type': l3_constants.AGENT_TYPE_L3}
report_interval = self.conf.AGENT.report_interval

+ 11
- 0
neutron/agent/l3/dvr_local_router.py View File

@@ -417,6 +417,17 @@ class DvrLocalRouter(router.RouterInfo):
is_first = False
if floating_ips:
is_first = self.fip_ns.subscribe(self.router_id)
if is_first and not fip_agent_port:
LOG.debug("No FloatingIP agent gateway port possibly due to "
"late binding of the private port to the host, "
"requesting agent gateway port for 'network-id' :"
"%s", ex_gw_port['network_id'])
fip_agent_port = self.agent.plugin_rpc.get_agent_gateway_port(
self.agent.context, ex_gw_port['network_id'])
if not fip_agent_port:
LOG.error(_LE("No FloatingIP agent gateway port "
"returned from server for 'network-id': "
"%s"), ex_gw_port['network_id'])
if is_first and fip_agent_port:
if 'subnets' not in fip_agent_port:
LOG.error(_LE('Missing subnet/agent_gateway_port'))

+ 2
- 2
neutron/agent/l3_agent.py View File

@@ -17,6 +17,7 @@
import sys

from oslo_config import cfg
from oslo_service import service

from neutron.agent.common import config
from neutron.agent.l3 import config as l3_config
@@ -26,7 +27,6 @@ from neutron.agent.linux import interface
from neutron.agent.metadata import config as metadata_config
from neutron.common import config as common_config
from neutron.common import topics
from neutron.openstack.common import service
from neutron import service as neutron_service


@@ -51,4 +51,4 @@ def main(manager='neutron.agent.l3.agent.L3NATAgentWithStateReport'):
topic=topics.L3_AGENT,
report_interval=cfg.CONF.AGENT.report_interval,
manager=manager)
service.launch(server).wait()
service.launch(cfg.CONF, server).wait()

+ 43
- 2
neutron/agent/linux/dhcp.py View File

@@ -434,6 +434,44 @@ class Dnsmasq(DhcpLocalProcess):
LOG.debug('Reloading allocations for network: %s', self.network.id)
self.device_manager.update(self.network, self.interface_name)

def _sort_fixed_ips_for_dnsmasq(self, fixed_ips, v6_nets):
"""Sort fixed_ips so that stateless IPv6 subnets appear first.

For example, If a port with v6 extra_dhcp_opts is on a network with
IPv4 and IPv6 stateless subnets. Then dhcp host file will have
below 2 entries for same MAC,

fa:16:3e:8f:9d:65,30.0.0.5,set:aabc7d33-4874-429e-9637-436e4232d2cd
(entry for IPv4 dhcp)
fa:16:3e:8f:9d:65,set:aabc7d33-4874-429e-9637-436e4232d2cd
(entry for stateless IPv6 for v6 options)

dnsmasq internal details for processing host file entries
1) dnsmasq reads the host file from EOF.
2) So it first picks up stateless IPv6 entry,
fa:16:3e:8f:9d:65,set:aabc7d33-4874-429e-9637-436e4232d2cd
3) But dnsmasq doesn't have sufficient checks to skip this entry and
pick next entry, to process dhcp IPv4 request.
4) So dnsmasq uses this entry to process dhcp IPv4 request.
5) As there is no ip in this entry, dnsmasq logs "no address available"
and fails to send DHCPOFFER message.

As we rely on internal details of dnsmasq to understand and fix the
issue, Ihar sent a mail to dnsmasq-discuss mailing list
http://lists.thekelleys.org.uk/pipermail/dnsmasq-discuss/2015q2/
009650.html

So If we reverse the order of writing entries in host file,
so that entry for stateless IPv6 comes first,
then dnsmasq can correctly fetch the IPv4 address.
"""
return sorted(
fixed_ips,
key=lambda fip: ((fip.subnet_id in v6_nets) and (
v6_nets[fip.subnet_id].ipv6_address_mode == (
constants.DHCPV6_STATELESS))),
reverse=True)

def _iter_hosts(self):
"""Iterate over hosts.

@@ -449,8 +487,11 @@ class Dnsmasq(DhcpLocalProcess):
"""
v6_nets = dict((subnet.id, subnet) for subnet in
self.network.subnets if subnet.ip_version == 6)

for port in self.network.ports:
for alloc in port.fixed_ips:
fixed_ips = self._sort_fixed_ips_for_dnsmasq(port.fixed_ips,
v6_nets)
for alloc in fixed_ips:
# Note(scollins) Only create entries that are
# associated with the subnet being managed by this
# dhcp agent
@@ -911,7 +952,7 @@ class DeviceManager(object):
device = ip_lib.IPDevice(device_name, namespace=network.namespace)
gateway = device.route.get_gateway()
if gateway:
gateway = gateway['gateway']
gateway = gateway.get('gateway')

for subnet in network.subnets:
skip_subnet = (

+ 21
- 6
neutron/agent/linux/ip_lib.py View File

@@ -19,6 +19,7 @@ import os
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
import re

from neutron.agent.common import utils
from neutron.common import exceptions
@@ -36,6 +37,8 @@ OPTS = [
LOOPBACK_DEVNAME = 'lo'

SYS_NET_PATH = '/sys/class/net'
DEFAULT_GW_PATTERN = re.compile(r"via (\S+)")
METRIC_PATTERN = re.compile(r"metric (\S+)")


class AddressNotReady(exceptions.NeutronException):
@@ -531,12 +534,13 @@ class IpRouteCommand(IpDeviceCommandBase):
route_list_lines if
x.strip().startswith('default')), None)
if default_route_line:
gateway_index = 2
parts = default_route_line.split()
retval = dict(gateway=parts[gateway_index])
if 'metric' in parts:
metric_index = parts.index('metric') + 1
retval.update(metric=int(parts[metric_index]))
retval = dict()
gateway = DEFAULT_GW_PATTERN.search(default_route_line)
if gateway:
retval.update(gateway=gateway.group(1))
metric = METRIC_PATTERN.search(default_route_line)
if metric:
retval.update(metric=int(metric.group(1)))

return retval

@@ -630,6 +634,17 @@ class IpNeighCommand(IpDeviceCommandBase):
('show',
'dev', self.name))

def flush(self, ip_version, ip_address):
"""Flush neighbour entries

Given address entry is removed from neighbour cache (ARP or NDP). To
flush all entries pass string 'all' as an address.

:param ip_version: Either 4 or 6 for IPv4 or IPv6 respectively
:param ip_address: The prefix selecting the neighbours to flush
"""
self._as_root([ip_version], ('flush', 'to', ip_address))


class IpNetnsCommand(IpCommandBase):
COMMAND = 'netns'

+ 1
- 1
neutron/agent/linux/polling.py View File

@@ -18,7 +18,7 @@ import eventlet

from neutron.agent.common import base_polling
from neutron.agent.linux import ovsdb_monitor
from neutron.plugins.openvswitch.common import constants
from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants


@contextlib.contextmanager

+ 2
- 1
neutron/agent/metadata/agent.py View File

@@ -20,6 +20,7 @@ from neutronclient.v2_0 import client
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_service import loopingcall
import six
import six.moves.urllib.parse as urlparse
import webob
@@ -34,7 +35,6 @@ from neutron.common import utils
from neutron import context
from neutron.i18n import _LE, _LW
from neutron.openstack.common.cache import cache
from neutron.openstack.common import loopingcall

LOG = logging.getLogger(__name__)

@@ -289,6 +289,7 @@ class UnixDomainMetadataProxy(object):
'metadata_proxy_socket': cfg.CONF.metadata_proxy_socket,
'nova_metadata_ip': cfg.CONF.nova_metadata_ip,
'nova_metadata_port': cfg.CONF.nova_metadata_port,
'log_agent_heartbeats': cfg.CONF.AGENT.log_agent_heartbeats,
},
'start_flag': True,
'agent_type': n_const.AGENT_TYPE_METADATA}

+ 6
- 0
neutron/agent/rpc.py View File

@@ -18,6 +18,7 @@ import itertools
from oslo_log import log as logging
import oslo_messaging
from oslo_utils import timeutils
from oslo_utils import uuidutils

from neutron.common import constants
from neutron.common import rpc as n_rpc
@@ -72,6 +73,11 @@ class PluginReportStateAPI(object):

def report_state(self, context, agent_state, use_call=False):
cctxt = self.client.prepare()
# add unique identifier to a report
# that can be logged on server side.
# This creates a visible correspondence between events on
# the agent and on the server
agent_state['uuid'] = uuidutils.generate_uuid()
kwargs = {
'agent_state': {'agent_state': agent_state},
'time': timeutils.strtime(),

+ 1
- 1
neutron/cmd/eventlet/plugins/ovs_neutron_agent.py View File

@@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.

import neutron.plugins.openvswitch.agent.main as agent_main
import neutron.plugins.ml2.drivers.openvswitch.agent.main as agent_main


def main():

+ 2
- 1
neutron/cmd/sanity/checks.py View File

@@ -27,7 +27,8 @@ from neutron.agent.linux import utils as agent_utils
from neutron.common import utils
from neutron.i18n import _LE
from neutron.plugins.common import constants as const
from neutron.plugins.openvswitch.common import constants as ovs_const
from neutron.plugins.ml2.drivers.openvswitch.agent.common \
import constants as ovs_const

LOG = logging.getLogger(__name__)


+ 6
- 3
neutron/cmd/sanity_check.py View File

@@ -25,9 +25,12 @@ from neutron.i18n import _LE, _LW


LOG = logging.getLogger(__name__)
cfg.CONF.import_group('AGENT', 'neutron.plugins.openvswitch.common.config')
cfg.CONF.import_group('OVS', 'neutron.plugins.openvswitch.common.config')
cfg.CONF.import_group('VXLAN', 'neutron.plugins.linuxbridge.common.config')
cfg.CONF.import_group('AGENT', 'neutron.plugins.ml2.drivers.openvswitch.'
'agent.common.config')
cfg.CONF.import_group('OVS', 'neutron.plugins.ml2.drivers.openvswitch.'
'agent.common.config')
cfg.CONF.import_group('VXLAN', 'neutron.plugins.ml2.drivers.linuxbridge.'
'agent.common.config')
cfg.CONF.import_group('ml2', 'neutron.plugins.ml2.config')
cfg.CONF.import_group('ml2_sriov',
'neutron.plugins.ml2.drivers.mech_sriov.mech_driver')

+ 1
- 1
neutron/common/rpc.py View File

@@ -18,10 +18,10 @@ from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_messaging import serializer as om_serializer
from oslo_service import service

from neutron.common import exceptions
from neutron import context
from neutron.openstack.common import service


LOG = logging.getLogger(__name__)

+ 42
- 0
neutron/common/test_lib.py View File

@@ -0,0 +1,42 @@
# Copyright (c) 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Colorizer Code is borrowed from Twisted:
# Copyright (c) 2001-2010 Twisted Matrix Laboratories.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

# describes parameters used by different unit/functional tests
# a plugin-specific testing mechanism should import this dictionary
# and override the values in it if needed (e.g., run_tests.py in
# neutron/plugins/openvswitch/ )
test_config = {}

+ 19
- 7
neutron/db/agents_db.py View File

@@ -29,7 +29,7 @@ from neutron.common import constants
from neutron.db import model_base
from neutron.db import models_v2
from neutron.extensions import agent as ext_agent
from neutron.i18n import _LE, _LW
from neutron.i18n import _LE, _LI, _LW
from neutron import manager

LOG = logging.getLogger(__name__)
@@ -203,22 +203,33 @@ class AgentDbMixin(ext_agent.AgentPluginBase):
agent = self._get_agent(context, id)
return self._make_agent_dict(agent, fields)

def _create_or_update_agent(self, context, agent):
def _log_heartbeat(self, state, agent_db, agent_conf):
if agent_conf.get('log_agent_heartbeats'):
delta = timeutils.utcnow() - agent_db.heartbeat_timestamp
LOG.info(_LI("Heartbeat received from %(type)s agent on "
"host %(host)s, uuid %(uuid)s after %(delta)s"),
{'type': agent_db.agent_type,
'host': agent_db.host,
'uuid': state.get('uuid'),
'delta': delta})

def _create_or_update_agent(self, context, agent_state):
with context.session.begin(subtransactions=True):
res_keys = ['agent_type', 'binary', 'host', 'topic']
res = dict((k, agent[k]) for k in res_keys)
res = dict((k, agent_state[k]) for k in res_keys)

configurations_dict = agent.get('configurations', {})
configurations_dict = agent_state.get('configurations', {})
res['configurations'] = jsonutils.dumps(configurations_dict)
res['load'] = self._get_agent_load(agent)
res['load'] = self._get_agent_load(agent_state)
current_time = timeutils.utcnow()
try:
agent_db = self._get_agent_by_type_and_host(
context, agent['agent_type'], agent['host'])
context, agent_state['agent_type'], agent_state['host'])
res['heartbeat_timestamp'] = current_time
if agent.get('start_flag'):
if agent_state.get('start_flag'):
res['started_at'] = current_time
greenthread.sleep(0)
self._log_heartbeat(agent_state, agent_db, configurations_dict)
agent_db.update(res)
except ext_agent.AgentNotFoundByTypeHost:
greenthread.sleep(0)
@@ -229,6 +240,7 @@ class AgentDbMixin(ext_agent.AgentPluginBase):
agent_db = Agent(**res)
greenthread.sleep(0)
context.session.add(agent_db)
self._log_heartbeat(agent_state, agent_db, configurations_dict)
greenthread.sleep(0)

def create_or_update_agent(self, context, agent):

+ 1
- 1
neutron/db/agentschedulers_db.py View File

@@ -19,6 +19,7 @@ import time

from oslo_config import cfg
from oslo_log import log as logging
from oslo_service import loopingcall
from oslo_utils import timeutils
import sqlalchemy as sa
from sqlalchemy import orm
@@ -32,7 +33,6 @@ from neutron.db import model_base
from neutron.extensions import agent as ext_agent
from neutron.extensions import dhcpagentscheduler
from neutron.i18n import _LE, _LI, _LW
from neutron.openstack.common import loopingcall


LOG = logging.getLogger(__name__)

+ 21
- 0
neutron/db/api.py View File

@@ -14,10 +14,13 @@
# under the License.

import contextlib
import six

from oslo_config import cfg
from oslo_db import exception as os_db_exception
from oslo_db.sqlalchemy import session
from sqlalchemy import exc
from sqlalchemy import orm


_FACADE = None
@@ -64,3 +67,21 @@ def autonested_transaction(sess):
finally:
with session_context as tx:
yield tx


class convert_db_exception_to_retry(object):
"""Converts other types of DB exceptions into RetryRequests."""

def __init__(self, stale_data=False):
self.to_catch = ()
if stale_data:
self.to_catch += (orm.exc.StaleDataError, )

def __call__(self, f):
@six.wraps(f)
def wrapper(*args, **kwargs):
try:
return f(*args, **kwargs)
except self.to_catch as e:
raise os_db_exception.RetryRequest(e)
return wrapper

+ 0
- 11
neutron/db/db_base_plugin_common.py View File

@@ -13,8 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.

import netaddr

from oslo_config import cfg
from oslo_log import log as logging
from sqlalchemy.orm import exc
@@ -25,7 +23,6 @@ from neutron.common import exceptions as n_exc
from neutron.common import utils
from neutron.db import common_db_mixin
from neutron.db import models_v2
from neutron.ipam import utils as ipam_utils

LOG = logging.getLogger(__name__)

@@ -75,14 +72,6 @@ class DbBasePluginCommon(common_db_mixin.CommonDbMixin):
)
context.session.add(allocated)

@classmethod
def _check_gateway_in_subnet(cls, cidr, gateway):
"""Validate that the gateway is on the subnet."""
ip = netaddr.IPAddress(gateway)
if ip.version == 4 or (ip.version == 6 and not ip.is_link_local()):
return ipam_utils.check_subnet_ip(cidr, gateway)
return True

def _make_subnet_dict(self, subnet, fields=None):
res = {'id': subnet['id'],
'name': subnet['name'],

+ 11
- 28
neutron/db/db_base_plugin_v2.py View File

@@ -31,7 +31,6 @@ from neutron.callbacks import resources
from neutron.common import constants
from neutron.common import exceptions as n_exc
from neutron.common import ipv6_utils
from neutron.common import utils
from neutron import context as ctx
from neutron.db import api as db_api
from neutron.db import ipam_non_pluggable_backend
@@ -356,7 +355,7 @@ class NeutronDbPluginV2(ipam_non_pluggable_backend.IpamNonPluggableBackend,
if attributes.is_attr_set(s.get('gateway_ip')):
self._validate_ip_version(ip_ver, s['gateway_ip'], 'gateway_ip')
if (cfg.CONF.force_gateway_on_subnet and
not self._check_gateway_in_subnet(
not ipam.utils.check_gateway_in_subnet(
s['cidr'], s['gateway_ip'])):
error_message = _("Gateway is not valid on subnet")
raise n_exc.InvalidInput(error_message=error_message)
@@ -440,32 +439,11 @@ class NeutronDbPluginV2(ipam_non_pluggable_backend.IpamNonPluggableBackend,
external_gateway_info}}
l3plugin.update_router(context, id, info)

def _make_subnet_request(self, tenant_id, subnet, subnetpool):
    """Build an IPAM subnet request from the API subnet body.

    Returns a SpecificSubnetRequest when a CIDR was supplied,
    otherwise an AnySubnetRequest sized by the requested (or the
    pool's default) prefix length.
    """
    subnet_id = subnet.get('id', uuidutils.generate_uuid())
    cidr = subnet.get('cidr')
    if attributes.is_attr_set(cidr):
        return ipam.SpecificSubnetRequest(tenant_id,
                                          subnet_id,
                                          cidr)
    # "Any" request: the pool picks the CIDR at the desired size.
    prefixlen = subnet['prefixlen']
    if not attributes.is_attr_set(prefixlen):
        prefixlen = int(subnetpool['default_prefixlen'])
    return ipam.AnySubnetRequest(
        tenant_id,
        subnet_id,
        utils.ip_version_from_int(subnetpool['ip_version']),
        prefixlen)

@oslo_db_api.wrap_db_retry(max_retries=db_api.MAX_RETRIES,
retry_on_request=True,
retry_on_deadlock=True)
def _create_subnet_from_pool(self, context, subnet, subnetpool_id):
s = subnet['subnet']
tenant_id = self._get_tenant_id_for_create(context, s)
self._validate_pools_with_subnetpool(s)

with context.session.begin(subtransactions=True):
@@ -474,7 +452,7 @@ class NeutronDbPluginV2(ipam_non_pluggable_backend.IpamNonPluggableBackend,

network = self._get_network(context, s["network_id"])
allocator = subnet_alloc.SubnetAllocator(subnetpool, context)
req = self._make_subnet_request(tenant_id, s, subnetpool)
req = ipam.SubnetRequestFactory.get_request(context, s, subnetpool)

ipam_subnet = allocator.allocate_subnet(req)
detail = ipam_subnet.get_details()
@@ -498,9 +476,8 @@ class NeutronDbPluginV2(ipam_non_pluggable_backend.IpamNonPluggableBackend,
def _create_subnet_from_implicit_pool(self, context, subnet):
s = subnet['subnet']
self._validate_subnet(context, s)
tenant_id = self._get_tenant_id_for_create(context, s)
id = s.get('id', uuidutils.generate_uuid())
detail = ipam.SpecificSubnetRequest(tenant_id,
detail = ipam.SpecificSubnetRequest(s['tenant_id'],
id,
s['cidr'])
with context.session.begin(subtransactions=True):
@@ -571,6 +548,7 @@ class NeutronDbPluginV2(ipam_non_pluggable_backend.IpamNonPluggableBackend,
net = netaddr.IPNetwork(s['cidr'])
subnet['subnet']['cidr'] = '%s/%s' % (net.network, net.prefixlen)

s['tenant_id'] = self._get_tenant_id_for_create(context, s)
subnetpool_id = self._get_subnetpool_id(s)
if not subnetpool_id:
if not has_cidr:
@@ -608,8 +586,13 @@ class NeutronDbPluginV2(ipam_non_pluggable_backend.IpamNonPluggableBackend,
self._validate_subnet(context, s, cur_subnet=db_subnet)

if s.get('gateway_ip') is not None:
allocation_pools = [{'start': p['first_ip'], 'end': p['last_ip']}
for p in db_subnet.allocation_pools]
if s.get('allocation_pools') is not None:
allocation_pools = [{'start': p['start'], 'end': p['end']}
for p in s['allocation_pools']]
else:
allocation_pools = [{'start': p['first_ip'],
'end': p['last_ip']}
for p in db_subnet.allocation_pools]
self._validate_gw_out_of_pools(s["gateway_ip"], allocation_pools)

with context.session.begin(subtransactions=True):

+ 66
- 52
neutron/db/ipam_backend_mixin.py View File

@@ -14,6 +14,7 @@
# under the License.

import collections
import itertools

import netaddr
from oslo_config import cfg
@@ -27,6 +28,7 @@ from neutron.common import ipv6_utils
from neutron.db import db_base_plugin_common
from neutron.db import models_v2
from neutron.i18n import _LI
from neutron.ipam import utils as ipam_utils

LOG = logging.getLogger(__name__)

@@ -174,24 +176,10 @@ class IpamBackendMixin(db_base_plugin_common.DbBasePluginCommon):
a list of dict objects with 'start' and 'end' keys for
defining the pool range.
"""
pools = []
# Auto allocate the pool around gateway_ip
net = netaddr.IPNetwork(subnet['cidr'])
first_ip = net.first + 1
last_ip = net.last - 1
gw_ip = int(netaddr.IPAddress(subnet['gateway_ip'] or net.last))
# Use the gw_ip to find a point for splitting allocation pools
# for this subnet
split_ip = min(max(gw_ip, net.first), net.last)
if split_ip > first_ip:
pools.append({'start': str(netaddr.IPAddress(first_ip)),
'end': str(netaddr.IPAddress(split_ip - 1))})
if split_ip < last_ip:
pools.append({'start': str(netaddr.IPAddress(split_ip + 1)),
'end': str(netaddr.IPAddress(last_ip))})
# return auto-generated pools
# no need to check for their validity
return pools
pools = ipam_utils.generate_pools(subnet['cidr'], subnet['gateway_ip'])
return [{'start': str(netaddr.IPAddress(pool.first)),
'end': str(netaddr.IPAddress(pool.last))}
for pool in pools]

def _validate_subnet_cidr(self, context, network, new_subnet_cidr):
"""Validate the CIDR for a subnet.
@@ -251,7 +239,8 @@ class IpamBackendMixin(db_base_plugin_common.DbBasePluginCommon):
"""
subnet = netaddr.IPNetwork(subnet_cidr)
subnet_first_ip = netaddr.IPAddress(subnet.first + 1)
subnet_last_ip = netaddr.IPAddress(subnet.last - 1)
# last address is broadcast in v4
subnet_last_ip = netaddr.IPAddress(subnet.last - (subnet.version == 4))

LOG.debug("Performing IP validity checks on allocation pools")
ip_sets = []
@@ -308,6 +297,16 @@ class IpamBackendMixin(db_base_plugin_common.DbBasePluginCommon):
pool_2=r_range,
subnet_cidr=subnet_cidr)

def _prepare_allocation_pools(self, context, allocation_pools, subnet):
    """Return validated pools, generating defaults when none were given."""
    if attributes.is_attr_set(allocation_pools):
        self._validate_allocation_pools(allocation_pools, subnet['cidr'])
        gateway = subnet['gateway_ip']
        if gateway:
            self._validate_gw_out_of_pools(gateway, allocation_pools)
        return allocation_pools
    # Caller supplied nothing: carve default pools out of the CIDR.
    return self._allocate_pools_for_subnet(context, subnet)

def _validate_gw_out_of_pools(self, gateway_ip, pools):
for allocation_pool in pools:
pool_range = netaddr.IPRange(
@@ -318,6 +317,15 @@ class IpamBackendMixin(db_base_plugin_common.DbBasePluginCommon):
pool=pool_range,
ip_address=gateway_ip)

def _is_ip_required_by_subnet(self, context, subnet_id, device_owner):
    """Tell whether an IP on this subnet must be retained explicitly.

    Router interface ports always keep their addresses; for any other
    owner, automatic (non-optional, e.g. IPv6 SLAAC) addresses are the
    only ones that may be dropped.
    """
    if device_owner not in constants.ROUTER_INTERFACE_OWNERS:
        subnet = self._get_subnet(context, subnet_id)
        return not ipv6_utils.is_auto_address_subnet(subnet)
    return True

def _get_changed_ips_for_port(self, context, original_ips,
new_ips, device_owner):
"""Calculate changes in IPs for the port."""
@@ -326,30 +334,44 @@ class IpamBackendMixin(db_base_plugin_common.DbBasePluginCommon):
msg = _('Exceeded maximum amount of fixed ips per port')
raise n_exc.InvalidInput(error_message=msg)

# These ips are still on the port and haven't been removed
prev_ips = []

# Remove all of the intersecting elements
for original_ip in original_ips[:]:
for new_ip in new_ips[:]:
if ('ip_address' in new_ip and
original_ip['ip_address'] == new_ip['ip_address']):
original_ips.remove(original_ip)
new_ips.remove(new_ip)
prev_ips.append(original_ip)
break
add_ips = []
remove_ips = []
ips_map = {ip['ip_address']: ip
for ip in itertools.chain(new_ips, original_ips)
if 'ip_address' in ip}

new = set()
for ip in new_ips:
if 'ip_address' in ip:
new.add(ip['ip_address'])
else:
# For ports that are not router ports, retain any automatic
# (non-optional, e.g. IPv6 SLAAC) addresses.
if device_owner not in constants.ROUTER_INTERFACE_OWNERS:
subnet = self._get_subnet(context,
original_ip['subnet_id'])
if (ipv6_utils.is_auto_address_subnet(subnet)):
original_ips.remove(original_ip)
prev_ips.append(original_ip)
return self.Changes(add=new_ips,
add_ips.append(ip)

# Convert original ip addresses to sets
orig = set(ip['ip_address'] for ip in original_ips)

add = new - orig
unchanged = new & orig
remove = orig - new

# Convert results back to list of dicts
add_ips += [ips_map[ip] for ip in add]
prev_ips = [ips_map[ip] for ip in unchanged]

# Mark ip for removing if it is not found in new_ips
# and subnet requires ip to be set manually.
# For auto addresses leave ip unchanged
for ip in remove:
subnet_id = ips_map[ip]['subnet_id']
if self._is_ip_required_by_subnet(context, subnet_id,
device_owner):
remove_ips.append(ips_map[ip])
else:
prev_ips.append(ips_map[ip])

return self.Changes(add=add_ips,
original=prev_ips,
remove=original_ips)
remove=remove_ips)

def _delete_port(self, context, port_id):
query = (context.session.query(models_v2.Port).
@@ -364,17 +386,9 @@ class IpamBackendMixin(db_base_plugin_common.DbBasePluginCommon):
dns_nameservers,
host_routes,
allocation_pools):

if not attributes.is_attr_set(allocation_pools):
allocation_pools = self._allocate_pools_for_subnet(context,
subnet_args)
else:
self._validate_allocation_pools(allocation_pools,
subnet_args['cidr'])
if subnet_args['gateway_ip']:
self._validate_gw_out_of_pools(subnet_args['gateway_ip'],
allocation_pools)

allocation_pools = self._prepare_allocation_pools(context,
allocation_pools,
subnet_args)
self._validate_subnet_cidr(context, network, subnet_args['cidr'])
self._validate_network_subnetpools(network,
subnet_args['subnetpool_id'],

+ 2
- 1
neutron/db/migration/migrate_to_ml2.py View File

@@ -101,7 +101,8 @@ def check_db_schema_version(engine, metadata):
)


# Duplicated from
# neutron.plugins.ml2.drivers.linuxbridge.agent.common.constants to
# avoid having any dependency on the linuxbridge plugin being
# installed.
def interpret_vlan_id(vlan_id):

+ 3
- 2
neutron/db/migration/models/head.py View File

@@ -50,7 +50,6 @@ from neutron.plugins.brocade.db import models as brocade_models # noqa
from neutron.plugins.cisco.db.l3 import l3_models # noqa
from neutron.plugins.cisco.db import n1kv_models_v2 # noqa
from neutron.plugins.cisco.db import network_models_v2 # noqa
from neutron.plugins.linuxbridge.db import l2network_models_v2 # noqa
from neutron.plugins.metaplugin import meta_models_v2 # noqa
from neutron.plugins.ml2.drivers.arista import db # noqa
from neutron.plugins.ml2.drivers.brocade.db import ( # noqa
@@ -60,6 +59,9 @@ from neutron.plugins.ml2.drivers.cisco.n1kv import n1kv_models # noqa
from neutron.plugins.ml2.drivers.cisco.nexus import ( # noqa
nexus_models_v2 as ml2_nexus_models_v2)
from neutron.plugins.ml2.drivers.cisco.ucsm import ucsm_model # noqa
from neutron.plugins.ml2.drivers.linuxbridge.agent import ( # noqa
l2network_models_v2)
from neutron.plugins.ml2.drivers.openvswitch.agent import ovs_models_v2 # noqa
from neutron.plugins.ml2.drivers import type_flat # noqa
from neutron.plugins.ml2.drivers import type_gre # noqa
from neutron.plugins.ml2.drivers import type_vlan # noqa
@@ -67,7 +69,6 @@ from neutron.plugins.ml2.drivers import type_vxlan # noqa
from neutron.plugins.ml2 import models # noqa
from neutron.plugins.nec.db import models as nec_models # noqa
from neutron.plugins.nuage import nuage_models # noqa
from neutron.plugins.openvswitch import ovs_models_v2 # noqa
from neutron.plugins.vmware.dbexts import nsx_models # noqa
from neutron.plugins.vmware.dbexts import nsxv_models # noqa
from neutron.plugins.vmware.dbexts import vcns_models # noqa

+ 2
- 1
neutron/hacking/checks.py View File

@@ -75,7 +75,8 @@ def use_jsonutils(logical_line, filename):
# Some files in the tree are not meant to be run from inside Neutron
# itself, so we should not complain about them not using jsonutils
json_check_skipped_patterns = [
"neutron/plugins/openvswitch/agent/xenapi/etc/xapi.d/plugins/netwrap",
"neutron/plugins/ml2/drivers/openvswitch/agent/xenapi/etc/xapi.d/"
"plugins/netwrap",
]

for pattern in json_check_skipped_patterns:

+ 36
- 19
neutron/ipam/__init__.py View File

@@ -14,10 +14,13 @@ import abc
import netaddr

from oslo_config import cfg
from oslo_utils import uuidutils
import six

from neutron.api.v2 import attributes
from neutron.common import constants
from neutron.common import ipv6_utils
from neutron.common import utils as common_utils
from neutron.ipam import exceptions as ipam_exc


@@ -243,28 +246,42 @@ class RouterGatewayAddressRequest(AddressRequest):
"""Used to request allocating the special router gateway address."""


class BaseRequestFactory(object):
"""Factory class to create right request based on input"""
any_request = None
specific_request = None
address_index = 0
class AddressRequestFactory(object):
"""Builds request using ip info

Additional parameters(port and context) are not used in default
implementation, but planned to be used in sub-classes
provided by specific ipam driver,
"""

@classmethod
def get_request(cls, *args, **kwargs):
args_list = [a for a in args]
address = args_list.pop(cls.address_index)
if not address:
return cls.any_request(*args_list, **kwargs)
def get_request(cls, context, port, ip):
if not ip:
return AnyAddressRequest()
else:
return cls.specific_request(*args, **kwargs)

return SpecificAddressRequest(ip)

class AddressRequestFactory(BaseRequestFactory):
any_request = AnyAddressRequest
specific_request = SpecificAddressRequest

class SubnetRequestFactory(object):
"""Builds request using subnet info"""

class SubnetRequestFactory(BaseRequestFactory):
any_request = AnySubnetRequest
specific_request = SpecificSubnetRequest
address_index = 2
@classmethod
def get_request(cls, context, subnet, subnetpool):
cidr = subnet.get('cidr')
subnet_id = subnet.get('id', uuidutils.generate_uuid())
is_any_subnetpool_request = not attributes.is_attr_set(cidr)

if is_any_subnetpool_request:
prefixlen = subnet['prefixlen']
if not attributes.is_attr_set(prefixlen):
prefixlen = int(subnetpool['default_prefixlen'])

return AnySubnetRequest(
subnet['tenant_id'],
subnet_id,
common_utils.ip_version_from_int(subnetpool['ip_version']),
prefixlen)
else:
return SpecificSubnetRequest(subnet['tenant_id'],
subnet_id,
cidr)

+ 23
- 13
neutron/ipam/utils.py View File

@@ -21,28 +21,38 @@ def check_subnet_ip(cidr, ip_address):
ip = netaddr.IPAddress(ip_address)
net = netaddr.IPNetwork(cidr)
# Check that the IP is valid on subnet. This cannot be the
# network or the broadcast address
return (ip != net.network and ip != net.broadcast
# network or the broadcast address (which exists only in IPv4)
return (ip != net.network
and (net.version == 6 or ip != net.broadcast)
and net.netmask & ip == net.network)


def check_gateway_in_subnet(cidr, gateway):
    """Validate that the gateway is on the subnet."""
    addr = netaddr.IPAddress(gateway)
    # IPv6 link-local gateways are always considered valid: they are
    # not expected to fall inside the subnet CIDR.
    if addr.version == 6 and addr.is_link_local():
        return True
    return check_subnet_ip(cidr, gateway)


def generate_pools(cidr, gateway_ip):
"""Create IP allocation pools for a specified subnet

The Neutron API defines a subnet's allocation pools as a list of
IPRange objects for defining the pool range.
"""
pools = []
# Auto allocate the pool around gateway_ip
net = netaddr.IPNetwork(cidr)
if net.first == net.last:
# handle single address subnet case
return [netaddr.IPRange(net.first, net.last)]
first_ip = net.first + 1
last_ip = net.last - 1
gw_ip = int(netaddr.IPAddress(gateway_ip or net.last))
# Use the gw_ip to find a point for splitting allocation pools
# for this subnet
split_ip = min(max(gw_ip, net.first), net.last)
if split_ip > first_ip:
pools.append(netaddr.IPRange(first_ip, split_ip - 1))
if split_ip < last_ip:
pools.append(netaddr.IPRange(split_ip + 1, last_ip))
return pools
# last address is broadcast in v4
last_ip = net.last - (net.version == 4)
if first_ip >= last_ip:
# /31 lands here
return []
ipset = netaddr.IPSet(netaddr.IPRange(first_ip, last_ip))
if gateway_ip:
ipset.remove(netaddr.IPAddress(gateway_ip))
return list(ipset.iter_ipranges())

+ 3
- 2
neutron/manager.py View File

@@ -18,12 +18,12 @@ import weakref
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_service import periodic_task
from oslo_utils import importutils
import six

from neutron.common import utils
from neutron.i18n import _LE, _LI
from neutron.openstack.common import periodic_task
from neutron.plugins.common import constants

from stevedore import driver
@@ -43,7 +43,8 @@ class Manager(periodic_task.PeriodicTasks):
if not host:
host = cfg.CONF.host
self.host = host
super(Manager, self).__init__()
conf = getattr(self, "conf", cfg.CONF)
super(Manager, self).__init__(conf)

def periodic_tasks(self, context, raise_on_error=False):
self.run_periodic_tasks(context, raise_on_error=raise_on_error)

+ 0
- 151
neutron/openstack/common/eventlet_backdoor.py View File

@@ -1,151 +0,0 @@
# Copyright (c) 2012 OpenStack Foundation.
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from __future__ import print_function

import copy
import errno
import gc
import logging
import os
import pprint
import socket
import sys
import traceback

import eventlet.backdoor
import greenlet
from oslo_config import cfg

from neutron.openstack.common._i18n import _LI

help_for_backdoor_port = (
"Acceptable values are 0, <port>, and <start>:<end>, where 0 results "
"in listening on a random tcp port number; <port> results in listening "
"on the specified port number (and not enabling backdoor if that port "
"is in use); and <start>:<end> results in listening on the smallest "
"unused port number within the specified range of port numbers. The "
"chosen port is displayed in the service's log file.")
eventlet_backdoor_opts = [
cfg.StrOpt('backdoor_port',
help="Enable eventlet backdoor. %s" % help_for_backdoor_port)
]

CONF = cfg.CONF
CONF.register_opts(eventlet_backdoor_opts)
LOG = logging.getLogger(__name__)


def list_opts():
    """Entry point for oslo-config-generator.

    Returns the backdoor options under the default (None) group,
    deep-copied so the generator cannot mutate the live option objects.
    """
    return [(None, copy.deepcopy(eventlet_backdoor_opts))]


class EventletBackdoorConfigValueError(Exception):
    """Raised when the backdoor_port option cannot be parsed."""

    def __init__(self, port_range, help_msg, ex):
        details = {'range': port_range, 'ex': ex, 'help': help_msg}
        message = ('Invalid backdoor_port configuration %(range)s: %(ex)s. '
                   '%(help)s' % details)
        super(EventletBackdoorConfigValueError, self).__init__(message)
        # Kept so callers can report which setting was rejected.
        self.port_range = port_range


def _dont_use_this():
    # Replaces exit/quit in the backdoor shell so an operator cannot
    # kill the whole service from an interactive session.
    print("Don't use this, just disconnect instead")


def _find_objects(t):
return [o for o in gc.get_objects() if isinstance(o, t)]


def _print_greenthreads():
    # Backdoor-shell helper: dump index, object and stack of every live
    # greenthread to stdout for interactive debugging.
    for i, gt in enumerate(_find_objects(greenlet.greenlet)):
        print(i, gt)
        traceback.print_stack(gt.gr_frame)
        print()


def _print_nativethreads():
for threadId, stack in sys._current_frames().items():
print(threadId)
traceback.print_stack(stack)
print()


def _parse_port_range(port_range):
if ':' not in port_range:
start, end = port_range, port_range
else:
start, end = port_range.split(':', 1)
try:
start, end = int(start), int(end)
if end < start:
raise ValueError
return start, end
except ValueError as ex:
raise EventletBackdoorConfigValueError(port_range, ex,
help_for_backdoor_port)


def _listen(host, start_port, end_port, listen_func):
try_port = start_port
while True:
try:
return listen_func((host, try_port))
except socket.error as exc:
if (exc.errno != errno.EADDRINUSE or
try_port >= end_port):
raise
try_port += 1


def initialize_if_enabled():
    """Start the eventlet backdoor server when backdoor_port is configured.

    Returns the TCP port the backdoor actually listens on, or None when
    the feature is disabled (CONF.backdoor_port unset).
    """
    # Names exposed in the interactive backdoor shell.
    backdoor_locals = {
        'exit': _dont_use_this,  # So we don't exit the entire process
        'quit': _dont_use_this,  # So we don't exit the entire process
        'fo': _find_objects,
        'pgt': _print_greenthreads,
        'pnt': _print_nativethreads,
    }

    if CONF.backdoor_port is None:
        return None

    start_port, end_port = _parse_port_range(str(CONF.backdoor_port))

    # NOTE(johannes): The standard sys.displayhook will print the value of
    # the last expression and set it to __builtin__._, which overwrites
    # the __builtin__._ that gettext sets. Let's switch to using pprint
    # since it won't interact poorly with gettext, and it's easier to
    # read the output too.
    def displayhook(val):
        if val is not None:
            pprint.pprint(val)
    sys.displayhook = displayhook

    sock = _listen('localhost', start_port, end_port, eventlet.listen)

    # In the case of backdoor port being zero, a port number is assigned by
    # listen(). In any case, pull the port number out here.
    port = sock.getsockname()[1]
    LOG.info(
        _LI('Eventlet backdoor listening on %(port)s for process %(pid)d'),
        {'port': port, 'pid': os.getpid()}
    )
    # Serve the backdoor in its own greenthread; this call returns at once.
    eventlet.spawn_n(eventlet.backdoor.backdoor_server, sock,
                     locals=backdoor_locals)
    return port

+ 0
- 147
neutron/openstack/common/loopingcall.py View File

@@ -1,147 +0,0 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import logging
import sys
import time

from eventlet import event
from eventlet import greenthread

from neutron.openstack.common._i18n import _LE, _LW

LOG = logging.getLogger(__name__)

# NOTE(zyluo): This lambda function was declared to avoid mocking collisions
# with time.time() called in the standard logging module
# during unittests.
_ts = lambda: time.time()


class LoopingCallDone(Exception):
    """Control-flow exception used to leave a LoopingCallBase cleanly.

    The function driven by a looping call raises this (much like
    StopIteration) to end the loop normally; the optional argument
    becomes the value returned by LoopingCallBase.wait().
    """

    def __init__(self, retvalue=True):
        # The value LoopingCallBase.wait() will hand back.
        self.retvalue = retvalue


class LoopingCallBase(object):
    """Common state and plumbing shared by the looping-call flavours."""

    def __init__(self, f=None, *args, **kw):
        self.f = f          # callable invoked on every iteration
        self.args = args    # positional args forwarded to f
        self.kw = kw        # keyword args forwarded to f
        self._running = False
        self.done = None    # eventlet Event set when the loop finishes

    def stop(self):
        """Ask the loop to stop after the current iteration."""
        self._running = False

    def wait(self):
        """Block until the loop ends and return its final value."""
        return self.done.wait()


class FixedIntervalLoopingCall(LoopingCallBase):
    """A fixed interval looping call."""

    def start(self, interval, initial_delay=None):
        """Run self.f every *interval* seconds in a new greenthread.

        :param interval: seconds between the start of consecutive calls
        :param initial_delay: optional sleep before the first call
        :returns: the eventlet Event that fires when the loop ends
        """
        self._running = True
        done = event.Event()

        def _inner():
            if initial_delay:
                greenthread.sleep(initial_delay)

            try:
                while self._running:
                    start = _ts()
                    self.f(*self.args, **self.kw)
                    end = _ts()
                    if not self._running:
                        break
                    # How far past the interval the call overran; a
                    # negative value is the time left to sleep.
                    delay = end - start - interval
                    if delay > 0:
                        LOG.warning(_LW('task %(func_name)r run outlasted '
                                        'interval by %(delay).2f sec'),
                                    {'func_name': self.f, 'delay': delay})
                    greenthread.sleep(-delay if delay < 0 else 0)
            except LoopingCallDone as e:
                # Normal termination requested by self.f itself.
                self.stop()
                done.send(e.retvalue)
            except Exception:
                LOG.exception(_LE('in fixed duration looping call'))
                done.send_exception(*sys.exc_info())
                return
            else:
                done.send(True)

        self.done = done

        greenthread.spawn_n(_inner)
        return self.done


class DynamicLoopingCall(LoopingCallBase):
    """A looping call which sleeps until the next known event.

    The function called should return how long to sleep for before being
    called again.
    """

    def start(self, initial_delay=None, periodic_interval_max=None):
        """Run self.f in a new greenthread, sleeping as long as it asks.

        :param initial_delay: optional sleep before the first call
        :param periodic_interval_max: cap applied to the requested sleep
        :returns: the eventlet Event that fires when the loop ends
        """
        self._running = True
        done = event.Event()

        def _inner():
            if initial_delay:
                greenthread.sleep(initial_delay)

            try:
                while self._running:
                    # self.f returns the number of seconds to idle next.
                    idle = self.f(*self.args, **self.kw)
                    if not self._running:
                        break

                    if periodic_interval_max is not None:
                        idle = min(idle, periodic_interval_max)
                    LOG.debug('Dynamic looping call %(func_name)r sleeping '
                              'for %(idle).02f seconds',
                              {'func_name': self.f, 'idle': idle})
                    greenthread.sleep(idle)
            except LoopingCallDone as e:
                # Normal termination requested by self.f itself.
                self.stop()
                done.send(e.retvalue)
            except Exception:
                LOG.exception(_LE('in dynamic looping call'))
                done.send_exception(*sys.exc_info())
                return
            else:
                done.send(True)

        self.done = done

        greenthread.spawn(_inner)
        return self.done

+ 0
- 232
neutron/openstack/common/periodic_task.py View File

@@ -1,232 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import copy
import logging
import random
import time

from oslo_config import cfg
import six

from neutron.openstack.common._i18n import _, _LE, _LI


periodic_opts = [
cfg.BoolOpt('run_external_periodic_tasks',
default=True,
help='Some periodic tasks can be run in a separate process. '
'Should we run them here?'),
]

CONF = cfg.CONF
CONF.register_opts(periodic_opts)

LOG = logging.getLogger(__name__)

DEFAULT_INTERVAL = 60.0


def list_opts():
    """Entry point for oslo-config-generator.

    Returns the periodic-task options under the default (None) group,
    deep-copied so the generator cannot mutate the live option objects.
    """
    return [(None, copy.deepcopy(periodic_opts))]


class InvalidPeriodicTaskArg(Exception):
    """Raised when periodic_task() receives a retired/unknown argument."""

    message = _("Unexpected argument for periodic task creation: %(arg)s.")

    def __init__(self, **kwargs):
        # Exception itself rejects keyword arguments, so without this
        # __init__ the call sites raising InvalidPeriodicTaskArg(arg=...)
        # got a bare TypeError instead of this exception. Interpolate
        # the kwargs into the message template instead.
        super(InvalidPeriodicTaskArg, self).__init__(self.message % kwargs)


def periodic_task(*args, **kwargs):
    """Decorator marking a method as a periodic task.

    Two call styles are supported:

    1. Bare ``@periodic_task`` — the task runs on the default interval
       of 60 seconds.

    2. Parameterized, e.g.
       ``@periodic_task(spacing=N, run_immediately=True, name="label")``:
       the task runs approximately every N seconds (a negative N
       disables the task). With run_immediately=True the first run
       happens shortly after the scheduler starts; otherwise it happens
       about N seconds later. name defaults to the function __name__.
    """
    def _apply_markers(f):
        # Reject the pre-rename keyword from very old callers.
        if 'ticks_between_runs' in kwargs:
            raise InvalidPeriodicTaskArg(arg='ticks_between_runs')

        # Whether the task runs at all.
        f._periodic_task = True
        f._periodic_external_ok = kwargs.pop('external_process_ok', False)
        if f._periodic_external_ok and not CONF.run_external_periodic_tasks:
            # Externally-runnable tasks are switched off wholesale here.
            f._periodic_enabled = False
        else:
            f._periodic_enabled = kwargs.pop('enabled', True)
        f._periodic_name = kwargs.pop('name', f.__name__)

        # Scheduling knobs.
        f._periodic_spacing = kwargs.pop('spacing', 0)
        f._periodic_immediate = kwargs.pop('run_immediately', False)
        f._periodic_last_run = (None if f._periodic_immediate
                                else time.time())
        return f

    # NOTE(sirp): bare use passes the function itself as the single
    # positional argument; parameterized use calls us first, so we must
    # hand back the decorator for the interpreter to apply.
    return _apply_markers if kwargs else _apply_markers(args[0])


class _PeriodicTasksMeta(type):
    """Metaclass collecting @periodic_task-decorated methods per class."""

    def _add_periodic_task(cls, task):
        """Add a periodic task to the list of periodic tasks.

        The task should already be decorated by @periodic_task.

        :return: whether task was actually enabled
        """
        name = task._periodic_name

        # Negative spacing is the documented way to disable a task.
        if task._periodic_spacing < 0:
            LOG.info(_LI('Skipping periodic task %(task)s because '
                         'its interval is negative'),
                     {'task': name})
            return False
        if not task._periodic_enabled:
            LOG.info(_LI('Skipping periodic task %(task)s because '
                         'it is disabled'),
                     {'task': name})
            return False

        # A periodic spacing of zero indicates that this task should
        # be run on the default interval to avoid running too
        # frequently.
        if task._periodic_spacing == 0:
            task._periodic_spacing = DEFAULT_INTERVAL

        cls._periodic_tasks.append((name, task))
        cls._periodic_spacing[name] = task._periodic_spacing
        return True

    def __init__(cls, names, bases, dict_):
        """Metaclass that allows us to collect decorated periodic tasks."""
        super(_PeriodicTasksMeta, cls).__init__(names, bases, dict_)

        # NOTE(sirp): if the attribute is not present then we must be the base
        # class, so, go ahead an initialize it. If the attribute is present,
        # then we're a subclass so make a copy of it so we don't step on our
        # parent's toes.
        try:
            cls._periodic_tasks = cls._periodic_tasks[:]
        except AttributeError:
            cls._periodic_tasks = []

        try:
            cls._periodic_spacing = cls._periodic_spacing.copy()
        except AttributeError:
            cls._periodic_spacing = {}

        # Register every decorated method defined directly on this class.
        for value in cls.__dict__.values():
            if getattr(value, '_periodic_task', False):
                cls._add_periodic_task(value)


def _nearest_boundary(last_run, spacing):
"""Find nearest boundary which is in the past, which is a multiple of the
spacing with the last run as an offset.

Eg if last run was 10 and spacing was 7, the new last run could be: 17, 24,
31, 38...

0% to 5% of the spacing value will be added to this value to ensure tasks
do not synchronize. This jitter is rounded to the nearest second, this
means that spacings smaller than 20 seconds will not have jitter.
"""
current_time = time.time()
if last_run is None:
return current_time
delta = current_time - last_run
offset = delta % spacing
# Add up to 5% jitter
jitter = int(spacing * (random.random() / 20))
return current_time - offset + jitter


@six.add_metaclass(_PeriodicTasksMeta)
class PeriodicTasks(object):
    """Mixin that runs the @periodic_task methods the metaclass collected."""

    def __init__(self):
        super(PeriodicTasks, self).__init__()
        # Per-instance copy of each task's last-run timestamp; None means
        # "due immediately".
        self._periodic_last_run = {}
        for name, task in self._periodic_tasks:
            self._periodic_last_run[name] = task._periodic_last_run

    def add_periodic_task(self, task):
        """Add a periodic task to the list of periodic tasks.

        The task should already be decorated by @periodic_task.
        """
        if self.__class__._add_periodic_task(task):
            self._periodic_last_run[task._periodic_name] = (
                task._periodic_last_run)

    def run_periodic_tasks(self, context, raise_on_error=False):
        """Tasks to be run at a periodic interval.

        :returns: seconds until the next task is due; callers sleep on it
        """
        idle_for = DEFAULT_INTERVAL
        for task_name, task in self._periodic_tasks:
            full_task_name = '.'.join([self.__class__.__name__, task_name])

            spacing = self._periodic_spacing[task_name]
            last_run = self._periodic_last_run[task_name]

            # Check if due, if not skip
            idle_for = min(idle_for, spacing)
            if last_run is not None:
                delta = last_run + spacing - time.time()
                if delta > 0:
                    idle_for = min(idle_for, delta)
                    continue

            LOG.debug("Running periodic task %(full_task_name)s",
                      {"full_task_name": full_task_name})
            # Record the run against the spacing grid (with jitter) so
            # the schedule does not drift with task duration.
            self._periodic_last_run[task_name] = _nearest_boundary(
                last_run, spacing)

            try:
                task(self, context)
            except Exception:
                if raise_on_error:
                    raise
                LOG.exception(_LE("Error during %(full_task_name)s"),
                              {"full_task_name": full_task_name})
            # Yield to other greenthreads between tasks.
            time.sleep(0)

        return idle_for

+ 0
- 507
neutron/openstack/common/service.py View File

@@ -1,507 +0,0 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Generic Node base class for all workers that run on hosts."""

import errno
import io
import logging
import os
import random
import signal
import sys
import time

import eventlet
from eventlet import event
from oslo_config import cfg

from neutron.openstack.common import eventlet_backdoor
from neutron.openstack.common._i18n import _LE, _LI, _LW
from neutron.openstack.common import systemd
from neutron.openstack.common import threadgroup


CONF = cfg.CONF
LOG = logging.getLogger(__name__)


def _sighup_supported():
return hasattr(signal, 'SIGHUP')


def _is_daemon():
# The process group for a foreground process will match the
# process group of the controlling terminal. If those values do
# not match, or ioctl() fails on the stdout file handle, we assume
# the process is running in the background as a daemon.
# http://www.gnu.org/software/bash/manual/bashref.html#Job-Control-Basics
try:
is_daemon = os.getpgrp() != os.tcgetpgrp(sys.stdout.fileno())
except io.UnsupportedOperation:
# Could not get the fileno for stdout, so we must be a daemon.
is_daemon = True
except OSError as err:
if err.errno == errno.ENOTTY:
# Assume we are a daemon because there is no terminal.
is_daemon = True
else:
raise
return is_daemon


def _is_sighup_and_daemon(signo):
    """True iff *signo* is SIGHUP and this process runs as a daemon."""
    if _sighup_supported() and signo == signal.SIGHUP:
        return _is_daemon()
    # Either SIGHUP is unsupported here or some other signal arrived;
    # skip the (comparatively expensive) daemon check.
    return False


def _signo_to_signame(signo):
    """Return the symbolic name of a handled signal number.

    Raises KeyError for signals outside the handled set
    (SIGTERM/SIGINT, plus SIGHUP where supported).
    """
    known = {signal.SIGTERM: 'SIGTERM',
             signal.SIGINT: 'SIGINT'}
    if _sighup_supported():
        known[signal.SIGHUP] = 'SIGHUP'
    return known[signo]


def _set_signals_handler(handler):
    """Install *handler* for SIGTERM, SIGINT and (where present) SIGHUP."""
    for signum in (signal.SIGTERM, signal.SIGINT):
        signal.signal(signum, handler)
    if _sighup_supported():
        signal.signal(signal.SIGHUP, handler)


class Launcher(object):
    """Launch one or more services and wait for them to complete."""

    def __init__(self):
        """Create the launcher with an empty service registry.

        :returns: None

        """
        self.services = Services()
        # Backdoor (debug REPL) port, if the feature is enabled in config.
        self.backdoor_port = eventlet_backdoor.initialize_if_enabled()

    def launch_service(self, service):
        """Register *service* and start running it.

        :param service: The service you would like to start.
        :returns: None

        """
        service.backdoor_port = self.backdoor_port
        self.services.add(service)

    def stop(self):
        """Stop every service currently running under this launcher.

        :returns: None

        """
        self.services.stop()

    def wait(self):
        """Block until all services have been stopped, then return.

        :returns: None

        """
        self.services.wait()

    def restart(self):
        """Re-read configuration files and restart all services.

        :returns: None

        """
        cfg.CONF.reload_config_files()
        self.services.restart()


class SignalExit(SystemExit):
    """SystemExit raised in response to a caught termination signal.

    Carries the originating signal number alongside the exit code.
    """

    def __init__(self, signo, exccode=1):
        # Record which signal triggered the exit before delegating the
        # exit-code handling to SystemExit.
        self.signo = signo
        super(SignalExit, self).__init__(exccode)


class ServiceLauncher(Launcher):
    """Launcher that runs services in-process and translates
    SIGTERM/SIGINT (and SIGHUP where supported) into clean shutdowns.
    """

    def _handle_signal(self, signo, frame):
        # Allow the process to be killed again and die from natural causes
        _set_signals_handler(signal.SIG_DFL)
        raise SignalExit(signo)

    def handle_signal(self):
        """Route termination signals to _handle_signal."""
        _set_signals_handler(self._handle_signal)

    def _wait_for_exit_or_signal(self, ready_callback=None):
        """Wait for the services to finish or for a signal to arrive.

        :param ready_callback: optional callable invoked just before
            waiting begins.
        :returns: (status, signo) -- the exit status and the signal
            number that caused the exit (0 when not signal-driven).
        """
        status = None
        signo = 0

        LOG.debug('Full set of CONF:')
        CONF.log_opt_values(LOG, logging.DEBUG)

        try:
            if ready_callback:
                ready_callback()
            super(ServiceLauncher, self).wait()
        except SignalExit as exc:
            # NOTE: SignalExit subclasses SystemExit, so this clause
            # must stay ahead of the SystemExit handler below.
            signame = _signo_to_signame(exc.signo)
            LOG.info(_LI('Caught %s, exiting'), signame)
            status = exc.code
            signo = exc.signo
        except SystemExit as exc:
            status = exc.code
        finally:
            # Always stop services, whether we exited cleanly or not.
            self.stop()

        return status, signo

    def wait(self, ready_callback=None):
        """Run until exit; on SIGHUP, restart when running as a daemon.

        :returns: the process exit status.
        """
        systemd.notify_once()
        while True:
            # Re-arm handlers each pass: _handle_signal restores the
            # defaults when it fires.
            self.handle_signal()
            status, signo = self._wait_for_exit_or_signal(ready_callback)
            if not _is_sighup_and_daemon(signo):
                return status
            self.restart()


class ServiceWrapper(object):
    """Record pairing a service with its worker-process bookkeeping."""

    def __init__(self, service, workers):
        self.service = service
        self.workers = workers
        # Both collections start empty; they are populated elsewhere
        # by the process launcher.
        self.children = set()
        self.forktimes = []


class ProcessLauncher(object):
_signal_handlers_set = set()

@classmethod
def _handle_class_signals(cls, *args, **kwargs):
    """Fan a caught signal out to every registered instance handler."""
    for registered in cls._signal_handlers_set:
        registered(*args, **kwargs)

def __init__(self, wait_interval=0.01):
    """Constructor.

    :param wait_interval: seconds to sleep between checks of child
        process exit.
    """
    self.children = {}
    self.sigcaught = None
    self.running = True
    self.wait_interval = wait_interval
    # Keep the write end as a raw fd; wrap the read end so a
    # greenthread can block on it without stalling the hub.
    read_fd, self.writepipe = os.pipe()
    self.readpipe = eventlet.greenio.GreenPipe(read_fd, 'r')
    self.handle_signal()

def handle_signal(self):
    """Register this instance for process-wide signal dispatch."""
    self._signal_handlers_set.add(self._handle_signal)
    _set_signals_handler(self._handle_class_signals)

def _handle_signal(self, signo, frame):
    """Record the caught signal and ask the main loop to stop."""
    self.sigcaught = signo
    self.running = False

    # Allow the process to be killed again and die from natural causes
    _set_signals_handler(signal.SIG_DFL)

def _pipe_watcher(self):
    """Exit this child when the parent process disappears.

    The parent holds the pipe's write end; the read below only
    returns once that end is closed, i.e. the parent has died.
    """
    self.readpipe.read(1)

    LOG.info(_LI('Parent process has died unexpectedly, exiting'))

    sys.exit(1)

def _child_process_handle_signal(self):
    """Reconfigure signal handling for a freshly forked child."""
    def _on_sighup(*args):
        # One-shot: restore the default handler, then surface the
        # SIGHUP as a SignalExit.
        signal.signal(signal.SIGHUP, signal.SIG_DFL)
        raise SignalExit(signal.SIGHUP)

    # Parent signals with SIGTERM when it wants us to go away.
    signal.signal(signal.SIGTERM, signal.SIG_DFL)
    if _sighup_supported():
        signal.signal(signal.SIGHUP, _on_sighup)
    # Ignore SIGINT here; the parent sends us a SIGTERM instead.
    signal.signal(signal.SIGINT, signal.SIG_IGN)

def _child_wait_for_exit_or_signal(self, launcher):
    """Run *launcher* inside a child process and report how it ended.

    :param launcher: a Launcher whose wait()/stop() drive the child.
    :returns: (status, signo) -- the exit status and the signal number
        that caused the exit (0 when not signal-driven).
    """
    status = 0
    signo = 0

    # NOTE(johannes): All exceptions are caught to ensure this
    # doesn't fallback into the loop spawning children. It would
    # be bad for a child to spawn more children.
    try:
        launcher.wait()
    except SignalExit as exc:
        # SignalExit subclasses SystemExit, so this clause must stay
        # ahead of the SystemExit handler below.
        signame = _signo_to_signame(exc.signo)
        LOG.info(_LI('Child caught %s, exiting'), signame)
        status = exc.code
        signo = exc.signo
    except SystemExit as exc:
        status = exc.code
    except BaseException:
        # Last-resort catch: never let an unhandled error escape into
        # the parent's child-spawning loop.
        LOG.exception(_LE('Unhandled exception'))
        status = 2
    finally:
        launcher.stop()

    return status, signo

def _child_process(self, service):
self._child_process_handle_signal()

# Reopen the eventlet hub to make sure we don't share an epoll
# fd with parent and/or siblings, which would be bad
eventlet.hubs.use_hub()

# Close write to ensure only parent has it open
os.close(self.writepipe)
# Create greenthread to watch for parent to close pipe