[OVN] Check metadata HA proxy configuration before restart
Since [1], the OVN Metadata agent supports IPv6. When the agent is updated, the HAProxy instances it manages need to be reconfigured and restarted. However, that only has to happen once: on subsequent restarts of the OVN agent, HAProxy instances whose configuration is already up to date (that is, it already includes the IPv6 support) are left running.

[1] https://review.opendev.org/c/openstack/neutron/+/894026

Closes-Bug: #2079996
Change-Id: Id0f678c7ffe162df42e18dfebb97dce677fc79fc
commit 7b7f8d986a
parent 0ac6cbec75
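Note: in short, spawn_monitored_metadata_proxy() now builds the HAProxy configuration it is about to apply, compares it with the file already on disk, and restarts the proxy only when they differ. A condensed sketch of that decision, using only names that appear in the diff below (the rest of the method body and error handling are omitted):

    # Sketch of the new check inside MetadataDriverBase.spawn_monitored_metadata_proxy()
    haproxy = cls._get_haproxy_configurator(
        bind_address, port, conf, network_id=network_id, router_id=router_id,
        bind_address_v6=bind_address_v6, bind_interface=bind_interface)
    if haproxy.is_config_file_obsolete():
        # The on-disk config no longer matches the one we would generate
        # (e.g. it predates the IPv6 support), so stop the running instance;
        # it is respawned afterwards with the new configuration.
        cls.destroy_monitored_metadata_proxy(
            monitor, haproxy.resource_id, conf, ns_name)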
@@ -36,6 +36,7 @@ from oslo_utils import fileutils
 import psutil

 from neutron.api import wsgi
 from neutron.common import utils
 from neutron.conf.agent import common as config
+from neutron.privileged.agent.linux import utils as priv_utils

@@ -399,6 +400,19 @@ def delete_if_exists(path, run_as_root=False):
     fileutils.delete_if_exists(path)


+def read_if_exists(path: str, run_as_root=False) -> str:
+    """Return the content of a text file as a string
+
+    The output includes the empty lines too. If the file does not exist,
+    returns an empty string.
+    It could be called with elevated permissions (root).
+    """
+    if run_as_root:
+        return priv_utils.read_file(path)
+    else:
+        return utils.read_file(path)
+
+
 class UnixDomainHTTPConnection(httplib.HTTPConnection):
     """Connection class for HTTP over UNIX domain socket."""
     def __init__(self, host, port=None, strict=None, timeout=None,
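Note: a brief usage sketch of the new helper. The file path below is purely illustrative and the snippet assumes an environment where the neutron agent code is importable:

    from neutron.agent.linux import utils as linux_utils

    # Returns the whole file, blank lines included, or '' if the file is missing.
    content = linux_utils.read_if_exists(
        '/var/lib/neutron/external/example.conf', run_as_root=True)
    if not content:
        print('no existing configuration found')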
@@ -46,7 +46,7 @@ listen listener
 """


-class HaproxyConfiguratorBase(object):
+class HaproxyConfiguratorBase(object, metaclass=abc.ABCMeta):
     PROXY_CONFIG_DIR = None
     HEADER_CONFIG_TEMPLATE = None

@@ -76,9 +76,27 @@ class HaproxyConfiguratorBase(object):
         # /var/log/haproxy.log on Debian distros, instead of to syslog.
         uuid = network_id or router_id
         self.log_tag = "haproxy-{}-{}".format(METADATA_SERVICE_NAME, uuid)
+        self._haproxy_cfg = ''
+        self._resource_id = None
+        self._create_config()

-    def create_config_file(self):
-        """Create the config file for haproxy."""
+    @property
+    def haproxy_cfg(self) -> str:
+        return self._haproxy_cfg
+
+    @property
+    def resource_id(self) -> str:
+        return self._resource_id
+
+    def _create_config(self) -> None:
+        """Create the configuration for haproxy, stored locally
+
+        This method creates a string with the HAProxy configuration, stored in
+        ``self._haproxy_cfg``. It also stores the resource ID (network, router)
+        in ``self._resource_id``.
+
+        This method must be called once in the init method.
+        """
         # Need to convert uid/gid into username/group
         try:
             username = pwd.getpwuid(int(self.user)).pw_name
@@ -127,27 +145,49 @@ class HaproxyConfiguratorBase(object):
             cfg_info['res_type'] = 'Router'
             cfg_info['res_id'] = self.router_id
             cfg_info['res_type_del'] = 'Network'
+        self._resource_id = cfg_info['res_id']
+        self._haproxy_cfg = comm_meta.get_haproxy_config(
+            cfg_info, self.rate_limiting_config,
+            self.HEADER_CONFIG_TEMPLATE, _UNLIMITED_CONFIG_TEMPLATE)

-        haproxy_cfg = comm_meta.get_haproxy_config(cfg_info,
-                                                   self.rate_limiting_config,
-                                                   self.HEADER_CONFIG_TEMPLATE,
-                                                   _UNLIMITED_CONFIG_TEMPLATE)
-
-        LOG.debug("haproxy_cfg = %s", haproxy_cfg)
+    def create_config_file(self):
+        """Read the configuration stored and write the configuration file"""
+        LOG.debug("haproxy_cfg = %s", self.haproxy_cfg)
         cfg_dir = self.get_config_path(self.state_path)
         # uuid has to be included somewhere in the command line so that it can
         # be tracked by process_monitor.
-        self.cfg_path = os.path.join(cfg_dir, "%s.conf" % cfg_info['res_id'])
+        self.cfg_path = os.path.join(cfg_dir, "%s.conf" % self.resource_id)
         if not os.path.exists(cfg_dir):
             os.makedirs(cfg_dir)
         with open(self.cfg_path, "w") as cfg_file:
-            cfg_file.write(haproxy_cfg)
+            cfg_file.write(self.haproxy_cfg)

     @classmethod
     def get_config_path(cls, state_path):
         return os.path.join(state_path or cfg.CONF.state_path,
                             cls.PROXY_CONFIG_DIR)

+    def read_config_file(self) -> str:
+        """Return a string with the content of the configuration file"""
+        cfg_path = os.path.join(self.get_config_path(self.state_path),
+                                '%s.conf' % self.resource_id)
+        return linux_utils.read_if_exists(str(cfg_path), run_as_root=True)
+
+    def is_config_file_obsolete(self) -> bool:
+        """Compare the instance config and the config file content
+
+        Returns False if both configurations match. This check skips the
+        "pidfile" line because that is provided just before the process is
+        started.
+        """
+        def trim_config(haproxy_cfg: str) -> list[str]:
+            return [line for line in haproxy_cfg.split('\n')
+                    if not line.lstrip().startswith('pidfile')]
+
+        file_config = trim_config(self.read_config_file())
+        current_config = trim_config(self.haproxy_cfg)
+        return file_config != current_config
+
     @classmethod
     def cleanup_config_file(cls, uuid, state_path):
         """Delete config file created when metadata proxy was spawned."""
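Note: the "skip the pidfile line" comparison done by is_config_file_obsolete() can be shown in isolation. The standalone snippet below mirrors the trim_config() helper above, with made-up configuration strings:

    def _trim(cfg: str) -> list[str]:
        # The pid file path is injected just before the process starts, so a
        # differing "pidfile" line must not mark the configuration as obsolete.
        return [line for line in cfg.split('\n')
                if not line.lstrip().startswith('pidfile')]

    on_disk = "global\n    pidfile /run/old.pid\n    user neutron\n"
    generated = "global\n    pidfile /run/new.pid\n    user neutron\n"
    assert _trim(on_disk) == _trim(generated)  # not obsolete -> no restart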
@@ -174,31 +214,39 @@ class MetadataDriverBase(object, metaclass=abc.ABCMeta):

         return user, group

+    @classmethod
+    def _get_haproxy_configurator(cls, bind_address, port, conf,
+                                  network_id=None, router_id=None,
+                                  bind_address_v6=None,
+                                  bind_interface=None,
+                                  pid_file=''):
+        metadata_proxy_socket = conf.metadata_proxy_socket
+        user, group = cls._get_metadata_proxy_user_group(conf)
+        configurator = cls.haproxy_configurator()
+        return configurator(network_id,
+                            router_id,
+                            metadata_proxy_socket,
+                            bind_address,
+                            port,
+                            user,
+                            group,
+                            conf.state_path,
+                            pid_file,
+                            conf.metadata_rate_limiting,
+                            bind_address_v6,
+                            bind_interface)
+
     @classmethod
     def _get_metadata_proxy_callback(cls, bind_address, port, conf,
                                      network_id=None, router_id=None,
                                      bind_address_v6=None,
                                      bind_interface=None):
         def callback(pid_file):
-            metadata_proxy_socket = conf.metadata_proxy_socket
-            user, group = cls._get_metadata_proxy_user_group(conf)
-            configurator = cls.haproxy_configurator()
-            haproxy = configurator(network_id,
-                                   router_id,
-                                   metadata_proxy_socket,
-                                   bind_address,
-                                   port,
-                                   user,
-                                   group,
-                                   conf.state_path,
-                                   pid_file,
-                                   conf.metadata_rate_limiting,
-                                   bind_address_v6,
-                                   bind_interface)
+            haproxy = cls._get_haproxy_configurator(
+                bind_address, port, conf, network_id, router_id,
+                bind_address_v6, bind_interface, pid_file)
             haproxy.create_config_file()
-            proxy_cmd = [HAPROXY_SERVICE, '-f', haproxy.cfg_path]
-
-            return proxy_cmd
+            return [HAPROXY_SERVICE, '-f', haproxy.cfg_path]

         return callback

@@ -238,6 +286,18 @@ class MetadataDriverBase(object, metaclass=abc.ABCMeta):
                 # Do not use the address or interface when DAD fails
                 bind_address_v6 = bind_interface = None

+        # If the HAProxy running instance configuration is different from
+        # the one passed in this call, the HAProxy is stopped. The new
+        # configuration will be written to the disk and a new instance
+        # started.
+        haproxy_cfg = cls._get_haproxy_configurator(
+            bind_address, port, conf, network_id=network_id,
+            router_id=router_id, bind_address_v6=bind_address_v6,
+            bind_interface=bind_interface)
+        if haproxy_cfg.is_config_file_obsolete():
+            cls.destroy_monitored_metadata_proxy(
+                monitor, haproxy_cfg.resource_id, conf, ns_name)
+
         uuid = network_id or router_id
         callback = cls._get_metadata_proxy_callback(
             bind_address, port, conf,
@@ -376,9 +376,6 @@ class MetadataAgent(object):
             resource_type='metadata')
         self._sb_idl = None
         self._post_fork_event = threading.Event()
-        # We'll restart all haproxy instances upon start so that they honor
-        # any potential changes in their configuration.
-        self.restarted_metadata_proxy_set = set()
         self._chassis = None

     @property
@@ -832,11 +829,6 @@ class MetadataAgent(object):
         # Ensure the correct checksum in the metadata traffic.
         self._ensure_datapath_checksum(namespace)

-        if net_name not in self.restarted_metadata_proxy_set:
-            metadata_driver.MetadataDriver.destroy_monitored_metadata_proxy(
-                self._process_monitor, net_name, self.conf, namespace)
-            self.restarted_metadata_proxy_set.add(net_name)
-
         # Spawn metadata proxy if it's not already running.
         metadata_driver.MetadataDriver.spawn_monitored_metadata_proxy(
             self._process_monitor, namespace, n_const.METADATA_PORT,
@@ -1103,3 +1103,16 @@ def parse_permitted_ethertypes(permitted_ethertypes):
             continue

     return ret
+
+
+def read_file(path: str) -> str:
+    """Return the content of a text file as a string
+
+    The output includes the empty lines too. If the file does not exist,
+    returns an empty string.
+    """
+    try:
+        with open(path) as file:
+            return file.read()
+    except FileNotFoundError:
+        return ''
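Note: the behaviour of the new neutron.common.utils.read_file() helper, sketched with a throwaway temporary file (assumes the neutron package is importable; the file name is arbitrary):

    import os
    import tempfile

    from neutron.common import utils

    with tempfile.TemporaryDirectory() as tmp:
        path = os.path.join(tmp, 'sample.txt')
        with open(path, 'w') as f:
            f.write('line 1\n\nline 3\n')
        assert utils.read_file(path) == 'line 1\n\nline 3\n'  # blank lines kept
        assert utils.read_file(path + '.missing') == ''       # no exception raised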
@@ -15,12 +15,14 @@
 import os
 from os import path
 import re
+import typing

 from eventlet.green import subprocess
 from neutron_lib.utils import helpers
 from oslo_concurrency import processutils
 from oslo_utils import fileutils

+from neutron.common import utils
 from neutron import privileged


@@ -52,6 +54,20 @@ def delete_if_exists(_path, remove=os.unlink):
     fileutils.delete_if_exists(_path, remove=remove)


+@privileged.default.entrypoint
+def read_file(_path: str) -> str:
+    return utils.read_file(_path)
+
+
+@privileged.default.entrypoint
+def write_to_tempfile(content: bytes,
+                      _path: typing.Optional[str] = None,
+                      suffix: str = '',
+                      prefix: str = 'tmp'):
+    return fileutils.write_to_tempfile(content, path=_path, suffix=suffix,
+                                       prefix=prefix)
+
+
 @privileged.default.entrypoint
 def execute_process(cmd, _process_input, addl_env):
     obj, cmd = _create_process(cmd, addl_env=addl_env)
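Note: the privileged wrapper simply delegates to the common helper, but it is executed through oslo.privsep, so the read happens with elevated privileges even though the agent process itself runs unprivileged. A usage sketch (the path is illustrative and assumes the privsep context has already been initialized by the agent):

    from neutron.privileged.agent.linux import utils as priv_utils

    # Runs inside the privsep daemon as root; the caller only gets the string back.
    text = priv_utils.read_file('/var/lib/neutron/external/example.conf')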
@@ -15,10 +15,15 @@
 import functools
+import os
 import signal
+import tempfile

+from oslo_utils import fileutils
+import testscenarios

 from neutron.agent.common import async_process
 from neutron.agent.linux import utils
 from neutron.common import utils as common_utils
+from neutron.privileged.agent.linux import utils as priv_utils
 from neutron.tests.functional.agent.linux import test_async_process
 from neutron.tests.functional import base as functional_base

@@ -172,3 +177,39 @@ class TestFindChildPids(functional_base.BaseSudoTestCase):
         with open('/proc/sys/kernel/pid_max', 'r') as fd:
             pid_max = int(fd.readline().strip())
         self.assertEqual([], utils.find_child_pids(pid_max))
+
+
+class ReadIfExists(testscenarios.WithScenarios,
+                   functional_base.BaseSudoTestCase):
+    scenarios = [
+        ('root', {'run_as_root': True}),
+        ('non-root', {'run_as_root': False})]
+
+    FILE = """Test file
+line 2
+
+line 4
+
+"""
+
+    @classmethod
+    def _write_file(cls, path='/tmp', run_as_root=False):
+        content = cls.FILE.encode('ascii')
+        if run_as_root:
+            return priv_utils.write_to_tempfile(content, _path=path)
+        else:
+            return fileutils.write_to_tempfile(content, path=path)
+
+    def test_read_if_exists(self):
+        test_file_path = self._write_file(run_as_root=self.run_as_root)
+        content = utils.read_if_exists(test_file_path,
+                                       run_as_root=self.run_as_root)
+        file = self.FILE
+        self.assertEqual(file, content)
+
+    def test_read_if_exists_no_file(self):
+        temp_dir = tempfile.TemporaryDirectory()
+        content = utils.read_if_exists(
+            os.path.join(temp_dir.name, 'non-existing-file'),
+            run_as_root=self.run_as_root)
+        self.assertEqual('', content)
@@ -889,6 +889,9 @@ class TestDhcpAgentEventHandler(base.BaseTestCase):
                 mock.call(FAKE_NETWORK_UUID, cfg.CONF,
                           ns_name=FAKE_NETWORK_DHCP_NS,
                           callback=mock.ANY))
+        mock.patch.object(metadata_driver_base.HaproxyConfiguratorBase,
+                          'is_config_file_obsolete',
+                          return_value=False).start()
        self.plugin.get_network_info.return_value = network
        process_instance = mock.Mock(active=False)
        with mock.patch.object(metadata_driver.MetadataDriver,
@@ -1084,7 +1087,9 @@ class TestDhcpAgentEventHandler(base.BaseTestCase):
        process_instance = mock.Mock(active=False)
        with mock.patch.object(metadata_driver.MetadataDriver,
                               '_get_metadata_proxy_process_manager',
-                              return_value=process_instance) as gmppm:
+                              return_value=process_instance) as gmppm,\
+                mock.patch.object(metadata_driver_base.MetadataDriverBase,
+                                  '_get_haproxy_configurator'):
            self.dhcp.enable_isolated_metadata_proxy(fake_network)
            gmppm.assert_called_with(FAKE_NETWORK_UUID,
                                     cfg.CONF,
@@ -1183,7 +1188,9 @@ class TestDhcpAgentEventHandler(base.BaseTestCase):
         network.ports = [dhcp_port_this_host, dhcp_port_other_host]
         self._test_enable_isolated_metadata_proxy_ipv6(network)

-    def _test_disable_isolated_metadata_proxy(self, network):
+    @mock.patch.object(metadata_driver_base.HaproxyConfiguratorBase,
+                       'is_config_file_obsolete', return_value=False)
+    def _test_disable_isolated_metadata_proxy(self, network, *args):
         cfg.CONF.set_override('enable_metadata_network', True)
         method_path = ('neutron.agent.metadata.driver.MetadataDriver'
                        '.destroy_monitored_metadata_proxy')
@@ -58,6 +58,7 @@ from neutron.agent.linux import pd
 from neutron.agent.linux import ra
 from neutron.agent.linux import utils as linux_utils
 from neutron.agent.metadata import driver as metadata_driver
+from neutron.agent.metadata import driver_base as metadata_driver_base
 from neutron.agent import rpc as agent_rpc
 from neutron.conf.agent import common as agent_config
 from neutron.conf.agent.l3 import config as l3_config
@@ -298,7 +299,9 @@ class TestBasicRouterOperations(BasicRouterOperationsFramework):
         eventlet.sleep(self.conf.ha_vrrp_advert_int + 2)
         self.assertFalse(agent._update_metadata_proxy.call_count)

-    def test_enqueue_state_change_l3_extension(self):
+    @mock.patch.object(metadata_driver_base.MetadataDriverBase,
+                       '_get_haproxy_configurator')
+    def test_enqueue_state_change_l3_extension(self, mock_haproxy_conf):
         self.conf.set_override('ha_vrrp_advert_int', 1)
         agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
         router_dict = {'id': 'router_id', 'enable_ndp_proxy': True}
@@ -307,6 +310,9 @@ class TestBasicRouterOperations(BasicRouterOperationsFramework):
         router_info.router = router_dict
         agent.router_info['router_id'] = router_info
         agent.l3_ext_manager.ha_state_change = mock.Mock()
+        haproxy_cfg = mock.Mock()
+        haproxy_cfg.is_config_file_obsolete.return_value = False
+        mock_haproxy_conf.return_value = haproxy_cfg
         with mock.patch('neutron.agent.linux.ip_lib.'
                         'IpAddrCommand.wait_until_address_ready') as mock_wait:
             mock_wait.return_value = True
@@ -112,6 +112,9 @@ class TestMetadataDriverProcess(base.BaseTestCase):
         meta_conf.register_meta_conf_opts(
             meta_conf.METADATA_RATE_LIMITING_OPTS, cfg.CONF,
             group=meta_conf.RATE_LIMITING_GROUP)
+        self.mock_conf_obsolete = mock.patch.object(
+            driver_base.HaproxyConfiguratorBase,
+            'is_config_file_obsolete').start()

     def test_after_router_updated_called_on_agent_process_update(self):
         with mock.patch.object(metadata_driver, 'after_router_updated') as f,\
@@ -153,7 +156,8 @@ class TestMetadataDriverProcess(base.BaseTestCase):
             agent._process_updated_router(router)
             f.assert_not_called()

-    def _test_spawn_metadata_proxy(self, dad_failed=False, rate_limited=False):
+    def _test_spawn_metadata_proxy(self, dad_failed=False, rate_limited=False,
+                                   is_config_file_obsolete=False):
         router_id = _uuid()
         router_ns = 'qrouter-%s' % router_id
         service_name = 'haproxy'
@@ -163,6 +167,11 @@ class TestMetadataDriverProcess(base.BaseTestCase):
         cfg.CONF.set_override('metadata_proxy_group', self.EGNAME)
         cfg.CONF.set_override('metadata_proxy_socket', self.METADATA_SOCKET)
         cfg.CONF.set_override('debug', True)
+        self.mock_conf_obsolete.return_value = is_config_file_obsolete
+        if is_config_file_obsolete:
+            self.mock_destroy_haproxy = mock.patch.object(
+                driver_base.MetadataDriverBase,
+                'destroy_monitored_metadata_proxy').start()

         with mock.patch(ip_class_path) as ip_mock,\
                 mock.patch(
@@ -274,6 +283,10 @@ class TestMetadataDriverProcess(base.BaseTestCase):
             self.delete_if_exists.assert_called_once_with(
                 mock.ANY, run_as_root=True)

+            if is_config_file_obsolete:
+                self.mock_destroy_haproxy.assert_called_once_with(
+                    agent.process_monitor, router_id, agent.conf, router_ns)
+
     def test_spawn_metadata_proxy(self):
         self._test_spawn_metadata_proxy()

@@ -294,6 +307,9 @@ class TestMetadataDriverProcess(base.BaseTestCase):
     def test_spawn_metadata_proxy_dad_failed(self):
         self._test_spawn_metadata_proxy(dad_failed=True)

+    def test_spawn_metadata_proxy_no_matching_configurations(self):
+        self._test_spawn_metadata_proxy(is_config_file_obsolete=True)
+
     @mock.patch.object(driver_base.LOG, 'error')
     def test_spawn_metadata_proxy_handles_process_exception(self, error_log):
         process_instance = mock.Mock(active=False)
@@ -316,29 +332,21 @@

     def test_create_config_file_wrong_user(self):
         with mock.patch('pwd.getpwnam', side_effect=KeyError):
-            config = metadata_driver.HaproxyConfigurator(_uuid(),
-                                                          mock.ANY, mock.ANY,
-                                                          mock.ANY, mock.ANY,
-                                                          self.EUNAME,
-                                                          self.EGNAME,
-                                                          mock.ANY, mock.ANY,
-                                                          mock.ANY)
             self.assertRaises(comm_meta.InvalidUserOrGroupException,
-                              config.create_config_file)
+                              metadata_driver.HaproxyConfigurator, _uuid(),
+                              mock.ANY, mock.ANY, mock.ANY, mock.ANY,
+                              self.EUNAME, self.EGNAME, mock.ANY, mock.ANY,
+                              mock.ANY)

     def test_create_config_file_wrong_group(self):
         with mock.patch('grp.getgrnam', side_effect=KeyError),\
                 mock.patch('pwd.getpwnam',
                            return_value=test_utils.FakeUser(self.EUNAME)):
-            config = metadata_driver.HaproxyConfigurator(_uuid(),
-                                                          mock.ANY, mock.ANY,
-                                                          mock.ANY, mock.ANY,
-                                                          self.EUNAME,
-                                                          self.EGNAME,
-                                                          mock.ANY, mock.ANY,
-                                                          mock.ANY)
             self.assertRaises(comm_meta.InvalidUserOrGroupException,
-                              config.create_config_file)
+                              metadata_driver.HaproxyConfigurator, _uuid(),
+                              mock.ANY, mock.ANY, mock.ANY, mock.ANY,
+                              self.EUNAME, self.EGNAME, mock.ANY, mock.ANY,
+                              mock.ANY)

     def test_destroy_monitored_metadata_proxy(self):
         mproxy_process = mock.Mock(active=False)
@@ -446,8 +446,7 @@ class TestMetadataAgent(base.BaseTestCase):
                     ip_wrap, 'add_veth',
                     return_value=[ip_lib.IPDevice('ip1'),
                                   ip_lib.IPDevice('ip2')]) as add_veth,\
-                mock.patch.object(
-                    linux_utils, 'delete_if_exists') as mock_delete,\
+                mock.patch.object(linux_utils, 'delete_if_exists'), \
                 mock.patch.object(
                     driver.MetadataDriver,
                     'spawn_monitored_metadata_proxy') as spawn_mdp, \
@@ -488,7 +487,6 @@ class TestMetadataAgent(base.BaseTestCase):
            self.assertCountEqual(expected_call,
                                  ip_addr_add_multiple.call_args.args[0])
            # Check that metadata proxy has been spawned
-            mock_delete.assert_called_once_with(mock.ANY, run_as_root=True)
            spawn_mdp.assert_called_once_with(
                mock.ANY, nemaspace_name, 80, mock.ANY,
                bind_address=n_const.METADATA_V4_IP, network_id=net_name,
@@ -105,7 +105,10 @@ class TestMetadataDriverProcess(base.BaseTestCase):
                 mock.patch(
                     'neutron.agent.linux.ip_lib.'
                     'IpAddrCommand.wait_until_address_ready',
-                    return_value=True):
+                    return_value=True),\
+                mock.patch.object(driver_base.HaproxyConfiguratorBase,
+                                  'is_config_file_obsolete',
+                                  return_value=False):
             cfg_file = os.path.join(
                 metadata_driver.HaproxyConfigurator.get_config_path(
                     agent.conf.state_path),
@@ -184,7 +187,9 @@ class TestMetadataDriverProcess(base.BaseTestCase):

         with mock.patch.object(metadata_driver.MetadataDriver,
                                '_get_metadata_proxy_process_manager',
-                               return_value=process_instance):
+                               return_value=process_instance),\
+                mock.patch.object(driver_base.MetadataDriverBase,
+                                  '_get_haproxy_configurator'):
             process_monitor = mock.Mock()
             network_id = 123456

@@ -201,22 +206,18 @@ class TestMetadataDriverProcess(base.BaseTestCase):

     def test_create_config_file_wrong_user(self):
         with mock.patch('pwd.getpwnam', side_effect=KeyError):
-            config = metadata_driver.HaproxyConfigurator(mock.ANY, mock.ANY,
-                                                          mock.ANY, mock.ANY,
-                                                          mock.ANY, self.EUNAME,
-                                                          self.EGNAME, mock.ANY,
-                                                          mock.ANY, mock.ANY)
             self.assertRaises(comm_meta.InvalidUserOrGroupException,
-                              config.create_config_file)
+                              metadata_driver.HaproxyConfigurator, mock.ANY,
+                              mock.ANY, mock.ANY, mock.ANY, mock.ANY,
+                              self.EUNAME, self.EGNAME, mock.ANY, mock.ANY,
+                              mock.ANY)

     def test_create_config_file_wrong_group(self):
         with mock.patch('grp.getgrnam', side_effect=KeyError),\
                 mock.patch('pwd.getpwnam',
                            return_value=test_utils.FakeUser(self.EUNAME)):
-            config = metadata_driver.HaproxyConfigurator(mock.ANY, mock.ANY,
-                                                          mock.ANY, mock.ANY,
-                                                          mock.ANY, self.EUNAME,
-                                                          self.EGNAME, mock.ANY,
-                                                          mock.ANY, mock.ANY)
             self.assertRaises(comm_meta.InvalidUserOrGroupException,
-                              config.create_config_file)
+                              metadata_driver.HaproxyConfigurator, mock.ANY,
+                              mock.ANY, mock.ANY, mock.ANY, mock.ANY,
+                              self.EUNAME, self.EGNAME, mock.ANY, mock.ANY,
+                              mock.ANY)