Remove _migrate_python_ns_metadata_proxy_if_needed method

It was added as a temporary helper during the migration process
and was marked for deletion in the Queens cycle.
Now that we are in Rocky, it can finally be
removed.

Change-Id: Iacf592841559d392b59864d507dc89ef028cbf05
This commit is contained in:
Slawek Kaplonski 2018-07-26 22:11:21 +02:00
parent e22ec6c609
commit f046031456
5 changed files with 1 additions and 113 deletions

View File

@ -24,13 +24,6 @@ dhcp_release6: CommandFilter, dhcp_release6, root
# haproxy # haproxy
haproxy: RegExpFilter, haproxy, root, haproxy, -f, .* haproxy: RegExpFilter, haproxy, root, haproxy, -f, .*
kill_haproxy: KillFilter, root, haproxy, -15, -9, -HUP kill_haproxy: KillFilter, root, haproxy, -15, -9, -HUP
# RHEL invocation of the metadata proxy will report /usr/bin/python
# TODO(dalvarez): Remove kill_metadata* filters in Q release since
# neutron-ns-metadata-proxy is now replaced by haproxy. We keep them for now
# for the migration process
kill_metadata: KillFilter, root, python, -9
kill_metadata7: KillFilter, root, python2.7, -9
kill_metadata35: KillFilter, root, python3.5, -9
# ip_lib # ip_lib
ip: IpFilter, ip, root ip: IpFilter, ip, root

View File

@ -19,13 +19,7 @@ radvd: CommandFilter, radvd, root
# haproxy # haproxy
haproxy: RegExpFilter, haproxy, root, haproxy, -f, .* haproxy: RegExpFilter, haproxy, root, haproxy, -f, .*
kill_haproxy: KillFilter, root, haproxy, -15, -9, -HUP kill_haproxy: KillFilter, root, haproxy, -15, -9, -HUP
# RHEL invocation of the metadata proxy will report /usr/bin/python
# TODO(dalvarez): Remove kill_metadata* filters in Q release since
# neutron-ns-metadata-proxy is now replaced by haproxy. We keep them for now
# for the migration process
kill_metadata: KillFilter, root, python, -15, -9
kill_metadata7: KillFilter, root, python2.7, -15, -9
kill_metadata35: KillFilter, root, python3.5, -15, -9
kill_radvd_usr: KillFilter, root, /usr/sbin/radvd, -15, -9, -HUP kill_radvd_usr: KillFilter, root, /usr/sbin/radvd, -15, -9, -HUP
kill_radvd: KillFilter, root, /sbin/radvd, -15, -9, -HUP kill_radvd: KillFilter, root, /sbin/radvd, -15, -9, -HUP

View File

@ -239,31 +239,10 @@ class MetadataDriver(object):
pm = cls._get_metadata_proxy_process_manager(uuid, conf, pm = cls._get_metadata_proxy_process_manager(uuid, conf,
ns_name=ns_name, ns_name=ns_name,
callback=callback) callback=callback)
# TODO(dalvarez): Remove in Q cycle. This will kill running instances
# of old ns-metadata-proxy Python version in order to be replaced by
# haproxy. This will help with upgrading and shall be removed in next
# cycle.
cls._migrate_python_ns_metadata_proxy_if_needed(pm)
pm.enable() pm.enable()
monitor.register(uuid, METADATA_SERVICE_NAME, pm) monitor.register(uuid, METADATA_SERVICE_NAME, pm)
cls.monitors[router_id] = pm cls.monitors[router_id] = pm
@staticmethod
def _migrate_python_ns_metadata_proxy_if_needed(pm):
"""Kill running Python version of ns-metadata-proxy.
This function will detect if the current metadata proxy process is
running the old Python version and kill it so that the new haproxy
version is spawned instead.
"""
# Read cmdline to a local var to avoid reading twice from /proc file
cmdline = pm.cmdline
if cmdline and 'haproxy' not in cmdline:
LOG.debug("Migrating old instance of python ns-metadata proxy to "
"new one based on haproxy (%s)", cmdline)
pm.disable()
@classmethod @classmethod
def destroy_monitored_metadata_proxy(cls, monitor, uuid, conf, ns_name): def destroy_monitored_metadata_proxy(cls, monitor, uuid, conf, ns_name):
monitor.unregister(uuid, METADATA_SERVICE_NAME) monitor.unregister(uuid, METADATA_SERVICE_NAME)

View File

@ -12,24 +12,19 @@
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
import functools
import os.path import os.path
import time import time
import fixtures
from oslo_config import cfg
import webob import webob
import webob.dec import webob.dec
import webob.exc import webob.exc
from neutron.agent.linux import dhcp from neutron.agent.linux import dhcp
from neutron.agent.linux import external_process
from neutron.agent.linux import utils from neutron.agent.linux import utils
from neutron.tests.common import machine_fixtures from neutron.tests.common import machine_fixtures
from neutron.tests.common import net_helpers from neutron.tests.common import net_helpers
from neutron.tests.functional.agent.l3 import framework from neutron.tests.functional.agent.l3 import framework
from neutron.tests.functional.agent.linux import helpers from neutron.tests.functional.agent.linux import helpers
from neutron.tests.functional.agent.linux import simple_daemon
METADATA_REQUEST_TIMEOUT = 60 METADATA_REQUEST_TIMEOUT = 60
METADATA_REQUEST_SLEEP = 5 METADATA_REQUEST_SLEEP = 5
@ -122,57 +117,6 @@ class MetadataL3AgentTestCase(framework.L3AgentTestFramework):
# Check status code # Check status code
self.assertIn(str(webob.exc.HTTPOk.code), firstline.split()) self.assertIn(str(webob.exc.HTTPOk.code), firstline.split())
@staticmethod
def _make_cmdline_callback(uuid):
def _cmdline_callback(pidfile):
cmdline = ["python", simple_daemon.__file__,
"--uuid=%s" % uuid,
"--pid_file=%s" % pidfile]
return cmdline
return _cmdline_callback
def test_haproxy_migration_path(self):
"""Test the migration path for haproxy.
This test will launch the simple_daemon Python process before spawning
haproxy. When launching haproxy, it will be detected and killed, as
it's running on the same pidfile and with the router uuid in its
cmdline.
"""
# Make sure that external_pids configuration option is the same for
# simple_daemon and haproxy so that both work on the same pid_file.
get_temp_file_path = functools.partial(
self.get_temp_file_path,
root=self.useFixture(fixtures.TempDir()))
cfg.CONF.set_override('external_pids',
get_temp_file_path('external/pids'))
self.agent.conf.set_override('external_pids',
get_temp_file_path('external/pids'))
router_info = self.generate_router_info(enable_ha=False)
# Spawn the simple_daemon process in the background using the generated
# router uuid. We are not registering it within ProcessMonitor so that
# it doesn't get respawned once killed.
_callback = self._make_cmdline_callback(router_info['id'])
pm = external_process.ProcessManager(
conf=cfg.CONF,
uuid=router_info['id'],
default_cmd_callback=_callback)
pm.enable()
self.addCleanup(pm.disable)
# Make sure that simple_daemon is running
self.assertIn('simple_daemon', pm.cmdline)
# Create the router. This is expected to launch haproxy after killing
# the simple_daemon process.
self.manage_router(self.agent, router_info)
# Make sure that it was killed and replaced by haproxy
self.assertNotIn('simple_daemon', pm.cmdline)
self.assertIn('haproxy', pm.cmdline)
class UnprivilegedUserMetadataL3AgentTestCase(MetadataL3AgentTestCase): class UnprivilegedUserMetadataL3AgentTestCase(MetadataL3AgentTestCase):
"""Test metadata proxy with least privileged user. """Test metadata proxy with least privileged user.

View File

@ -197,25 +197,3 @@ class TestMetadataDriverProcess(base.BaseTestCase):
mock.ANY, mock.ANY) mock.ANY, mock.ANY)
self.assertRaises(metadata_driver.InvalidUserOrGroupException, self.assertRaises(metadata_driver.InvalidUserOrGroupException,
config.create_config_file) config.create_config_file)
def test__migrate_python_ns_metadata_proxy_if_needed(self):
agent = l3_agent.L3NATAgent('localhost')
with mock.patch(
'neutron.agent.linux.external_process.ProcessManager')\
as mock_pm:
mock_pm.cmdline = (
'python neutron-ns-metadata-proxy')
(agent.metadata_driver
._migrate_python_ns_metadata_proxy_if_needed(mock_pm))
mock_pm.disable.assert_called_once_with()
def test__migrate_python_ns_metadata_proxy_if_needed_not_called(self):
agent = l3_agent.L3NATAgent('localhost')
with mock.patch(
'neutron.agent.linux.external_process.ProcessManager')\
as mock_pm:
mock_pm.cmdline = (
'haproxy -f foo.cfg')
(agent.metadata_driver
._migrate_python_ns_metadata_proxy_if_needed(mock_pm))
mock_pm.disable.assert_not_called()