Merge "nova-net: Remove firewall support (pt. 1)"

This commit is contained in:
Zuul 2020-01-08 01:30:07 +00:00 committed by Gerrit Code Review
commit ba266419f1
5 changed files with 13 additions and 76 deletions

View File

@@ -1184,14 +1184,6 @@ class ComputeManager(manager.Manager):
if expect_running and CONF.resume_guests_state_on_host_boot:
self._resume_guests_state(context, instance, net_info)
elif drv_state == power_state.RUNNING:
# VMwareAPI drivers will raise an exception
try:
self.driver.ensure_filtering_rules_for_instance(
instance, net_info)
except NotImplementedError:
LOG.debug('Hypervisor driver does not support '
'firewall rules', instance=instance)
def _resume_guests_state(self, context, instance, net_info):
LOG.info('Rebooting instance after nova-compute restart.',
@@ -1406,9 +1398,6 @@ class ComputeManager(manager.Manager):
context, self.host,
expected_attrs=['info_cache', 'metadata', 'numa_topology'])
if CONF.defer_iptables_apply:
self.driver.filter_defer_apply_on()
self.init_virt_events()
self._validate_pinning_configuration(instances)
@@ -1446,8 +1435,6 @@ class ComputeManager(manager.Manager):
context, already_handled, nodes_by_uuid.keys())
finally:
if CONF.defer_iptables_apply:
self.driver.filter_defer_apply_off()
if instances:
# We only send the instance info to the scheduler on startup
# if there is anything to send, otherwise this host might
@@ -1572,6 +1559,7 @@ class ComputeManager(manager.Manager):
def get_console_pool_info(self, context, console_type):
return self.driver.get_console_pool_info(console_type)
# TODO(stephenfin): Remove this as it's nova-network only
@wrap_exception()
def refresh_instance_security_rules(self, context, instance):
"""Tell the virtualization driver to refresh security rules for
@@ -1582,15 +1570,7 @@ class ComputeManager(manager.Manager):
Synchronize the call because we may still be in the middle of
creating the instance.
"""
@utils.synchronized(instance.uuid)
def _sync_refresh():
try:
return self.driver.refresh_instance_security_rules(instance)
except NotImplementedError:
LOG.debug('Hypervisor driver does not support '
'security groups.', instance=instance)
return _sync_refresh()
pass
def _await_block_device_map_created(self, context, vol_id):
# TODO(yamahata): creating volume simultaneously
@@ -7781,9 +7761,7 @@ class ComputeManager(manager.Manager):
context, instance, refresh_conn_info=True,
bdms=bdms)
# The driver pre_live_migration will plug vifs on the host. We call
# plug_vifs before calling ensure_filtering_rules_for_instance, to
# ensure bridge is set up.
# The driver pre_live_migration will plug vifs on the host
migrate_data = self.driver.pre_live_migration(context,
instance,
block_device_info,
@@ -7804,14 +7782,6 @@ class ComputeManager(manager.Manager):
self.network_api.setup_networks_on_host(context, instance,
self.host)
# Creating filters to hypervisors and firewalls.
# An example is that nova-instance-instance-xxx,
# which is written to libvirt.xml(Check "virsh nwfilter-list")
# This nwfilter is necessary on the destination host.
# In addition, this method is creating filtering rule
# onto destination host.
self.driver.ensure_filtering_rules_for_instance(instance,
network_info)
except Exception:
# If we raise, migrate_data with the updated attachment ids
# will not be returned to the source host for rollback.
@@ -8339,11 +8309,6 @@ class ComputeManager(manager.Manager):
ctxt, instance, self.host,
action=fields.NotificationAction.LIVE_MIGRATION_POST,
phase=fields.NotificationPhase.START)
# Releasing security group ingress rule.
LOG.debug('Calling driver.unfilter_instance from _post_live_migration',
instance=instance)
self.driver.unfilter_instance(instance,
network_info)
migration = {'source_compute': self.host,
'dest_compute': dest, }

View File

@@ -1120,6 +1120,7 @@ class ComputeAPI(object):
server=_compute_host(None, instance), version=version)
cctxt.cast(ctxt, 'rescue_instance', **msg_args)
# Remove as it only supports nova network
def reset_network(self, ctxt, instance):
version = '5.0'
cctxt = self.router.client(ctxt).prepare(
@@ -1452,6 +1453,7 @@ class ComputeAPI(object):
cctxt.cast(ctxt, 'unquiesce_instance', instance=instance,
mapping=mapping)
# TODO(stephenfin): Remove this as it's nova-network only
def refresh_instance_security_rules(self, ctxt, instance, host):
version = '5.0'
client = self.router.client(ctxt)

View File

@@ -6114,14 +6114,13 @@ class ComputeTestCase(BaseTestCase,
self.assertEqual(instance.vm_state, vm_states.ERROR)
self.compute.terminate_instance(self.context, instance, [])
@mock.patch.object(fake.FakeDriver, 'ensure_filtering_rules_for_instance')
@mock.patch.object(fake.FakeDriver, 'pre_live_migration')
@mock.patch('nova.compute.utils.notify_about_instance_action')
@mock.patch('nova.objects.BlockDeviceMappingList.get_by_instance_uuid',
return_value=objects.BlockDeviceMappingList())
def test_pre_live_migration_works_correctly(self, mock_get_bdms,
mock_notify,
mock_pre, mock_ensure):
mock_pre):
# Confirm setup_compute_volume is called when volume is mounted.
def stupid(*args, **kwargs):
return fake_network.fake_get_instance_nw_info(self)
@@ -6132,7 +6131,6 @@ class ComputeTestCase(BaseTestCase,
# creating instance testdata
instance = self._create_fake_instance_obj({'host': 'dummy'})
c = context.get_admin_context()
nw_info = fake_network.fake_get_instance_nw_info(self)
fake_notifier.NOTIFICATIONS = []
migrate_data = objects.LibvirtLiveMigrateData(
is_shared_instance_path=False)
@@ -6169,8 +6167,6 @@ class ComputeTestCase(BaseTestCase,
'root_device_name': None,
'block_device_mapping': []},
mock.ANY, mock.ANY, mock.ANY)
mock_ensure.assert_called_once_with(test.MatchType(objects.Instance),
nw_info)
mock_setup.assert_called_once_with(c, instance, self.compute.host)
# cleanup
@@ -6415,13 +6411,12 @@ class ComputeTestCase(BaseTestCase,
# cleanup
instance.destroy()
@mock.patch.object(fake.FakeDriver, 'unfilter_instance')
@mock.patch.object(compute_rpcapi.ComputeAPI,
'post_live_migration_at_destination')
@mock.patch.object(compute_manager.InstanceEvents,
'clear_events_for_instance')
def test_post_live_migration_no_shared_storage_working_correctly(self,
mock_clear, mock_post, mock_unfilter):
mock_clear, mock_post):
"""Confirm post_live_migration() works correctly as expected
for non shared storage migration.
"""
@@ -6473,7 +6468,6 @@ class ComputeTestCase(BaseTestCase,
self.assertIn('cleanup', result)
self.assertTrue(result['cleanup'])
mock_unfilter.assert_called_once_with(instance, [])
mock_migrate.assert_called_once_with(c, instance, migration)
mock_post.assert_called_once_with(c, instance, False, dest)
mock_clear.assert_called_once_with(mock.ANY)
@@ -6506,7 +6500,6 @@ class ComputeTestCase(BaseTestCase,
# creating mocks
with test.nested(
mock.patch.object(self.compute.driver, 'post_live_migration'),
mock.patch.object(self.compute.driver, 'unfilter_instance'),
mock.patch.object(self.compute.network_api,
'migrate_instance_start'),
mock.patch.object(self.compute.compute_rpcapi,
@@ -6520,7 +6513,7 @@ class ComputeTestCase(BaseTestCase,
mock.patch.object(self.compute, 'update_available_resource'),
mock.patch.object(migration_obj, 'save'),
) as (
post_live_migration, unfilter_instance,
post_live_migration,
migrate_instance_start, post_live_migration_at_destination,
post_live_migration_at_source, setup_networks_on_host,
clear_events, update_available_resource, mig_save
@@ -6539,7 +6532,6 @@ class ComputeTestCase(BaseTestCase,
'root_device_name': None,
'block_device_mapping': []},
migrate_data)])
unfilter_instance.assert_has_calls([mock.call(instance, [])])
migration = {'source_compute': srchost,
'dest_compute': dest, }
migrate_instance_start.assert_has_calls([
@@ -6588,7 +6580,6 @@ class ComputeTestCase(BaseTestCase,
# creating mocks
with test.nested(
mock.patch.object(self.compute.driver, 'post_live_migration'),
mock.patch.object(self.compute.driver, 'unfilter_instance'),
mock.patch.object(self.compute.network_api,
'migrate_instance_start'),
mock.patch.object(self.compute.compute_rpcapi,
@@ -6603,7 +6594,7 @@ class ComputeTestCase(BaseTestCase,
mock.patch.object(self.compute, 'update_available_resource'),
mock.patch.object(migration_obj, 'save'),
) as (
post_live_migration, unfilter_instance,
post_live_migration,
migrate_instance_start, post_live_migration_at_destination,
post_live_migration_at_source, setup_networks_on_host,
clear_events, update_available_resource, mig_save

View File

@@ -734,8 +734,6 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
@mock.patch.object(manager.ComputeManager,
'_error_out_instances_whose_build_was_interrupted')
@mock.patch.object(fake_driver.FakeDriver, 'init_host')
@mock.patch.object(fake_driver.FakeDriver, 'filter_defer_apply_on')
@mock.patch.object(fake_driver.FakeDriver, 'filter_defer_apply_off')
@mock.patch.object(objects.InstanceList, 'get_by_host')
@mock.patch.object(context, 'get_admin_context')
@mock.patch.object(manager.ComputeManager,
@@ -747,9 +745,8 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
def _do_mock_calls(mock_update_scheduler, mock_inst_init,
mock_validate_pinning,
mock_destroy, mock_admin_ctxt, mock_host_get,
mock_filter_off, mock_filter_on, mock_init_host,
mock_error_interrupted, mock_get_nodes,
defer_iptables_apply):
mock_init_host,
mock_error_interrupted, mock_get_nodes):
mock_admin_ctxt.return_value = self.context
inst_list = _make_instance_list(startup_instances)
mock_host_get.return_value = inst_list
@@ -760,9 +757,6 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
self.compute.init_host()
if defer_iptables_apply:
self.assertTrue(mock_filter_on.called)
mock_validate_pinning.assert_called_once_with(inst_list)
mock_destroy.assert_called_once_with(
self.context, {uuids.our_node_uuid: our_node})
@@ -771,8 +765,6 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
mock.call(self.context, inst_list[1]),
mock.call(self.context, inst_list[2])])
if defer_iptables_apply:
self.assertTrue(mock_filter_off.called)
mock_init_host.assert_called_once_with(host=our_host)
mock_host_get.assert_called_once_with(self.context, our_host,
expected_attrs=['info_cache', 'metadata', 'numa_topology'])
@@ -784,13 +776,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
self.context, {inst.uuid for inst in inst_list},
mock_get_nodes.return_value.keys())
# Test with defer_iptables_apply
self.flags(defer_iptables_apply=True)
_do_mock_calls(defer_iptables_apply=True)
# Test without defer_iptables_apply
self.flags(defer_iptables_apply=False)
_do_mock_calls(defer_iptables_apply=False)
_do_mock_calls()
@mock.patch('nova.compute.manager.ComputeManager._get_nodes')
@mock.patch('nova.compute.manager.ComputeManager.'
@@ -5507,14 +5493,6 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
self.assertFalse(mock_delete.called)
self.assertFalse(mock_sync.called)
def test_refresh_instance_security_rules_takes_non_object(self):
inst = objects.Instance(uuid=uuids.instance)
with mock.patch.object(self.compute.driver,
'refresh_instance_security_rules') as mock_r:
self.compute.refresh_instance_security_rules(self.context, inst)
self.assertIsInstance(mock_r.call_args_list[0][0][0],
objects.Instance)
def test_set_instance_obj_error_state_with_clean_task_state(self):
instance = fake_instance.fake_instance_obj(self.context,
vm_state=vm_states.BUILDING, task_state=task_states.SPAWNING)

View File

@@ -709,6 +709,7 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
timeout=1234, call_monitor_timeout=60,
_return_value=objects_block_dev.BlockDeviceMapping())
# TODO(stephenfin): Remove this since it's nova-network only
def test_refresh_instance_security_rules(self):
expected_args = {'instance': self.fake_instance_obj}
self._test_compute_api('refresh_instance_security_rules', 'cast',