nova-net: Remove final references to nova-network

Strip out everything matching '(is|use)_neutron', except the tests for
nova-network code and two other places that these tests rely on. Along
the way, remove a whole load of apparently unnecessary mocking that
clearly wasn't caught when we switched over the bulk of testing to use
the neutron network driver.

Change-Id: Ifa9c5c468400261a5e1f66b72c575845173a4f8f
Signed-off-by: Stephen Finucane <sfinucan@redhat.com>
Stephen Finucane 2019-12-16 09:57:34 +00:00
parent 656c18eaf2
commit df00177093
34 changed files with 108 additions and 439 deletions
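
For illustration, the pattern this commit strips is the backend branch on utils.is_neutron(). A representative before/after sketch, using hypothetical handler names (only the is_neutron check itself is real nova API):

    # Hypothetical stand-in for a real neutron-path call site.
    def _neutron_path():
        return 'neutron'

    # Before this commit, call sites branched on the backend:
    #     if utils.is_neutron():
    #         result = _neutron_path()
    #     else:
    #         result = _nova_network_path()
    # After it, the neutron path runs unconditionally:
    result = _neutron_path()
    print(result)  # neutron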


@ -15,22 +15,13 @@
# under the License.
from nova import manager
from nova.network import driver
from nova import utils
# TODO(stephenfin): Remove this as it's no longer necessary.
class MetadataManager(manager.Manager):
"""Metadata Manager.
This class manages the Metadata API service initialization. Currently, it
just adds an iptables filter rule for the metadata service.
This class manages the Metadata API service initialization.
"""
def __init__(self, *args, **kwargs):
super(MetadataManager, self).__init__(*args, **kwargs)
if not utils.is_neutron():
# NOTE(mikal): we only add iptables rules if we're running
# under nova-network. This code should go away when the
# deprecation of nova-network is complete.
self.network_driver = driver.load_network_driver()
self.network_driver.metadata_accept()


@ -383,10 +383,7 @@ class ServersController(wsgi.Controller):
def _validate_network_id(net_id, network_uuids):
"""Validates that a requested network id.
This method performs two checks:
1. That the network id is in the proper uuid format.
2. That the network is not a duplicate when using nova-network.
This method checks that the network id is in the proper UUID format.
:param net_id: The network id to validate.
:param network_uuids: A running list of requested network IDs that have
@ -398,11 +395,6 @@ class ServersController(wsgi.Controller):
"not in proper format (%s)") % net_id
raise exc.HTTPBadRequest(explanation=msg)
# duplicate networks are allowed only for neutron v2.0
if net_id in network_uuids and not utils.is_neutron():
expl = _("Duplicate networks (%s) are not allowed") % net_id
raise exc.HTTPBadRequest(explanation=expl)
def _get_requested_networks(self, requested_networks):
"""Create a list of requested networks from the networks attribute."""
@ -429,10 +421,6 @@ class ServersController(wsgi.Controller):
if request.port_id:
request.network_id = None
if not utils.is_neutron():
# port parameter is only for neutron v2.0
msg = _("Unknown argument: port")
raise exc.HTTPBadRequest(explanation=msg)
if request.address is not None:
msg = _("Specified Fixed IP '%(addr)s' cannot be used "
"with port '%(port)s': the two cannot be "


@ -44,9 +44,8 @@ def network_dict(network):
class TenantNetworkController(wsgi.Controller):
def __init__(self, network_api=None):
def __init__(self):
super(TenantNetworkController, self).__init__()
# TODO(stephenfin): 'network_api' is only being passed for use by tests
self.network_api = nova.network.API()
self._default_networks = []


@ -1854,13 +1854,10 @@ class API(base.Base):
instance.system_metadata.update(system_meta)
if CONF.use_neutron:
# For Neutron we don't actually store anything in the database, we
# proxy the security groups on the instance from the ports
# attached to the instance.
instance.security_groups = objects.SecurityGroupList()
else:
instance.security_groups = security_groups
# Since the removal of nova-network, we don't actually store anything
# in the database. Instead, we proxy the security groups on the
# instance from the ports attached to the instance.
instance.security_groups = objects.SecurityGroupList()
self._populate_instance_names(instance, num_instances, index)
instance.shutdown_terminate = shutdown_terminate
@ -1964,9 +1961,8 @@ class API(base.Base):
if requested_networks and max_count is not None and max_count > 1:
self._check_multiple_instances_with_specified_ip(
requested_networks)
if utils.is_neutron():
self._check_multiple_instances_with_neutron_ports(
requested_networks)
self._check_multiple_instances_with_neutron_ports(
requested_networks)
if availability_zone:
available_zones = availability_zones.\
@ -2395,15 +2391,6 @@ class API(base.Base):
delete_type if delete_type != 'soft_delete' else 'delete'):
elevated = context.elevated()
# NOTE(liusheng): In nova-network multi_host scenario,deleting
# network info of the instance may need instance['host'] as
# destination host of RPC call. If instance in
# SHELVED_OFFLOADED state, instance['host'] is None, here, use
# shelved_host as host to deallocate network info and reset
# instance['host'] after that. Here we shouldn't use
# instance.save(), because this will mislead user who may think
# the instance's host has been changed, and actually, the
# instance.host is always None.
orig_host = instance.host
try:
if instance.vm_state == vm_states.SHELVED_OFFLOADED:
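
The _check_multiple_instances_with_neutron_ports call that is now unconditional guards against booting several servers with one pre-created port. A rough sketch of that rule (names simplified; the real method lives on nova's compute API):

    def check_multiple_instances_with_ports(requested_networks):
        # A neutron port can be bound to only one instance, so a
        # multi-create request that names an explicit port must fail.
        for request in requested_networks:
            if getattr(request, 'port_id', None) is not None:
                raise ValueError('Unable to launch multiple instances '
                                 'with a single configured port ID.')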


@ -2220,33 +2220,9 @@ class ComputeManager(manager.Manager):
retry['exc'] = traceback.format_exception(*sys.exc_info())
# This will be used for setting the instance fault message
retry['exc_reason'] = e.kwargs['reason']
# NOTE(comstud): Deallocate networks if the driver wants
# us to do so.
# NOTE(mriedem): Always deallocate networking when using Neutron.
# This is to unbind any ports that the user supplied in the server
# create request, or delete any ports that nova created which were
# meant to be bound to this host. This check intentionally bypasses
# the result of deallocate_networks_on_reschedule because the
# default value in the driver is False, but that method was really
# only meant for Ironic and should be removed when nova-network is
# removed (since is_neutron() will then always be True).
# NOTE(vladikr): SR-IOV ports should be deallocated to
# allow new sriov pci devices to be allocated on a new host.
# Otherwise, if devices with pci addresses are already allocated
# on the destination host, the instance will fail to spawn.
# info_cache.network_info should be present at this stage.
if (self.driver.deallocate_networks_on_reschedule(instance) or
utils.is_neutron() or
self.deallocate_sriov_ports_on_reschedule(instance)):
self._cleanup_allocated_networks(context, instance,
requested_networks)
else:
# NOTE(alex_xu): Network already allocated and we don't
# want to deallocate them before rescheduling. But we need
# to cleanup those network resources setup on this host before
# rescheduling.
self.network_api.cleanup_instance_network_on_host(
context, instance, self.host)
self._cleanup_allocated_networks(context, instance,
requested_networks)
self._nil_out_instance_obj_host_and_node(instance)
instance.task_state = task_states.SCHEDULING
@ -2290,24 +2266,6 @@ class ComputeManager(manager.Manager):
clean_task_state=True)
return build_results.FAILED
def deallocate_sriov_ports_on_reschedule(self, instance):
"""Determine if networks are needed to be deallocated before reschedule
Check the cached network info for any assigned SR-IOV ports.
SR-IOV ports should be deallocated prior to rescheduling
in order to allow new sriov pci devices to be allocated on a new host.
"""
info_cache = instance.info_cache
def _has_sriov_port(vif):
return vif['vnic_type'] in network_model.VNIC_TYPES_SRIOV
if (info_cache and info_cache.network_info):
for vif in info_cache.network_info:
if _has_sriov_port(vif):
return True
return False
@staticmethod
def _get_scheduler_hints(filter_properties, request_spec=None):
"""Helper method to get scheduler hints.
@ -4826,7 +4784,7 @@ class ComputeManager(manager.Manager):
network_info = instance.get_network_info()
events = []
deadline = CONF.vif_plugging_timeout
if deadline and utils.is_neutron() and network_info:
if deadline and network_info:
events = network_info.get_bind_time_events(migration)
if events:
LOG.debug('Will wait for bind-time events: %s', events)
@ -6392,13 +6350,6 @@ class ComputeManager(manager.Manager):
self._power_off_instance(context, instance, clean_shutdown)
current_power_state = self._get_power_state(context, instance)
# NOTE(mriedem): cleanup_instance_network_on_host assumes there are
# multiple host bindings per port which is not the case with the
# shelve/unshelve flow. Doing so will result in failures to update the
# port binding on unshelve (since we would have deleted it here).
if not utils.is_neutron():
self.network_api.cleanup_instance_network_on_host(
context, instance, instance.host)
network_info = self.network_api.get_instance_nw_info(context, instance)
block_device_info = self._get_instance_block_device_info(context,
@ -7825,7 +7776,7 @@ class ComputeManager(manager.Manager):
def _get_neutron_events_for_live_migration(instance):
# We don't generate events if CONF.vif_plugging_timeout=0
# meaning that the operator disabled using them.
if CONF.vif_plugging_timeout and utils.is_neutron():
if CONF.vif_plugging_timeout:
return [('network-vif-plugged', vif['id'])
for vif in instance.get_network_info()]
else:
@ -8674,7 +8625,7 @@ class ComputeManager(manager.Manager):
self.compute_rpcapi.rollback_live_migration_at_destination(
context, instance, dest, destroy_disks=destroy_disks,
migrate_data=migrate_data)
elif utils.is_neutron():
else:
# The port binding profiles need to be cleaned up.
with errors_out_migration_ctxt(migration):
try:
@ -8798,8 +8749,7 @@ class ComputeManager(manager.Manager):
# bindings instead of the compute driver. For example IronicDriver
# manages the port binding for baremetal instance ports, hence,
# external intervention with the binding is not desired.
if (not utils.is_neutron() or
self.driver.manages_network_binding_host_id()):
if self.driver.manages_network_binding_host_id():
return False
search_opts = {'device_id': instance.uuid,


@ -221,23 +221,12 @@ service.
help="""
Domain name used to configure FQDN for instances.
This option has two purposes:
#. For *neutron* and *nova-network* users, it is used to configure a
fully-qualified domain name for instance hostnames. If unset, only the
hostname without a domain will be configured.
#. (Deprecated) For *nova-network* users, this option configures the DNS
domains used for the DHCP server. Refer to the ``--domain`` option of the
``dnsmasq`` utility for more information. Like *nova-network* itself, this
purpose is deprecated.
Configure a fully-qualified domain name for instance hostnames. If unset, only
the hostname without a domain will be configured.
Possible values:
* Any string that is a valid domain name.
Related options:
* ``use_neutron``
"""),
]
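
For context, the retained purpose of this option is simple string composition when building an instance FQDN. A sketch under that assumption (helper name is illustrative):

    def build_fqdn(hostname, domain):
        # Assumed behaviour: append the configured domain, if any.
        return '%s.%s' % (hostname, domain) if domain else hostname

    print(build_fqdn('myserver', 'example.com'))  # myserver.example.com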


@ -877,12 +877,12 @@ def privsep_imports_not_aliased(logical_line, filename):
"""Do not abbreviate or alias privsep module imports.
When accessing symbols under nova.privsep in code or tests, the full module
path (e.g. nova.privsep.linux_net.delete_bridge(...)) should be used
path (e.g. nova.privsep.path.readfile(...)) should be used
explicitly rather than importing and using an alias/abbreviation such as:
from nova.privsep import linux_net
from nova.privsep import path
...
linux_net.delete_bridge(...)
path.readfile(...)
See Ief177dbcb018da6fbad13bb0ff153fc47292d5b9.


@ -1151,8 +1151,8 @@ class API(base_api.NetworkAPI):
# with neutron's view of the world. Since address is a 255-char
# string we can namespace it with our port id. Using '/' should
# be safely excluded from MAC address notations as well as
# UUIDs. We could stop doing this when we remove
# nova-network, but we'd need to leave the read translation in
# UUIDs. We can stop doing this now that we've removed
# nova-network, but we need to leave the read translation in
# place for a while yet, of course.
vifobj.address = '%s/%s' % (updated_port['mac_address'],
updated_port['id'])
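
The read translation the comment refers to just strips the port-id namespace back off. A minimal sketch with a hypothetical stored value:

    # Hypothetical stored value in the '<mac>/<port uuid>' form.
    stored = 'fa:16:3e:aa:bb:cc/4ad13b62-14a4-4d4c-b3f7-a40d5ecbe818'
    mac_address = stored.split('/', 1)[0]
    print(mac_address)  # fa:16:3e:aa:bb:cc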


@ -90,10 +90,6 @@ class SecurityGroupAPI(security_group_base.SecurityGroupBase):
:param allowed: the range of characters allowed, but not used because
Neutron is allowing any characters.
"""
# NOTE: If using nova-network as the backend, min_length is 1. However
# if using Neutron, Nova has allowed empty string as its history.
# So this min_length should be 0 for passing the existing requests.
utils.check_string_length(value, name=property, min_length=0,
max_length=255)


@ -49,6 +49,7 @@ class NetworkRequest(obj_base.NovaObject):
def obj_load_attr(self, attr):
setattr(self, attr, None)
# TODO(stephenfin): Drop the two item tuple case when we drop it entirely
def to_tuple(self):
address = str(self.address) if self.address is not None else None
if utils.is_neutron():
@ -56,6 +57,7 @@ class NetworkRequest(obj_base.NovaObject):
else:
return self.network_id, address
# TODO(stephenfin): Drop the two item tuple case when we drop it entirely
@classmethod
def from_tuple(cls, net_tuple):
if len(net_tuple) == 4:
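
The two tuple shapes the TODOs refer to look like this (values illustrative, mirroring the object tests later in this commit):

    # Neutron-era four-item tuple:
    # (network_id, address, port_id, pci_request_id)
    neutron_tuple = ('123', '1.2.3.4',
                     'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee', None)
    # Legacy nova-network two-item tuple: (network_id, address)
    legacy_tuple = ('123', '1.2.3.4')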


@ -182,6 +182,7 @@ class Quotas(base.NovaObject):
user_id=None):
# TODO(melwitt): We won't have per project resources after nova-network
# is removed.
# TODO(stephenfin): We need to do something here now...but what?
per_user = (user_id and
resource not in db_api.quota_get_per_project_resources())
quota_ref = (api_models.ProjectUserQuota() if per_user
@ -204,6 +205,7 @@ class Quotas(base.NovaObject):
user_id=None):
# TODO(melwitt): We won't have per project resources after nova-network
# is removed.
# TODO(stephenfin): We need to do something here now...but what?
per_user = (user_id and
resource not in db_api.quota_get_per_project_resources())
model = api_models.ProjectUserQuota if per_user else api_models.Quota


@ -86,7 +86,6 @@ class ApiSampleTestBaseV21(testscenarios.WithScenarios,
]
def setUp(self):
self.flags(use_ipv6=False)
self.flags(glance_link_prefix=self._get_glance_host(),
compute_link_prefix=self._get_host(),
group='api')


@ -38,9 +38,9 @@ class FloatingIpsBulkTest(api_sample_base.ApiSampleTestBaseV21):
ex = self.assertRaises(api_client.OpenStackApiException,
self.api.api_post,
'/os-floating-ips-bulk',
{"ip_range": "192.168.1.0/24",
"pool": CONF.default_floating_pool,
"interface": CONF.public_interface})
{'ip_range': '192.168.1.0/24',
'pool': 'nova',
'interface': 'eth0'})
self.assertEqual(410, ex.response.status_code)
def test_floating_ips_bulk_delete(self):


@ -252,11 +252,6 @@ class _IntegratedTestBase(test.TestCase):
def setUp(self):
super(_IntegratedTestBase, self).setUp()
# NOTE(mikal): this is used to stub away privsep helpers
def fake_noop(*args, **kwargs):
return None
self.stub_out('nova.privsep.linux_net.bind_ip', fake_noop)
self.fake_image_service =\
nova.tests.unit.image.fake.stub_out_image_service(self)


@ -81,11 +81,6 @@ class NotificationSampleTestBase(test.TestCase,
self.useFixture(utils_fixture.TimeFixture(test_services.fake_utcnow()))
# NOTE(mikal): this is used to stub away privsep helpers
def fake_noop(*args, **kwargs):
return None
self.stub_out('nova.privsep.linux_net.bind_ip', fake_noop)
# the image fake backend needed for image discovery
nova.tests.unit.image.fake.stub_out_image_service(self)
self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset)


@ -94,19 +94,6 @@ FAKE_USER_NETWORKS = [
},
]
NEW_NETWORK = {
"network": {
"bridge_interface": "eth0",
"cidr": "10.20.105.0/24",
"label": "new net 111",
"vlan_start": 111,
"multi_host": False,
'dhcp_server': '10.0.0.1',
'enable_dhcp': True,
'share_address': False,
}
}
class FakeNetworkAPI(object):
@ -145,6 +132,7 @@ class FakeNetworkAPI(object):
# However, the 'injected' value can be None with neutron.
# So here we change the value to False just to pass the
# following _from_db_object() call.
# TODO(stephenfin): Fix this
network['injected'] = False
return objects.Network._from_db_object(context,
objects.Network(),
@ -161,7 +149,6 @@ class NetworksTestV21(test.NoDBTestCase):
self.fake_network_api = FakeNetworkAPI()
self._setup()
fakes.stub_out_networking(self)
self.new_network = copy.deepcopy(NEW_NETWORK)
self.non_admin_req = fakes.HTTPRequest.blank(
'', project_id=fakes.FAKE_PROJECT_ID)
self.admin_req = fakes.HTTPRequest.blank('',


@ -70,7 +70,6 @@ from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_block_device
from nova.tests.unit import fake_flavor
from nova.tests.unit import fake_instance
from nova.tests.unit import fake_network
from nova.tests.unit.image import fake
from nova.tests.unit import matchers
from nova import utils as nova_utils
@ -227,7 +226,6 @@ class ControllerTest(test.TestCase):
def setUp(self):
super(ControllerTest, self).setUp()
self.flags(use_ipv6=False)
fakes.stub_out_nw_api(self)
fakes.stub_out_key_pair_funcs(self)
fake.stub_out_image_service(self)
@ -261,7 +259,6 @@ class ControllerTest(test.TestCase):
policy.reset()
policy.init()
self.addCleanup(policy.reset)
fake_network.stub_out_nw_api_get_instance_nw_info(self)
# Assume that anything that hits the compute API and looks for a
# RequestSpec doesn't care about it, since testing logic that deep
# should be done in nova.tests.unit.compute.test_compute_api.
@ -464,7 +461,6 @@ class ServersControllerTest(ControllerTest):
}
def test_get_server_by_id(self):
self.flags(use_ipv6=True)
image_bookmark = "http://localhost/%s/images/10" % self.project_id
flavor_bookmark = "http://localhost/%s/flavors/2" % self.project_id
@ -4132,8 +4128,6 @@ class ServersControllerCreateTest(test.TestCase):
self.stub_out('nova.db.api.instance_update', instance_update)
self.stub_out('nova.db.api.instance_update_and_get_original',
server_update_and_get_original)
self.stub_out('nova.network.manager.VlanManager.allocate_fixed_ip',
lambda *a, **kw: None)
self.body = {
'server': {
'name': 'server_test',
@ -6999,7 +6993,6 @@ class ServersViewBuilderTest(test.TestCase):
def setUp(self):
super(ServersViewBuilderTest, self).setUp()
self.flags(use_ipv6=True)
fakes.stub_out_nw_api(self)
self.flags(group='glance', api_servers=['http://localhost:9292'])
nw_cache_info = self._generate_nw_cache_info()
@ -7017,19 +7010,6 @@ class ServersViewBuilderTest(test.TestCase):
vm_state=vm_states.ACTIVE,
power_state=1)
privates = ['172.19.0.1']
publics = ['192.168.0.3']
public6s = ['b33f::fdee:ddff:fecc:bbaa']
def nw_info(*args, **kwargs):
return [(None, {'label': 'public',
'ips': [dict(ip=ip) for ip in publics],
'ip6s': [dict(ip=ip) for ip in public6s]}),
(None, {'label': 'private',
'ips': [dict(ip=ip) for ip in privates]})]
fakes.stub_out_nw_api_get_instance_nw_info(self, nw_info)
fakes.stub_out_secgroup_api(
self, security_groups=[{'name': 'default'}])


@ -148,7 +148,6 @@ class BaseTestCase(test.TestCase):
def setUp(self):
super(BaseTestCase, self).setUp()
self.flags(network_manager='nova.network.manager.FlatManager')
fake_notifier.stub_notifier(self)
self.addCleanup(fake_notifier.reset)
@ -1801,8 +1800,8 @@ class ComputeTestCase(BaseTestCase,
orig_update = self.compute._instance_update
# Make this work for both neutron and nova-network by stubbing out
# allocate_for_instance to return a fake network_info list of VIFs.
# Stub out allocate_for_instance to return a fake network_info list of
# VIFs
ipv4_ip = network_model.IP(version=4, address='192.168.1.100')
ipv4_subnet = network_model.Subnet(ips=[ipv4_ip])
ipv6_ip = network_model.IP(version=6, address='2001:db8:0:1::1')
@ -4637,7 +4636,6 @@ class ComputeTestCase(BaseTestCase,
("resume_instance", task_states.RESUMING),
]
self._stub_out_resize_network_methods()
instance = self._create_fake_instance_obj()
for operation in actions:
if 'revert_resize' in operation:
@ -4696,14 +4694,6 @@ class ComputeTestCase(BaseTestCase,
self.compute.terminate_instance(self.context, instance,
bdms=[])
def _stub_out_resize_network_methods(self):
def fake(cls, ctxt, instance, *args, **kwargs):
pass
self.stub_out('nova.network.api.API.setup_networks_on_host', fake)
self.stub_out('nova.network.api.API.migrate_instance_start', fake)
self.stub_out('nova.network.api.API.migrate_instance_finish', fake)
def _test_finish_resize(self, power_on, resize_instance=True):
# Contrived test to ensure finish_resize doesn't raise anything and
# also tests resize from ACTIVE or STOPPED state which determines
@ -4980,8 +4970,6 @@ class ComputeTestCase(BaseTestCase,
self.stub_out('nova.volume.cinder.API.terminate_connection',
fake_terminate_connection)
self._stub_out_resize_network_methods()
migration = objects.Migration.get_by_instance_and_status(
self.context.elevated(),
instance.uuid, 'pre-migrating')
@ -5051,8 +5039,6 @@ class ComputeTestCase(BaseTestCase,
self.stub_out('nova.virt.fake.FakeDriver.finish_migration', throw_up)
self._stub_out_resize_network_methods()
old_flavor_name = 'm1.tiny'
instance = self._create_fake_instance_obj(type_name=old_flavor_name)
@ -5201,8 +5187,6 @@ class ComputeTestCase(BaseTestCase,
request_spec={}, filter_properties={}, node=None,
clean_shutdown=True, migration=None, host_list=[])
self._stub_out_resize_network_methods()
migration = objects.Migration.get_by_instance_and_status(
self.context.elevated(),
instance.uuid, 'pre-migrating')
@ -5422,8 +5406,6 @@ class ComputeTestCase(BaseTestCase,
sys_meta = instance.system_metadata
self.assertEqual(vm_states.ACTIVE, sys_meta['old_vm_state'])
self._stub_out_resize_network_methods()
instance.task_state = task_states.RESIZE_PREP
instance.save()
@ -5505,8 +5487,6 @@ class ComputeTestCase(BaseTestCase,
self.stub_out('nova.virt.fake.FakeDriver.confirm_migration',
fake_confirm_migration_driver)
self._stub_out_resize_network_methods()
# Get initial memory usage
memory_mb_used = self.rt.compute_nodes[NODENAME].memory_mb_used
@ -5841,8 +5821,6 @@ class ComputeTestCase(BaseTestCase,
self.stub_out('nova.virt.fake.FakeDriver.finish_revert_migration',
fake_finish_revert_migration_driver)
self._stub_out_resize_network_methods()
# Get initial memory usage
memory_mb_used = self.rt.compute_nodes[NODENAME].memory_mb_used
@ -5997,8 +5975,6 @@ class ComputeTestCase(BaseTestCase,
self.stub_out('nova.virt.fake.FakeDriver.finish_revert_migration',
fake)
self._stub_out_resize_network_methods()
self.compute.build_and_run_instance(self.context, instance, {},
request_spec, {},
block_device_mapping=[])
@ -6015,7 +5991,6 @@ class ComputeTestCase(BaseTestCase,
migration = objects.Migration.get_by_instance_and_status(
self.context.elevated(),
instance.uuid, 'pre-migrating')
source_compute = migration.source_compute
migration.dest_compute = NODENAME2
migration.dest_node = NODENAME2
migration.save()
@ -6040,13 +6015,6 @@ class ComputeTestCase(BaseTestCase,
migration=migration, instance=instance,
request_spec=request_spec)
# NOTE(hanrong): Prove that we pass the right value to the
# "self.network_api.migrate_instance_finish".
def fake_migrate_instance_finish(cls, context, instance, migration):
self.assertEqual(source_compute, migration.dest_compute)
self.stub_out('nova.network.api.API.migrate_instance_finish',
fake_migrate_instance_finish)
self.compute.finish_revert_resize(self.context,
migration=migration,
instance=instance,
@ -8492,15 +8460,9 @@ class ComputeTestCase(BaseTestCase,
@ddt.ddt
class ComputeAPITestCase(BaseTestCase):
def setUp(self):
def fake_get_nw_info(cls, ctxt, instance):
self.assertTrue(ctxt.is_admin)
return fake_network.fake_get_instance_nw_info(self, 1, 1)
super(ComputeAPITestCase, self).setUp()
self.useFixture(fixtures.SpawnIsSynchronousFixture())
self.stub_out('nova.network.api.API.get_instance_nw_info',
fake_get_nw_info)
self.useFixture(fixtures.SpawnIsSynchronousFixture())
self.compute_api = compute.API()
self.fake_image = {
'id': 'f9000000-0000-0000-0000-000000000000',
@ -10988,8 +10950,6 @@ class ComputeAPITestCase(BaseTestCase):
def test_lock(self, mock_event, mock_record, mock_elevate, mock_notify):
mock_elevate.return_value = self.context
instance = self._create_fake_instance_obj()
self.stub_out('nova.network.api.API.deallocate_for_instance',
lambda *a, **kw: None)
self.compute_api.lock(self.context, instance)
mock_record.assert_called_once_with(
self.context, instance, instance_actions.LOCK
@ -11033,8 +10993,6 @@ class ComputeAPITestCase(BaseTestCase):
def test_unlock(self, mock_event, mock_record, mock_elevate, mock_notify):
mock_elevate.return_value = self.context
instance = self._create_fake_instance_obj()
self.stub_out('nova.network.api.API.deallocate_for_instance',
lambda *a, **kw: None)
self.compute_api.unlock(self.context, instance)
mock_record.assert_called_once_with(
self.context, instance, instance_actions.UNLOCK


@ -48,7 +48,6 @@ import nova.conf
from nova import context
from nova.db import api as db
from nova import exception
from nova.network import api as network_api
from nova.network import model as network_model
from nova.network.neutronv2 import api as neutronv2_api
from nova import objects
@ -5860,18 +5859,6 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
details={'ovs_hybrid_plug': True})],
[])
@mock.patch.object(utils, 'is_neutron', return_value=False)
def test_finish_revert_resize_network_migrate_finish_not_neutron(self, _):
"""Test that we're not waiting for any events if we're not using
Neutron.
"""
self._test_finish_revert_resize_network_migrate_finish(
[network_model.VIF(id=uuids.hybrid_vif,
details={'ovs_hybrid_plug': True}),
network_model.VIF(id=uuids.normal_vif,
details={'ovs_hybrid_plug': True})],
[])
@mock.patch('nova.compute.manager.LOG')
def test_cache_images_unsupported(self, mock_log):
r = self.compute.cache_images(self.context, ['an-image'])
@ -6121,19 +6108,16 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
mock_build_run.side_effect = exception.RescheduledException(reason='',
instance_uuid=self.instance.uuid)
with mock.patch.object(
self.compute.network_api,
'cleanup_instance_network_on_host') as mock_clean:
self.compute.build_and_run_instance(self.context, self.instance,
self.image, request_spec={},
filter_properties=self.filter_properties,
injected_files=self.injected_files,
admin_password=self.admin_pass,
requested_networks=self.requested_networks,
security_groups=self.security_groups,
block_device_mapping=self.block_device_mapping,
node=self.node, limits=self.limits,
host_list=fake_host_list)
self.compute.build_and_run_instance(self.context, self.instance,
self.image, request_spec={},
filter_properties=self.filter_properties,
injected_files=self.injected_files,
admin_password=self.admin_pass,
requested_networks=self.requested_networks,
security_groups=self.security_groups,
block_device_mapping=self.block_device_mapping,
node=self.node, limits=self.limits,
host_list=fake_host_list)
self._assert_build_instance_hook_called(mock_hooks,
build_results.RESCHEDULED)
@ -6144,7 +6128,6 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
self.requested_networks, self.security_groups,
self.block_device_mapping, self.node, self.limits,
self.filter_properties, {})
mock_clean.assert_not_called()
mock_nil.assert_called_once_with(self.instance)
mock_build.assert_called_once_with(self.context,
[self.instance], self.image, self.filter_properties,
@ -6209,18 +6192,15 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
mock_build_and_run.side_effect = exception.RescheduledException(
reason='', instance_uuid=self.instance.uuid)
with mock.patch.object(
self.compute.network_api,
'cleanup_instance_network_on_host') as mock_cleanup_network:
self.compute._do_build_and_run_instance(self.context, instance,
self.image, request_spec={},
filter_properties=self.filter_properties,
injected_files=self.injected_files,
admin_password=self.admin_pass,
requested_networks=self.requested_networks,
security_groups=self.security_groups,
block_device_mapping=self.block_device_mapping, node=self.node,
limits=self.limits, host_list=fake_host_list)
self.compute._do_build_and_run_instance(self.context, instance,
self.image, request_spec={},
filter_properties=self.filter_properties,
injected_files=self.injected_files,
admin_password=self.admin_pass,
requested_networks=self.requested_networks,
security_groups=self.security_groups,
block_device_mapping=self.block_device_mapping, node=self.node,
limits=self.limits, host_list=fake_host_list)
mock_build_and_run.assert_called_once_with(self.context,
instance,
@ -6228,7 +6208,6 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
self.requested_networks, self.security_groups,
self.block_device_mapping, self.node, self.limits,
self.filter_properties, {})
mock_cleanup_network.assert_not_called()
mock_build_ins.assert_called_once_with(self.context,
[instance], self.image, self.filter_properties,
self.admin_pass, self.injected_files, self.requested_networks,
@ -6390,33 +6369,27 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
@mock.patch.object(manager.ComputeManager, '_cleanup_allocated_networks')
@mock.patch.object(manager.ComputeManager,
'_nil_out_instance_obj_host_and_node')
@mock.patch.object(fake_driver.FakeDriver,
'deallocate_networks_on_reschedule')
@mock.patch.object(conductor_api.ComputeTaskAPI, 'build_instances')
@mock.patch.object(manager.ComputeManager, '_build_and_run_instance')
@mock.patch('nova.hooks._HOOKS')
def test_rescheduled_exception_do_not_deallocate_network(self, mock_hooks,
mock_build_run, mock_build, mock_deallocate, mock_nil,
mock_build_run, mock_build, mock_nil,
mock_clean_net, mock_save, mock_start,
mock_finish):
self._do_build_instance_update(mock_save, reschedule_update=True)
mock_build_run.side_effect = exception.RescheduledException(reason='',
instance_uuid=self.instance.uuid)
mock_deallocate.return_value = False
with mock.patch.object(
self.compute.network_api,
'cleanup_instance_network_on_host') as mock_clean_inst:
self.compute.build_and_run_instance(self.context, self.instance,
self.image, request_spec={},
filter_properties=self.filter_properties,
injected_files=self.injected_files,
admin_password=self.admin_pass,
requested_networks=self.requested_networks,
security_groups=self.security_groups,
block_device_mapping=self.block_device_mapping,
node=self.node, limits=self.limits,
host_list=fake_host_list)
self.compute.build_and_run_instance(self.context, self.instance,
self.image, request_spec={},
filter_properties=self.filter_properties,
injected_files=self.injected_files,
admin_password=self.admin_pass,
requested_networks=self.requested_networks,
security_groups=self.security_groups,
block_device_mapping=self.block_device_mapping,
node=self.node, limits=self.limits,
host_list=fake_host_list)
self._assert_build_instance_hook_called(mock_hooks,
build_results.RESCHEDULED)
@ -6427,8 +6400,6 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
self.requested_networks, self.security_groups,
self.block_device_mapping, self.node, self.limits,
self.filter_properties, {})
mock_deallocate.assert_called_once_with(self.instance)
mock_clean_inst.assert_not_called()
mock_nil.assert_called_once_with(self.instance)
mock_build.assert_called_once_with(self.context,
[self.instance], self.image, self.filter_properties,
@ -6443,18 +6414,15 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
@mock.patch.object(manager.ComputeManager, '_cleanup_allocated_networks')
@mock.patch.object(manager.ComputeManager,
'_nil_out_instance_obj_host_and_node')
@mock.patch.object(fake_driver.FakeDriver,
'deallocate_networks_on_reschedule')
@mock.patch.object(conductor_api.ComputeTaskAPI, 'build_instances')
@mock.patch.object(manager.ComputeManager, '_build_and_run_instance')
@mock.patch('nova.hooks._HOOKS')
def test_rescheduled_exception_deallocate_network(self, mock_hooks,
mock_build_run, mock_build, mock_deallocate, mock_nil, mock_clean,
mock_build_run, mock_build, mock_nil, mock_clean,
mock_save, mock_start, mock_finish):
self._do_build_instance_update(mock_save, reschedule_update=True)
mock_build_run.side_effect = exception.RescheduledException(reason='',
instance_uuid=self.instance.uuid)
mock_deallocate.return_value = True
self.compute.build_and_run_instance(self.context, self.instance,
self.image, request_spec={},
@ -6475,7 +6443,6 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
self.requested_networks, self.security_groups,
self.block_device_mapping, self.node, self.limits,
self.filter_properties, {})
mock_deallocate.assert_called_once_with(self.instance)
mock_clean.assert_called_once_with(self.context, self.instance,
self.requested_networks)
mock_nil.assert_called_once_with(self.instance)
@ -6855,19 +6822,16 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
mock_claim.side_effect = exc
self._do_build_instance_update(mock_save, reschedule_update=True)
with mock.patch.object(
self.compute.network_api,
'cleanup_instance_network_on_host') as mock_clean:
self.compute.build_and_run_instance(self.context, self.instance,
self.image, request_spec={},
filter_properties=self.filter_properties,
injected_files=self.injected_files,
admin_password=self.admin_pass,
requested_networks=self.requested_networks,
security_groups=self.security_groups,
block_device_mapping=self.block_device_mapping,
node=self.node, limits=self.limits,
host_list=fake_host_list)
self.compute.build_and_run_instance(self.context, self.instance,
self.image, request_spec={},
filter_properties=self.filter_properties,
injected_files=self.injected_files,
admin_password=self.admin_pass,
requested_networks=self.requested_networks,
security_groups=self.security_groups,
block_device_mapping=self.block_device_mapping,
node=self.node, limits=self.limits,
host_list=fake_host_list)
self._instance_action_events(mock_start, mock_finish)
self._assert_build_instance_update(mock_save, reschedule_update=True)
@ -6883,7 +6847,6 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
self.security_groups, self.block_device_mapping,
request_spec={}, host_lists=[fake_host_list])
mock_nil.assert_called_once_with(self.instance)
mock_clean.assert_not_called()
@mock.patch.object(manager.ComputeManager, '_build_resources')
@mock.patch.object(objects.Instance, 'save')
@ -7286,8 +7249,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
mock_failedspawn.assert_not_called()
@mock.patch.object(manager.ComputeManager, '_allocate_network')
@mock.patch.object(network_api.API, 'get_instance_nw_info')
def test_build_networks_if_not_allocated(self, mock_get, mock_allocate):
def test_build_networks_if_not_allocated(self, mock_allocate):
instance = fake_instance.fake_instance_obj(self.context,
system_metadata={},
expected_attrs=['system_metadata'])
@ -7302,8 +7264,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
self.assertTrue(hasattr(nw_info_obj, 'wait'), "wait must be there")
@mock.patch.object(manager.ComputeManager, '_allocate_network')
@mock.patch.object(network_api.API, 'get_instance_nw_info')
def test_build_networks_if_allocated_false(self, mock_get, mock_allocate):
def test_build_networks_if_allocated_false(self, mock_allocate):
instance = fake_instance.fake_instance_obj(self.context,
system_metadata=dict(network_allocated='False'),
expected_attrs=['system_metadata'])


@ -392,19 +392,12 @@ class UsageInfoTestCase(test.TestCase):
self.public_key = fake_crypto.get_ssh_public_key()
self.fingerprint = '1e:2c:9b:56:79:4b:45:77:f9:ca:7a:98:2c:b0:d5:3c'
def fake_get_nw_info(cls, ctxt, instance):
self.assertTrue(ctxt.is_admin)
return fake_network.fake_get_instance_nw_info(self, 1, 1)
super(UsageInfoTestCase, self).setUp()
self.stub_out('nova.network.api.get_instance_nw_info',
fake_get_nw_info)
fake_notifier.stub_notifier(self)
self.addCleanup(fake_notifier.reset)
self.flags(compute_driver='fake.FakeDriver',
network_manager='nova.network.manager.FlatManager')
self.flags(compute_driver='fake.FakeDriver')
self.compute = manager.ComputeManager()
self.user_id = 'fake'
self.project_id = 'fake'


@ -19,28 +19,6 @@ import nova.network.security_group.openstack_driver as sgapi
import nova.test
class NetworkAPIConfigTest(nova.test.NoDBTestCase):
"""Test the transition from legacy to use_neutron config options."""
def setUp(self):
super(NetworkAPIConfigTest, self).setUp()
self.flags(use_neutron=False)
def test_default(self):
netapi = nova.network.API()
self.assertIsInstance(netapi, nova.network.api.API)
def test_use_neutron(self):
self.flags(use_neutron=True)
netapi = nova.network.API()
self.assertIsInstance(netapi, nova.network.neutronv2.api.API)
def test_dont_use_neutron(self):
self.flags(use_neutron=False)
netapi = nova.network.API()
self.assertIsInstance(netapi, nova.network.api.API)
class SecurityGroupAPIConfigTest(nova.test.NoDBTestCase):
@mock.patch('oslo_utils.importutils.import_object')


@ -1286,11 +1286,11 @@ class TestNeutronv2(TestNeutronv2Base):
@mock.patch('nova.network.neutronv2.api.API._unbind_ports')
def test_allocate_for_instance_ex1(self, mock_unbind, mock_create_ports,
mock_populate, mock_get_client):
"""verify we will delete created ports
if we fail to allocate all net resources.
"""Verify we will delete created ports if we fail to allocate all net
resources.
Mox to raise exception when creating a second port.
In this case, the code should delete the first created port.
We mock to raise an exception when creating a second port. In this
case, the code should delete the first created port.
"""
self.instance = fake_instance.fake_instance_obj(self.context,
**self.instance)
@ -1531,9 +1531,7 @@ class TestNeutronv2(TestNeutronv2Base):
ret_data = copy.deepcopy(port_data)
if requested_networks:
if isinstance(requested_networks, objects.NetworkRequestList):
# NOTE(danms): Temporary and transitional
with mock.patch('nova.utils.is_neutron', return_value=True):
requested_networks = requested_networks.as_tuples()
requested_networks = requested_networks.as_tuples()
for net, fip, port, request_id in requested_networks:
ret_data.append({'network_id': net,
'device_id': self.instance.uuid,
@ -5096,7 +5094,6 @@ class TestNeutronv2(TestNeutronv2Base):
req_nets_in_call = mock_allocate.call_args[1]['requested_networks']
self.assertEqual('foo', req_nets_in_call.objects[0].tag)
@mock.patch('nova.objects.network_request.utils')
@mock.patch('nova.network.neutronv2.api.LOG')
@mock.patch('nova.network.neutronv2.api.base_api')
@mock.patch('nova.network.neutronv2.api.API._delete_ports')
@ -5110,9 +5107,7 @@ class TestNeutronv2(TestNeutronv2Base):
mock_unbind,
mock_deletep,
mock_baseapi,
mock_log,
req_utils):
req_utils.is_neutron.return_value = True
mock_log):
mock_inst = mock.Mock(project_id="proj-1",
availability_zone='zone-1',
uuid='inst-1')


@ -12,8 +12,6 @@
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova import objects
from nova.objects import network_request
from nova.tests.unit.objects import test_objects
@ -37,24 +35,15 @@ class _TestNetworkRequestObject(object):
self.assertFalse(request.auto_allocate)
self.assertFalse(request.no_allocate)
def test_to_tuple_neutron(self):
def test_to_tuple(self):
request = objects.NetworkRequest(network_id='123',
address='1.2.3.4',
port_id=FAKE_UUID,
)
with mock.patch('nova.utils.is_neutron', return_value=True):
self.assertEqual(('123', '1.2.3.4', FAKE_UUID, None),
request.to_tuple())
self.assertEqual(('123', '1.2.3.4', FAKE_UUID, None),
request.to_tuple())
def test_to_tuple_nova(self):
request = objects.NetworkRequest(network_id='123',
address='1.2.3.4',
port_id=FAKE_UUID)
with mock.patch('nova.utils.is_neutron', return_value=False):
self.assertEqual(('123', '1.2.3.4'),
request.to_tuple())
def test_from_tuples_neutron(self):
def test_from_tuples(self):
requests = objects.NetworkRequestList.from_tuples(
[('123', '1.2.3.4', FAKE_UUID, None)])
self.assertEqual(1, len(requests))
@ -63,16 +52,7 @@ class _TestNetworkRequestObject(object):
self.assertEqual(FAKE_UUID, requests[0].port_id)
self.assertIsNone(requests[0].pci_request_id)
def test_from_tuples_nova(self):
requests = objects.NetworkRequestList.from_tuples(
[('123', '1.2.3.4')])
self.assertEqual(1, len(requests))
self.assertEqual('123', requests[0].network_id)
self.assertEqual('1.2.3.4', str(requests[0].address))
self.assertIsNone(requests[0].port_id)
@mock.patch('nova.utils.is_neutron', return_value=True)
def test_list_as_tuples(self, is_neutron):
def test_list_as_tuples(self):
requests = objects.NetworkRequestList(
objects=[objects.NetworkRequest(network_id='123'),
objects.NetworkRequest(network_id='456')])


@ -22,10 +22,10 @@ import hmac
import os
import re
try:
import cPickle as pickle
except ImportError:
try: # python 2
import cPickle as pickle
except ImportError: # python 3
import pickle
from keystoneauth1 import exceptions as ks_exceptions
from keystoneauth1 import session
@ -268,7 +268,6 @@ class MetadataTestCase(test.TestCase):
self.instance = fake_inst_obj(self.context)
self.keypair = fake_keypair_obj(self.instance.key_name,
self.instance.key_data)
fake_network.stub_out_nw_api_get_instance_nw_info(self)
fakes.stub_out_secgroup_api(self)
def test_can_pickle_metadata(self):
@ -524,8 +523,7 @@ class MetadataTestCase(test.TestCase):
mock_get.assert_called_once_with(network_info_from_api)
def test_local_ipv4(self):
nw_info = fake_network.fake_get_instance_nw_info(self,
num_networks=2)
nw_info = fake_network.fake_get_instance_nw_info(self)
expected_local = "192.168.1.100"
md = fake_InstanceMetadata(self, self.instance,
network_info=nw_info, address="fake")
@ -533,8 +531,7 @@ class MetadataTestCase(test.TestCase):
self.assertEqual(expected_local, data['meta-data']['local-ipv4'])
def test_local_ipv4_from_nw_info(self):
nw_info = fake_network.fake_get_instance_nw_info(self,
num_networks=2)
nw_info = fake_network.fake_get_instance_nw_info(self)
expected_local = "192.168.1.100"
md = fake_InstanceMetadata(self, self.instance,
network_info=nw_info)
@ -620,7 +617,6 @@ class OpenStackMetadataTestCase(test.TestCase):
super(OpenStackMetadataTestCase, self).setUp()
self.context = context.RequestContext('fake', 'fake')
self.instance = fake_inst_obj(self.context)
fake_network.stub_out_nw_api_get_instance_nw_info(self)
def test_empty_device_metadata(self):
fakes.stub_out_key_pair_funcs(self)
@ -1043,7 +1039,6 @@ class MetadataHandlerTestCase(test.TestCase):
def setUp(self):
super(MetadataHandlerTestCase, self).setUp()
fake_network.stub_out_nw_api_get_instance_nw_info(self)
self.context = context.RequestContext('fake', 'fake')
self.instance = fake_inst_obj(self.context)
self.mdinst = fake_InstanceMetadata(self, self.instance,
@ -1676,7 +1671,6 @@ class MetadataHandlerTestCase(test.TestCase):
class MetadataPasswordTestCase(test.TestCase):
def setUp(self):
super(MetadataPasswordTestCase, self).setUp()
fake_network.stub_out_nw_api_get_instance_nw_info(self)
self.context = context.RequestContext('fake', 'fake')
self.instance = fake_inst_obj(self.context)
self.mdinst = fake_InstanceMetadata(self, self.instance,


@ -45,16 +45,7 @@ class NotificationsTestCase(test.TestCase):
super(NotificationsTestCase, self).setUp()
self.fixture = self.useFixture(o_fixture.ClearRequestContext())
self.net_info = fake_network.fake_get_instance_nw_info(self, 1,
1)
def fake_get_nw_info(cls, ctxt, instance):
self.assertTrue(ctxt.is_admin)
return self.net_info
self.stub_out('nova.network.api.API.get_instance_nw_info',
fake_get_nw_info)
self.net_info = fake_network.fake_get_instance_nw_info(self, 1, 1)
fake_notifier.stub_notifier(self)
self.addCleanup(fake_notifier.reset)


@ -94,9 +94,6 @@ class ServiceTestCase(test.NoDBTestCase):
self.topic = 'fake'
def test_create(self):
# NOTE(vish): Create was moved out of mox replay to make sure that
# the looping calls are created in StartService.
app = service.Service.create(host=self.host, binary=self.binary,
topic=self.topic,
manager='nova.tests.unit.test_service.FakeManager')


@ -583,16 +583,6 @@ class ValidateIntegerTestCase(test.NoDBTestCase):
max_value=1000)
class ValidateNeutronConfiguration(test.NoDBTestCase):
def test_nova_network(self):
self.flags(use_neutron=False)
self.assertFalse(utils.is_neutron())
def test_neutron(self):
self.flags(use_neutron=True)
self.assertTrue(utils.is_neutron())
class AutoDiskConfigUtilTestCase(test.NoDBTestCase):
def test_is_auto_disk_config_disabled(self):
self.assertTrue(utils.is_auto_disk_config_disabled("Disabled "))


@ -559,10 +559,6 @@ class TestPowerVMDriver(test.NoDBTestCase):
self.assertRaises(exception.InstanceNotFound, self.drv.get_vnc_console,
mock.ANY, self.inst)
def test_deallocate_networks_on_reschedule(self):
candeallocate = self.drv.deallocate_networks_on_reschedule(mock.Mock())
self.assertTrue(candeallocate)
@mock.patch('nova.virt.powervm.volume.fcvscsi.FCVscsiVolumeAdapter')
def test_attach_volume(self, mock_vscsi_adpt):
"""Validates the basic PowerVM attach volume."""


@ -31,7 +31,6 @@ from oslo_config import fixture as config_fixture
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import importutils
from oslo_utils import uuidutils
import testtools
@ -283,7 +282,6 @@ class XenAPIVMTestCase(stubs.XenAPITestBase,
"""Unit tests for VM operations."""
def setUp(self):
super(XenAPIVMTestCase, self).setUp()
self.network = importutils.import_object(CONF.network_manager)
self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
self.fixture.config(disable_process_locking=True,
group='oslo_concurrency')

View File

@ -360,19 +360,17 @@ def make_dev_path(dev, partition=None, base='/dev'):
def sanitize_hostname(hostname, default_name=None):
"""Return a hostname which conforms to RFC-952 and RFC-1123 specs except
the length of hostname.
"""Sanitize a given hostname.
Window, Linux, and Dnsmasq has different limitation:
Return a hostname which conforms to RFC-952 and RFC-1123 specs except the
length of hostname. Windows, Linux, and dnsmasq have different limitations:
Windows: 255 (net_bios limits to 15, but window will truncate it)
Linux: 64
Dnsmasq: 63
- Windows: 255 (NetBIOS limits to 15, but Windows will truncate it)
- Linux: 64
- dnsmasq: 63
Due to nova-network will leverage dnsmasq to set hostname, so we chose
63.
"""
We choose the lowest of these (so 63).
"""
def truncate_hostname(name):
if len(name) > 63:
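
Completing the truncation helper the hunk cuts off, as a runnable sketch:

    def truncate_hostname(name):
        # dnsmasq's 63-character cap is the lowest of the three limits.
        if len(name) > 63:
            return name[:63]
        return name

    print(len(truncate_hostname('a' * 80)))  # 63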


@ -1503,10 +1503,6 @@ class ComputeDriver(object):
"""
raise NotImplementedError()
def deallocate_networks_on_reschedule(self, instance):
"""Does the driver want networks deallocated on reschedule?"""
return False
def manage_image_cache(self, context, all_instances):
"""Manage the driver's local image cache.


@ -1037,14 +1037,6 @@ class IronicDriver(virt_driver.ComputeDriver):
return hardware.InstanceInfo(state=map_power_state(node.power_state))
def deallocate_networks_on_reschedule(self, instance):
"""Does the driver want networks deallocated on reschedule?
:param instance: the instance object.
:returns: Boolean value. If True deallocate networks on reschedule.
"""
return True
def _get_network_metadata(self, node, network_info):
"""Gets a more complete representation of the instance network info.


@ -61,8 +61,8 @@ def get_injected_network_template(network_info, template=None,
libvirt_virt_type=None):
"""Returns a rendered network template for the given network_info.
:param network_info:
:py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
:param network_info: `nova.network.models.NetworkInfo` object describing
the network metadata.
:param template: Path to the interfaces template file.
:param libvirt_virt_type: The Libvirt `virt_type`, will be `None` for
other hypervisors.


@ -483,8 +483,8 @@ class PowerVMDriver(driver.ComputeDriver):
is paused or halted/stopped.
:param instance: nova.objects.instance.Instance
:param network_info:
:py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
:param network_info: `nova.network.models.NetworkInfo` object
describing the network metadata.
:param reboot_type: Either a HARD or SOFT reboot
:param block_device_info: Info pertaining to attached volumes
:param bad_volumes_callback: Function to handle any bad volumes
@ -580,14 +580,6 @@ class PowerVMDriver(driver.ComputeDriver):
sare.reraise = False
raise exc.InstanceNotFound(instance_id=instance.uuid)
def deallocate_networks_on_reschedule(self, instance):
"""Does the driver want networks deallocated on reschedule?
:param instance: the instance object.
:returns: Boolean value. If True deallocate networks on reschedule.
"""
return True
def attach_volume(self, context, connection_info, instance, mountpoint,
disk_bus=None, device_type=None, encryption=None):
"""Attach the volume to the instance using the connection_info.