Merge "Include lswitch id in NSX plugin port mappings"

Jenkins 2013-12-17 03:58:37 +00:00 committed by Gerrit Code Review
commit 648f787d80
10 changed files with 322 additions and 102 deletions

View File

@ -0,0 +1,69 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""nsx_mappings
Revision ID: 50e86cb2637a
Revises: 1fcfc149aca4
Create Date: 2013-10-26 14:37:30.012149
"""
# revision identifiers, used by Alembic.
revision = '50e86cb2637a'
down_revision = '1fcfc149aca4'
# Change to ['*'] if this migration applies to all plugins
migration_for_plugins = [
'neutron.plugins.nicira.NeutronPlugin.NvpPluginV2',
'neutron.plugins.nicira.NeutronServicePlugin.NvpAdvancedPlugin'
]
from alembic import op
import sqlalchemy as sa
from neutron.db import migration
def upgrade(active_plugins=None, options=None):
if not migration.should_run(active_plugins, migration_for_plugins):
return
# Update table for port/lswitchport mappings
op.rename_table('neutron_nvp_port_mapping', 'neutron_nsx_port_mappings')
op.add_column(
'neutron_nsx_port_mappings',
sa.Column('nsx_switch_id', sa.String(length=36), nullable=True))
op.alter_column(
'neutron_nsx_port_mappings', 'nvp_id',
new_column_name='nsx_port_id',
existing_nullable=True,
existing_type=sa.String(length=36))
def downgrade(active_plugins=None, options=None):
if not migration.should_run(active_plugins, migration_for_plugins):
return
# Restore table to pre-icehouse version
op.drop_column('neutron_nsx_port_mappings', 'nsx_switch_id')
op.alter_column(
'neutron_nsx_port_mappings', 'nsx_port_id',
new_column_name='nvp_id',
existing_nullable=True,
existing_type=sa.String(length=36))
op.rename_table('neutron_nsx_port_mappings', 'neutron_nvp_port_mapping')
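As an aside, the migration_for_plugins list above is what gates this schema change: upgrade() and downgrade() bail out unless one of the listed NSX/NVP plugins is active. A minimal sketch of that guard, reusing the should_run() call already made in this file (the ML2 plugin path is only an example of a non-NSX core plugin, not something taken from this change):
# Hypothetical illustration: with a non-NSX core plugin the guard returns
# False, so upgrade()/downgrade() return without touching the schema.
from neutron.db import migration

active_plugins = ['neutron.plugins.ml2.plugin.Ml2Plugin']  # example value
migration.should_run(active_plugins, migration_for_plugins)  # -> False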

View File

@ -61,6 +61,7 @@ from neutron.openstack.common import lockutils
from neutron.plugins.common import constants as plugin_const
from neutron.plugins.nicira.common import config # noqa
from neutron.plugins.nicira.common import exceptions as nvp_exc
from neutron.plugins.nicira.common import nsx_utils
from neutron.plugins.nicira.common import securitygroups as nvp_sec
from neutron.plugins.nicira.common import sync
from neutron.plugins.nicira.dbexts import distributedrouter as dist_rtr
@ -446,7 +447,7 @@ class NvpPluginV2(addr_pair_db.AllowedAddressPairsMixin,
# Remove orphaned port from NVP
nvplib.delete_port(self.cluster, ls_uuid, lp_uuid)
# rollback the neutron-nvp port mapping
nicira_db.delete_neutron_nvp_port_mapping(context.session,
nicira_db.delete_neutron_nsx_port_mapping(context.session,
port_id)
msg = (_("An exception occurred while creating the "
"quantum port %s on the NVP plaform") % port_id)
@ -474,8 +475,9 @@ class NvpPluginV2(addr_pair_db.AllowedAddressPairsMixin,
selected_lswitch['uuid'],
port_data,
True)
nicira_db.add_neutron_nvp_port_mapping(
context.session, port_data['id'], lport['uuid'])
nicira_db.add_neutron_nsx_port_mapping(
context.session, port_data['id'],
selected_lswitch['uuid'], lport['uuid'])
if port_data['device_owner'] not in self.port_special_owners:
nvplib.plug_interface(self.cluster, selected_lswitch['uuid'],
lport['uuid'], "VifAttachment",
@ -499,8 +501,8 @@ class NvpPluginV2(addr_pair_db.AllowedAddressPairsMixin,
"external networks. Port %s will be down."),
port_data['network_id'])
return
nvp_port_id = self._nvp_get_port_id(context, self.cluster,
port_data)
nvp_switch_id, nvp_port_id = nsx_utils.get_nsx_switch_and_port_id(
context.session, self.cluster, port_data['id'])
if not nvp_port_id:
LOG.debug(_("Port '%s' was already deleted on NVP platform"), id)
return
@ -509,21 +511,20 @@ class NvpPluginV2(addr_pair_db.AllowedAddressPairsMixin,
# the lswitch.
try:
nvplib.delete_port(self.cluster,
port_data['network_id'],
nvp_switch_id,
nvp_port_id)
LOG.debug(_("_nvp_delete_port completed for port %(port_id)s "
"on network %(net_id)s"),
{'port_id': port_data['id'],
'net_id': port_data['network_id']})
except q_exc.NotFound:
LOG.warning(_("Port %s not found in NVP"), port_data['id'])
def _nvp_delete_router_port(self, context, port_data):
# Delete logical router port
lrouter_id = port_data['device_id']
nvp_port_id = self._nvp_get_port_id(context, self.cluster,
port_data)
nvp_switch_id, nvp_port_id = nsx_utils.get_nsx_switch_and_port_id(
context.session, self.cluster, port_data['id'])
if not nvp_port_id:
LOG.warn(_("Neutron port %(port_id)s not found on NVP backend. "
"Terminating delete operation. A dangling router port "
@ -534,7 +535,7 @@ class NvpPluginV2(addr_pair_db.AllowedAddressPairsMixin,
try:
nvplib.delete_peer_router_lport(self.cluster,
lrouter_id,
port_data['network_id'],
nvp_switch_id,
nvp_port_id)
except NvpApiClient.NvpApiException:
# Do not raise because the issue might as well be that the
@ -573,8 +574,9 @@ class NvpPluginV2(addr_pair_db.AllowedAddressPairsMixin,
self.cluster, context, router_id, port_data,
"PatchAttachment", ls_port['uuid'],
subnet_ids=[subnet_id])
nicira_db.add_neutron_nvp_port_mapping(
context.session, port_data['id'], ls_port['uuid'])
nicira_db.add_neutron_nsx_port_mapping(
context.session, port_data['id'],
selected_lswitch['uuid'], ls_port['uuid'])
LOG.debug(_("_nvp_create_router_port completed for port "
"%(name)s on network %(network_id)s. The new "
"port id is %(id)s."),
@ -702,8 +704,9 @@ class NvpPluginV2(addr_pair_db.AllowedAddressPairsMixin,
selected_lswitch['uuid'],
port_data,
True)
nicira_db.add_neutron_nvp_port_mapping(
context.session, port_data['id'], lport['uuid'])
nicira_db.add_neutron_nsx_port_mapping(
context.session, port_data['id'],
selected_lswitch['uuid'], lport['uuid'])
nvplib.plug_l2_gw_service(
self.cluster,
port_data['network_id'],
@ -730,33 +733,6 @@ class NvpPluginV2(addr_pair_db.AllowedAddressPairsMixin,
# this is a no-op driver
pass
def _nvp_get_port_id(self, context, cluster, neutron_port):
"""Return the NVP port uuid for a given neutron port.
First, look up the Neutron database. If not found, execute
a query on NVP platform as the mapping might be missing because
the port was created before upgrading to grizzly.
"""
nvp_port_id = nicira_db.get_nvp_port_id(context.session,
neutron_port['id'])
if nvp_port_id:
return nvp_port_id
# Perform a query to NVP and then update the DB
try:
nvp_port = nvplib.get_port_by_neutron_tag(
cluster,
neutron_port['network_id'],
neutron_port['id'])
except NvpApiClient.NvpApiException:
LOG.exception(_("Unable to find NVP uuid for Neutron port %s"),
neutron_port['id'])
if nvp_port:
nicira_db.add_neutron_nvp_port_mapping(
context.session, neutron_port['id'],
nvp_port['uuid'])
return nvp_port['uuid']
def _extend_fault_map(self):
"""Extends the Neutron Fault Map.
@ -1033,23 +1009,24 @@ class NvpPluginV2(addr_pair_db.AllowedAddressPairsMixin,
'device_owner': ['network:router_interface']}
router_iface_ports = self.get_ports(context, filters=port_filter)
for port in router_iface_ports:
nvp_port_id = self._nvp_get_port_id(
context, self.cluster, port)
if nvp_port_id:
port['nvp_port_id'] = nvp_port_id
else:
LOG.warning(_("A nvp lport identifier was not found for "
"neutron port '%s'"), port['id'])
nvp_switch_id, nvp_port_id = nsx_utils.get_nsx_switch_and_port_id(
context.session, self.cluster, id)
super(NvpPluginV2, self).delete_network(context, id)
# clean up network owned ports
for port in router_iface_ports:
try:
if 'nvp_port_id' in port:
if nvp_port_id:
nvplib.delete_peer_router_lport(self.cluster,
port['device_id'],
port['network_id'],
port['nvp_port_id'])
nvp_switch_id,
nvp_port_id)
else:
LOG.warning(_("A nvp lport identifier was not found for "
"neutron port '%s'. Unable to remove "
"the peer router port for this switch port"),
port['id'])
except (TypeError, KeyError,
NvpApiClient.NvpApiException,
NvpApiClient.ResourceNotFound):
@ -1283,12 +1260,12 @@ class NvpPluginV2(addr_pair_db.AllowedAddressPairsMixin,
self._process_port_queue_mapping(context, ret_port,
port_queue_id)
LOG.warn(_("Update port request: %s"), port)
nvp_port_id = self._nvp_get_port_id(
context, self.cluster, ret_port)
nvp_switch_id, nvp_port_id = nsx_utils.get_nsx_switch_and_port_id(
context.session, self.cluster, id)
if nvp_port_id:
try:
nvplib.update_port(self.cluster,
ret_port['network_id'],
nvp_switch_id,
nvp_port_id, id, tenant_id,
ret_port['name'], ret_port['device_id'],
ret_port['admin_state_up'],
@ -1655,21 +1632,10 @@ class NvpPluginV2(addr_pair_db.AllowedAddressPairsMixin,
subnet_id = router_iface_info['subnet_id']
if port_id:
port_data = self._get_port(context, port_id)
nvp_port_id = self._nvp_get_port_id(
context, self.cluster, port_data)
# Fetch lswitch port from NVP in order to retrieve LS uuid
# this is necessary as in the case of bridged networks
# ls_uuid may be different from network id
# TODO(salv-orlando): avoid this NVP round trip by storing
# lswitch uuid together with lport uuid mapping.
nvp_port = nvplib.query_lswitch_lports(
self.cluster, '*',
filters={'uuid': nvp_port_id},
relations='LogicalSwitchConfig')[0]
ls_uuid = nvp_port['_relations']['LogicalSwitchConfig']['uuid']
nvp_switch_id, nvp_port_id = nsx_utils.get_nsx_switch_and_port_id(
context.session, self.cluster, port_id)
# Unplug current attachment from lswitch port
nvplib.plug_interface(self.cluster, ls_uuid,
nvplib.plug_interface(self.cluster, nvp_switch_id,
nvp_port_id, "NoAttachment")
# Create logical router port and plug patch attachment
self._create_and_attach_router_port(

View File

@ -0,0 +1,75 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 VMware Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.openstack.common import log
from neutron.plugins.nicira.dbexts import nicira_db
from neutron.plugins.nicira import nvplib
LOG = log.getLogger(__name__)
def get_nsx_switch_and_port_id(session, cluster, neutron_port_id):
"""Return the NSX switch and port uuids for a given neutron port.
First, look up the Neutron database. If the mapping is not found there,
query the NSX platform, as the mapping might be missing because the
port was created before upgrading to Grizzly.
This routine also retrieves the identifier of the logical switch on
the backend where the port is plugged. Prior to Icehouse this
information was not available in the Neutron database, so for
pre-existing records the routine queries the backend to retrieve the
correct switch identifier.
As of the Icehouse release it is no longer possible to assume that
the backend logical switch identifier is equal to the neutron
network identifier.
"""
nvp_switch_id, nvp_port_id = nicira_db.get_nsx_switch_and_port_id(
session, neutron_port_id)
if not nvp_switch_id:
# Find logical switch for port from backend
# This is a rather expensive query, but it won't be executed
# more than once for each port in Neutron's lifetime
nvp_ports = nvplib.query_lswitch_lports(
cluster, '*', relations='LogicalSwitchConfig',
filters={'tag': neutron_port_id,
'tag_scope': 'q_port_id'})
# Only one result expected
# NOTE(salv-orlando): Not handling the case where more than one
# port is found with the same neutron port tag
if not nvp_ports:
LOG.warn(_("Unable to find NVP port for Neutron port %s"),
neutron_port_id)
# This method is supposed to return a tuple
return None, None
nvp_port = nvp_ports[0]
nvp_switch_id = (nvp_port['_relations']
['LogicalSwitchConfig']['uuid'])
with session.begin(subtransactions=True):
if nvp_port_id:
# Mapping already exists. Delete before recreating
nicira_db.delete_neutron_nsx_port_mapping(
session, neutron_port_id)
else:
nvp_port_id = nvp_port['uuid']
# (re)Create DB mapping
nicira_db.add_neutron_nsx_port_mapping(
session, neutron_port_id,
nvp_switch_id, nvp_port_id)
return nvp_switch_id, nvp_port_id
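For illustration, a typical call site looks like the ones updated in NeutronPlugin.py above; a minimal sketch, assuming the caller already holds a request context, the plugin's NSX cluster handle and a port dict (context, self.cluster, port_data and LOG come from the surrounding method, not from this change):
# Sketch of a plugin-side caller: resolve both backend identifiers for a
# Neutron port, falling back to the backend query (which also backfills
# the DB mapping) when needed.
from neutron.plugins.nicira.common import nsx_utils

ls_uuid, lp_uuid = nsx_utils.get_nsx_switch_and_port_id(
    context.session, self.cluster, port_data['id'])
if not lp_uuid:
    # No mapping in the Neutron DB and nothing found on the NSX backend
    LOG.debug(_("Port '%s' was already deleted on NVP platform"),
              port_data['id'])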

View File

@ -26,6 +26,7 @@ from neutron.openstack.common import log
from neutron.openstack.common import loopingcall
from neutron.openstack.common import timeutils
from neutron.plugins.nicira.common import exceptions as nvp_exc
from neutron.plugins.nicira.common import nsx_utils
from neutron.plugins.nicira import NvpApiClient
from neutron.plugins.nicira import nvplib
@ -375,12 +376,12 @@ class NvpSynchronizer():
if not lswitchport:
# Try to get port from nvp
try:
lp_uuid = self._plugin._nvp_get_port_id(
context, self._cluster, neutron_port_data)
ls_uuid, lp_uuid = nsx_utils.get_nsx_switch_and_port_id(
context.session, self._cluster, neutron_port_data['id'])
if lp_uuid:
lswitchport = nvplib.get_port(
self._cluster, neutron_port_data['network_id'],
lp_uuid, relations='LogicalPortStatus')
self._cluster, ls_uuid, lp_uuid,
relations='LogicalPortStatus')
except (exceptions.PortNotFoundOnNetwork):
# NOTE(salv-orlando): We should be catching
# NvpApiClient.ResourceNotFound here instead

View File

@ -47,26 +47,30 @@ def add_network_binding(session, network_id, binding_type, phy_uuid, vlan_id):
return binding
def add_neutron_nvp_port_mapping(session, neutron_id, nvp_id):
def add_neutron_nsx_port_mapping(session, neutron_id,
nsx_switch_id, nsx_port_id):
with session.begin(subtransactions=True):
mapping = nicira_models.NeutronNvpPortMapping(neutron_id, nvp_id)
mapping = nicira_models.NeutronNsxPortMapping(
neutron_id, nsx_switch_id, nsx_port_id)
session.add(mapping)
return mapping
def get_nvp_port_id(session, neutron_id):
def get_nsx_switch_and_port_id(session, neutron_id):
try:
mapping = (session.query(nicira_models.NeutronNvpPortMapping).
filter_by(quantum_id=neutron_id).
mapping = (session.query(nicira_models.NeutronNsxPortMapping).
filter_by(neutron_id=neutron_id).
one())
return mapping['nvp_id']
return mapping['nsx_switch_id'], mapping['nsx_port_id']
except exc.NoResultFound:
return
LOG.debug(_("NSX identifiers for neutron port %s not yet "
"stored in Neutron DB"), neutron_id)
return None, None
def delete_neutron_nvp_port_mapping(session, neutron_id):
return (session.query(nicira_models.NeutronNvpPortMapping).
filter_by(quantum_id=neutron_id).delete())
def delete_neutron_nsx_port_mapping(session, neutron_id):
return (session.query(nicira_models.NeutronNsxPortMapping).
filter_by(neutron_id=neutron_id).delete())
def unset_default_network_gateways(session):

View File

@ -57,18 +57,20 @@ class NvpNetworkBinding(model_base.BASEV2):
self.vlan_id)
class NeutronNvpPortMapping(model_base.BASEV2):
class NeutronNsxPortMapping(model_base.BASEV2):
"""Represents the mapping between neutron and nvp port uuids."""
__tablename__ = 'quantum_nvp_port_mapping'
quantum_id = Column(String(36),
__tablename__ = 'neutron_nsx_port_mappings'
neutron_id = Column(String(36),
ForeignKey('ports.id', ondelete="CASCADE"),
primary_key=True)
nvp_id = Column(String(36))
nsx_switch_id = Column(String(36))
nsx_port_id = Column(String(36))
def __init__(self, quantum_id, nvp_id):
self.quantum_id = quantum_id
self.nvp_id = nvp_id
def __init__(self, neutron_id, nsx_switch_id, nsx_port_id):
self.neutron_id = neutron_id
self.nsx_switch_id = nsx_switch_id
self.nsx_port_id = nsx_port_id
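To tie this model to the dbexts helpers shown earlier, a hedged round-trip sketch; session is a Neutron db session and the three identifiers are placeholders for real uuids:
# Illustrative only: create, read back and delete a port mapping row using
# the nicira_db helpers from this change.
from neutron.plugins.nicira.dbexts import nicira_db

nicira_db.add_neutron_nsx_port_mapping(
    session, neutron_port_id, nsx_switch_id, nsx_port_id)
nsx_switch_id, nsx_port_id = nicira_db.get_nsx_switch_and_port_id(
    session, neutron_port_id)
nicira_db.delete_neutron_nsx_port_mapping(session, neutron_port_id)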
class MultiProviderNetworks(model_base.BASEV2):

View File

@ -22,6 +22,7 @@ from neutron.plugins.nicira import extensions
import neutron.plugins.nicira.NeutronPlugin as plugin
import neutron.plugins.nicira.NeutronServicePlugin as service_plugin
import neutron.plugins.nicira.NvpApiClient as nvpapi
from neutron.plugins.nicira import nvplib
from neutron.plugins.nicira.vshield.common import (
VcnsApiClient as vcnsapi)
from neutron.plugins.nicira.vshield import vcns
@ -38,6 +39,7 @@ vcns_api_helper = vcnsapi.VcnsApiHelper
STUBS_PATH = os.path.join(os.path.dirname(__file__), 'etc')
NVPEXT_PATH = os.path.dirname(extensions.__file__)
NVPAPI_NAME = '%s.%s' % (api_helper.__module__, api_helper.__name__)
NVPLIB_NAME = nvplib.__name__
PLUGIN_NAME = '%s.%s' % (nvp_plugin.__module__, nvp_plugin.__name__)
SERVICE_PLUGIN_NAME = '%s.%s' % (nvp_service_plugin.__module__,
nvp_service_plugin.__name__)
@ -50,3 +52,7 @@ VCNSAPI_NAME = '%s.%s' % (vcns_api_helper.__module__,
def get_fake_conf(filename):
return os.path.join(STUBS_PATH, filename)
def nicira_method(method_name, module_name='nvplib'):
return '%s.%s.%s' % ('neutron.plugins.nicira', module_name, method_name)

View File

@ -243,7 +243,7 @@ class TestNiciraPortsV2(NiciraPluginV2TestCase,
self._verify_no_orphan_left(net_id)
def test_create_port_neutron_error_no_orphan_left(self):
with mock.patch.object(nicira_db, 'add_neutron_nvp_port_mapping',
with mock.patch.object(nicira_db, 'add_neutron_nsx_port_mapping',
side_effect=ntn_exc.NeutronException):
with self.network() as net:
net_id = net['network']['id']

View File

@ -0,0 +1,96 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2013 VMware.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from neutron.db import api as db_api
from neutron.openstack.common import uuidutils
from neutron.plugins.nicira.common import nsx_utils
from neutron.tests import base
from neutron.tests.unit.nicira import nicira_method
class NsxUtilsTestCase(base.BaseTestCase):
def _mock_db_calls(self, get_switch_port_id_ret_value):
# Mock the relevant db calls. This avoids having to set up the
# plugin just to create db entries
mock.patch(nicira_method('get_nsx_switch_and_port_id',
module_name='dbexts.nicira_db'),
return_value=get_switch_port_id_ret_value).start()
mock.patch(nicira_method('add_neutron_nsx_port_mapping',
module_name='dbexts.nicira_db')).start()
mock.patch(nicira_method('delete_neutron_nsx_port_mapping',
module_name='dbexts.nicira_db')).start()
self.addCleanup(mock.patch.stopall)
def _verify_get_nsx_switch_and_port_id(self, exp_ls_uuid, exp_lp_uuid):
# The nvplib and db calls are mocked, therefore the cluster
# and the neutron_port_id parameters can be set to None
ls_uuid, lp_uuid = nsx_utils.get_nsx_switch_and_port_id(
db_api.get_session(), None, None)
self.assertEqual(exp_ls_uuid, ls_uuid)
self.assertEqual(exp_lp_uuid, lp_uuid)
def test_get_nsx_switch_and_port_id_from_db_mappings(self):
# This test is representative of the 'standard' case in which both the
# switch and the port mappings were stored in the neutron db
exp_ls_uuid = uuidutils.generate_uuid()
exp_lp_uuid = uuidutils.generate_uuid()
ret_value = exp_ls_uuid, exp_lp_uuid
self._mock_db_calls(ret_value)
self._verify_get_nsx_switch_and_port_id(exp_ls_uuid, exp_lp_uuid)
def test_get_nsx_switch_and_port_id_only_port_db_mapping(self):
# This test is representative of the case in which a port with an
# NVP db mapping from the Havana db was upgraded to Icehouse
exp_ls_uuid = uuidutils.generate_uuid()
exp_lp_uuid = uuidutils.generate_uuid()
ret_value = None, exp_lp_uuid
self._mock_db_calls(ret_value)
with mock.patch(nicira_method('query_lswitch_lports'),
return_value=[{'uuid': exp_lp_uuid,
'_relations': {
'LogicalSwitchConfig': {
'uuid': exp_ls_uuid}
}}]):
self._verify_get_nsx_switch_and_port_id(exp_ls_uuid, exp_lp_uuid)
def test_get_nsx_switch_and_port_id_no_db_mapping(self):
# This test is representative of the case where db mappings were not
# found for a given port identifier
exp_ls_uuid = uuidutils.generate_uuid()
exp_lp_uuid = uuidutils.generate_uuid()
ret_value = None, None
self._mock_db_calls(ret_value)
with mock.patch(nicira_method('query_lswitch_lports'),
return_value=[{'uuid': exp_lp_uuid,
'_relations': {
'LogicalSwitchConfig': {
'uuid': exp_ls_uuid}
}}]):
self._verify_get_nsx_switch_and_port_id(exp_ls_uuid, exp_lp_uuid)
def test_get_nsx_switch_and_port_id_no_mappings_returns_none(self):
# This test verifies that the function returns (None, None) if the
# mappings are found neither in the db nor on the backend
ret_value = None, None
self._mock_db_calls(ret_value)
with mock.patch(nicira_method('query_lswitch_lports'),
return_value=[]):
self._verify_get_nsx_switch_and_port_id(None, None)

View File

@ -30,6 +30,7 @@ from neutron.plugins.nicira import NvpApiClient
from neutron.plugins.nicira import nvplib
from neutron.tests import base
from neutron.tests.unit.nicira import fake_nvpapiclient
from neutron.tests.unit.nicira import nicira_method
from neutron.tests.unit.nicira import NVPAPI_NAME
from neutron.tests.unit.nicira import STUBS_PATH
from neutron.tests.unit import test_api_v2
@ -472,7 +473,7 @@ class TestNvplibExplicitLRouters(NvplibTestCase):
'type': 'LogicalRouterStatus',
'lport_link_up_count': 0, }, }
with mock.patch(_nicira_method('do_request'),
with mock.patch(nicira_method('do_request'),
return_value=self._get_lrouter(tenant_id,
router_name,
router_id,
@ -486,7 +487,7 @@ class TestNvplibExplicitLRouters(NvplibTestCase):
router_name = 'fake_router_name'
router_id = 'fake_router_id'
nexthop_ip = '10.0.0.1'
with mock.patch(_nicira_method('do_request'),
with mock.patch(nicira_method('do_request'),
return_value=self._get_lrouter(tenant_id,
router_name,
router_id)):
@ -503,9 +504,9 @@ class TestNvplibExplicitLRouters(NvplibTestCase):
"destination": "169.254.169.0/30"}, ]
nvp_routes = [self._get_single_route(router_id)]
with mock.patch(_nicira_method('get_explicit_routes_lrouter'),
with mock.patch(nicira_method('get_explicit_routes_lrouter'),
return_value=nvp_routes):
with mock.patch(_nicira_method('create_explicit_route_lrouter'),
with mock.patch(nicira_method('create_explicit_route_lrouter'),
return_value='fake_uuid'):
old_routes = nvplib.update_explicit_routes_lrouter(
self.fake_cluster, router_id, new_routes)
@ -517,9 +518,9 @@ class TestNvplibExplicitLRouters(NvplibTestCase):
"destination": "169.254.169.0/30"}, ]
nvp_routes = [self._get_single_route(router_id)]
with mock.patch(_nicira_method('get_explicit_routes_lrouter'),
with mock.patch(nicira_method('get_explicit_routes_lrouter'),
return_value=nvp_routes):
with mock.patch(_nicira_method('create_explicit_route_lrouter'),
with mock.patch(nicira_method('create_explicit_route_lrouter'),
side_effect=NvpApiClient.NvpApiException):
self.assertRaises(NvpApiClient.NvpApiException,
nvplib.update_explicit_routes_lrouter,
@ -536,11 +537,11 @@ class TestNvplibExplicitLRouters(NvplibTestCase):
self._get_single_route(router_id, 'fake_route_id_2',
'0.0.0.2/24', '10.0.0.4'), ]
with mock.patch(_nicira_method('get_explicit_routes_lrouter'),
with mock.patch(nicira_method('get_explicit_routes_lrouter'),
return_value=nvp_routes):
with mock.patch(_nicira_method('delete_explicit_route_lrouter'),
with mock.patch(nicira_method('delete_explicit_route_lrouter'),
return_value=None):
with mock.patch(_nicira_method(
with mock.patch(nicira_method(
'create_explicit_route_lrouter'),
return_value='fake_uuid'):
old_routes = nvplib.update_explicit_routes_lrouter(
@ -558,12 +559,12 @@ class TestNvplibExplicitLRouters(NvplibTestCase):
self._get_single_route(router_id, 'fake_route_id_2',
'0.0.0.2/24', '10.0.0.4'), ]
with mock.patch(_nicira_method('get_explicit_routes_lrouter'),
with mock.patch(nicira_method('get_explicit_routes_lrouter'),
return_value=nvp_routes):
with mock.patch(_nicira_method('delete_explicit_route_lrouter'),
with mock.patch(nicira_method('delete_explicit_route_lrouter'),
side_effect=NvpApiClient.NvpApiException):
with mock.patch(
_nicira_method('create_explicit_route_lrouter'),
nicira_method('create_explicit_route_lrouter'),
return_value='fake_uuid'):
self.assertRaises(
NvpApiClient.NvpApiException,