Rename L2 Switch/Gateway related methods for VMware NSX plugin

Yet another step in the renaming/refactoring
of nvplib and related modules. This is about l2 switches
and l2 gateways.

Partial-implements blueprint nicira-plugin-renaming

Change-Id: I35c2b20a33ed8f2df4334c335f91472f7b822f8e
armando-migliaccio 2014-01-16 13:06:30 -08:00
parent 13f9c5c372
commit 1242c8d90b
18 changed files with 1082 additions and 961 deletions
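In practice, callers drop the monolithic nvplib import in favor of the per-resource nsxlib modules. A minimal before/after sketch (cluster, net_id, tz_config and the other names are placeholders, not values from this change):

# Before: logical switch and L2 gateway helpers lived in nvplib
from neutron.plugins.nicira import nvplib
lswitch = nvplib.create_lswitch(cluster, net_id, tenant_id, name, tz_config)
gw = nvplib.create_l2_gw_service(cluster, tenant_id, gw_name, devices)

# After: they move to dedicated nsxlib modules
from neutron.plugins.nicira.nsxlib import l2gateway as l2gwlib
from neutron.plugins.nicira.nsxlib import switch as switchlib
lswitch = switchlib.create_lswitch(cluster, net_id, tenant_id, name, tz_config)
gw = l2gwlib.create_l2_gw_service(cluster, tenant_id, gw_name, devices)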

View File

@ -76,8 +76,10 @@ from neutron.plugins.nicira import dhcpmeta_modes
from neutron.plugins.nicira.extensions import maclearning as mac_ext
from neutron.plugins.nicira.extensions import nvp_networkgw as networkgw
from neutron.plugins.nicira.extensions import nvp_qos as ext_qos
from neutron.plugins.nicira.nsxlib import l2gateway as l2gwlib
from neutron.plugins.nicira.nsxlib import queue as queuelib
from neutron.plugins.nicira.nsxlib import router as routerlib
from neutron.plugins.nicira.nsxlib import switch as switchlib
from neutron.plugins.nicira import NvpApiClient
from neutron.plugins.nicira import nvplib
@ -411,17 +413,17 @@ class NvpPluginV2(addr_pair_db.AllowedAddressPairsMixin,
def _nvp_create_port_helper(self, cluster, ls_uuid, port_data,
do_port_security=True):
return nvplib.create_lport(cluster, ls_uuid, port_data['tenant_id'],
port_data['id'], port_data['name'],
port_data['device_id'],
port_data['admin_state_up'],
port_data['mac_address'],
port_data['fixed_ips'],
port_data[psec.PORTSECURITY],
port_data[ext_sg.SECURITYGROUPS],
port_data.get(ext_qos.QUEUE),
port_data.get(mac_ext.MAC_LEARNING),
port_data.get(addr_pair.ADDRESS_PAIRS))
return switchlib.create_lport(cluster, ls_uuid, port_data['tenant_id'],
port_data['id'], port_data['name'],
port_data['device_id'],
port_data['admin_state_up'],
port_data['mac_address'],
port_data['fixed_ips'],
port_data[psec.PORTSECURITY],
port_data[ext_sg.SECURITYGROUPS],
port_data.get(ext_qos.QUEUE),
port_data.get(mac_ext.MAC_LEARNING),
port_data.get(addr_pair.ADDRESS_PAIRS))
def _handle_create_port_exception(self, context, port_id,
ls_uuid, lp_uuid):
@ -430,8 +432,8 @@ class NvpPluginV2(addr_pair_db.AllowedAddressPairsMixin,
# created on NVP. Should this command fail the original
# exception will be raised.
if lp_uuid:
# Remove orphaned port from NVP
nvplib.delete_port(self.cluster, ls_uuid, lp_uuid)
# Remove orphaned port from NSX
switchlib.delete_port(self.cluster, ls_uuid, lp_uuid)
# rollback the neutron-nvp port mapping
nicira_db.delete_neutron_nsx_port_mapping(context.session,
port_id)
@ -465,9 +467,9 @@ class NvpPluginV2(addr_pair_db.AllowedAddressPairsMixin,
context.session, port_data['id'],
selected_lswitch['uuid'], lport['uuid'])
if port_data['device_owner'] not in self.port_special_owners:
nvplib.plug_interface(self.cluster, selected_lswitch['uuid'],
lport['uuid'], "VifAttachment",
port_data['id'])
switchlib.plug_interface(
self.cluster, selected_lswitch['uuid'],
lport['uuid'], "VifAttachment", port_data['id'])
LOG.debug(_("_nvp_create_port completed for port %(name)s "
"on network %(network_id)s. The new port id is "
"%(id)s."), port_data)
@ -487,9 +489,9 @@ class NvpPluginV2(addr_pair_db.AllowedAddressPairsMixin,
LOG.warning(msg)
if selected_lswitch and lport:
try:
nvplib.delete_port(self.cluster,
selected_lswitch['uuid'],
lport['uuid'])
switchlib.delete_port(self.cluster,
selected_lswitch['uuid'],
lport['uuid'])
except q_exc.NotFound:
LOG.debug(_("NSX Port %s already gone"), lport['uuid'])
@ -503,18 +505,16 @@ class NvpPluginV2(addr_pair_db.AllowedAddressPairsMixin,
"external networks. Port %s will be down."),
port_data['network_id'])
return
nvp_switch_id, nvp_port_id = nsx_utils.get_nsx_switch_and_port_id(
nsx_switch_id, nsx_port_id = nsx_utils.get_nsx_switch_and_port_id(
context.session, self.cluster, port_data['id'])
if not nvp_port_id:
if not nsx_port_id:
LOG.debug(_("Port '%s' was already deleted on NVP platform"), id)
return
# TODO(bgh): if this is a bridged network and the lswitch we just got
# back will have zero ports after the delete we should garbage collect
# the lswitch.
try:
nvplib.delete_port(self.cluster,
nvp_switch_id,
nvp_port_id)
switchlib.delete_port(self.cluster, nsx_switch_id, nsx_port_id)
LOG.debug(_("_nvp_delete_port completed for port %(port_id)s "
"on network %(net_id)s"),
{'port_id': port_data['id'],
@ -716,7 +716,7 @@ class NvpPluginV2(addr_pair_db.AllowedAddressPairsMixin,
nicira_db.add_neutron_nsx_port_mapping(
context.session, port_data['id'],
selected_lswitch['uuid'], lport['uuid'])
nvplib.plug_l2_gw_service(
l2gwlib.plug_l2_gw_service(
self.cluster,
port_data['network_id'],
lport['uuid'],
@ -725,9 +725,9 @@ class NvpPluginV2(addr_pair_db.AllowedAddressPairsMixin,
except Exception:
with excutils.save_and_reraise_exception():
if lport:
nvplib.delete_port(self.cluster,
selected_lswitch['uuid'],
lport['uuid'])
switchlib.delete_port(self.cluster,
selected_lswitch['uuid'],
lport['uuid'])
LOG.debug(_("_nvp_create_l2_gw_port completed for port %(name)s "
"on network %(network_id)s. The new port id "
"is %(id)s."), port_data)
@ -864,14 +864,14 @@ class NvpPluginV2(addr_pair_db.AllowedAddressPairsMixin,
# The tag must therefore be added.
tags = main_ls['tags']
tags.append({'tag': 'True', 'scope': 'multi_lswitch'})
nvplib.update_lswitch(cluster,
main_ls['uuid'],
main_ls['display_name'],
network['tenant_id'],
tags=tags)
switchlib.update_lswitch(cluster,
main_ls['uuid'],
main_ls['display_name'],
network['tenant_id'],
tags=tags)
transport_zone_config = self._convert_to_nvp_transport_zones(
cluster, network, bindings=network_bindings)
selected_lswitch = nvplib.create_lswitch(
selected_lswitch = switchlib.create_lswitch(
cluster, network.id, network.tenant_id,
"%s-ext-%s" % (network.name, len(lswitches)),
transport_zone_config)
@ -983,7 +983,7 @@ class NvpPluginV2(addr_pair_db.AllowedAddressPairsMixin,
net_data['id'] = str(uuid.uuid4())
if (not attr.is_attr_set(external) or
attr.is_attr_set(external) and not external):
lswitch = nvplib.create_lswitch(
lswitch = switchlib.create_lswitch(
self.cluster, net_data['id'],
tenant_id, net_data.get('name'),
transport_zone_config,
@ -1074,7 +1074,7 @@ class NvpPluginV2(addr_pair_db.AllowedAddressPairsMixin,
# Do not go to NVP for external networks
if not external:
try:
nvplib.delete_networks(self.cluster, id, lswitch_ids)
switchlib.delete_networks(self.cluster, id, lswitch_ids)
LOG.debug(_("delete_network completed for tenant: %s"),
context.tenant_id)
except q_exc.NotFound:
@ -1292,34 +1292,35 @@ class NvpPluginV2(addr_pair_db.AllowedAddressPairsMixin,
self._process_port_queue_mapping(context, ret_port,
port_queue_id)
LOG.warn(_("Update port request: %s"), port)
nvp_switch_id, nvp_port_id = nsx_utils.get_nsx_switch_and_port_id(
nsx_switch_id, nsx_port_id = nsx_utils.get_nsx_switch_and_port_id(
context.session, self.cluster, id)
if nvp_port_id:
if nsx_port_id:
try:
nvplib.update_port(self.cluster,
nvp_switch_id,
nvp_port_id, id, tenant_id,
ret_port['name'], ret_port['device_id'],
ret_port['admin_state_up'],
ret_port['mac_address'],
ret_port['fixed_ips'],
ret_port[psec.PORTSECURITY],
ret_port[ext_sg.SECURITYGROUPS],
ret_port[ext_qos.QUEUE],
ret_port.get(mac_ext.MAC_LEARNING),
ret_port.get(addr_pair.ADDRESS_PAIRS))
switchlib.update_port(
self.cluster,
nsx_switch_id, nsx_port_id, id, tenant_id,
ret_port['name'],
ret_port['device_id'],
ret_port['admin_state_up'],
ret_port['mac_address'],
ret_port['fixed_ips'],
ret_port[psec.PORTSECURITY],
ret_port[ext_sg.SECURITYGROUPS],
ret_port[ext_qos.QUEUE],
ret_port.get(mac_ext.MAC_LEARNING),
ret_port.get(addr_pair.ADDRESS_PAIRS))
# Update the port status from nvp. If we fail here hide it
# since the port was successfully updated but we were not
# able to retrieve the status.
ret_port['status'] = nvplib.get_port_status(
ret_port['status'] = switchlib.get_port_status(
self.cluster, ret_port['network_id'],
nvp_port_id)
nsx_port_id)
# FIXME(arosen) improve exception handling.
except Exception:
ret_port['status'] = constants.PORT_STATUS_ERROR
LOG.exception(_("Unable to update port id: %s."),
nvp_port_id)
nsx_port_id)
# If nvp_port_id is not in database or in nvp put in error state.
else:
@ -1688,15 +1689,15 @@ class NvpPluginV2(addr_pair_db.AllowedAddressPairsMixin,
context.session, self.cluster, router_id)
if port_id:
port_data = self._get_port(context, port_id)
nvp_switch_id, nvp_port_id = nsx_utils.get_nsx_switch_and_port_id(
nsx_switch_id, nsx_port_id = nsx_utils.get_nsx_switch_and_port_id(
context.session, self.cluster, port_id)
# Unplug current attachment from lswitch port
nvplib.plug_interface(self.cluster, nvp_switch_id,
nvp_port_id, "NoAttachment")
switchlib.plug_interface(self.cluster, nsx_switch_id,
nsx_port_id, "NoAttachment")
# Create logical router port and plug patch attachment
self._create_and_attach_router_port(
self.cluster, context, nsx_router_id, port_data,
"PatchAttachment", nvp_port_id, subnet_ids=[subnet_id])
"PatchAttachment", nsx_port_id, subnet_ids=[subnet_id])
subnet = self._get_subnet(context, subnet_id)
# If there is an external gateway we need to configure the SNAT rule.
# Fetch router from DB
@ -2013,20 +2014,20 @@ class NvpPluginV2(addr_pair_db.AllowedAddressPairsMixin,
if not device.get('interface_name'):
device['interface_name'] = self.cluster.default_interface_name
try:
nvp_res = nvplib.create_l2_gw_service(self.cluster, tenant_id,
gw_data['name'], devices)
nvp_uuid = nvp_res.get('uuid')
nsx_res = l2gwlib.create_l2_gw_service(
self.cluster, tenant_id, gw_data['name'], devices)
nsx_uuid = nsx_res.get('uuid')
except NvpApiClient.Conflict:
raise nvp_exc.NvpL2GatewayAlreadyInUse(gateway=gw_data['name'])
except NvpApiClient.NvpApiException:
err_msg = _("Unable to create l2_gw_service for: %s") % gw_data
LOG.exception(err_msg)
raise nvp_exc.NvpPluginException(err_msg=err_msg)
gw_data['id'] = nvp_uuid
gw_data['id'] = nsx_uuid
return super(NvpPluginV2, self).create_network_gateway(context,
network_gateway)
def delete_network_gateway(self, context, id):
def delete_network_gateway(self, context, gateway_id):
"""Remove a layer-2 network gateway.
Remove the gateway service from NVP platform and corresponding data
@ -2036,8 +2037,9 @@ class NvpPluginV2(addr_pair_db.AllowedAddressPairsMixin,
self._ensure_default_network_gateway()
with context.session.begin(subtransactions=True):
try:
super(NvpPluginV2, self).delete_network_gateway(context, id)
nvplib.delete_l2_gw_service(self.cluster, id)
super(NvpPluginV2, self).delete_network_gateway(
context, gateway_id)
l2gwlib.delete_l2_gw_service(self.cluster, gateway_id)
except NvpApiClient.ResourceNotFound:
# Do not cause a 500 to be returned to the user if
# the corresponding NVP resource does not exist
@ -2067,7 +2069,7 @@ class NvpPluginV2(addr_pair_db.AllowedAddressPairsMixin,
name = network_gateway[networkgw.RESOURCE_NAME].get('name')
if name:
try:
nvplib.update_l2_gw_service(self.cluster, id, name)
l2gwlib.update_l2_gw_service(self.cluster, id, name)
except NvpApiClient.NvpApiException:
# Consider backend failures as non-fatal, but still warn
# because this might indicate something dodgy is going on

View File

@ -37,6 +37,7 @@ from neutron.plugins.nicira.dbexts import vcns_models
from neutron.plugins.nicira.extensions import servicerouter as sr
from neutron.plugins.nicira import NeutronPlugin
from neutron.plugins.nicira.nsxlib import router as routerlib
from neutron.plugins.nicira.nsxlib import switch as switchlib
from neutron.plugins.nicira import NvpApiClient
from neutron.plugins.nicira import nvplib
from neutron.plugins.nicira.vshield.common import (
@ -109,12 +110,12 @@ class NvpAdvancedPlugin(sr_db.ServiceRouter_mixin,
# load the vCNS driver
self._load_vcns_drivers()
# nvplib's create_lswitch needs to be replaced in order to proxy
# switchlib's create_lswitch needs to be replaced in order to proxy
# logical switch create requests to vcns
self._set_create_lswitch_proxy()
def _set_create_lswitch_proxy(self):
NeutronPlugin.nvplib.create_lswitch = self._proxy_create_lswitch
NeutronPlugin.switchlib.create_lswitch = self._proxy_create_lswitch
def _proxy_create_lswitch(self, *args, **kwargs):
name, tz_config, tags = (
@ -412,7 +413,7 @@ class NvpAdvancedPlugin(sr_db.ServiceRouter_mixin,
lrouter, lswitch):
# create logic switch port
try:
ls_port = nvplib.create_lport(
ls_port = switchlib.create_lport(
self.cluster, lswitch['uuid'], tenant_id,
'', '', lrouter['uuid'], True)
except NvpApiClient.NvpApiException:
@ -433,7 +434,8 @@ class NvpAdvancedPlugin(sr_db.ServiceRouter_mixin,
except NvpApiClient.NvpApiException:
msg = (_("Unable to create port on NVP logical router %s") % name)
LOG.exception(msg)
nvplib.delete_port(self.cluster, lswitch['uuid'], ls_port['uuid'])
switchlib.delete_port(
self.cluster, lswitch['uuid'], ls_port['uuid'])
raise q_exc.NeutronException(message=msg)
# attach logic router port to switch port
@ -443,7 +445,8 @@ class NvpAdvancedPlugin(sr_db.ServiceRouter_mixin,
'PatchAttachment', ls_port['uuid'], None)
except NvpApiClient.NvpApiException as e:
# lr_port should have been deleted
nvplib.delete_port(self.cluster, lswitch['uuid'], ls_port['uuid'])
switchlib.delete_port(
self.cluster, lswitch['uuid'], ls_port['uuid'])
raise e
def _create_lrouter(self, context, router, nexthop):

View File

@ -19,8 +19,8 @@ from neutron.openstack.common import log
from neutron.plugins.nicira.dbexts import nicira_db
from neutron.plugins.nicira import nsx_cluster
from neutron.plugins.nicira.nsxlib import router as routerlib
from neutron.plugins.nicira.nsxlib import switch as switchlib
from neutron.plugins.nicira import NvpApiClient
from neutron.plugins.nicira import nvplib
LOG = log.getLogger(__name__)
@ -37,9 +37,9 @@ def fetch_nsx_switches(session, cluster, neutron_net_id):
"""
nsx_switch_ids = get_nsx_switch_ids(session, cluster, neutron_net_id)
if len(nsx_switch_ids) > 1:
lswitches = nvplib.get_lswitches(cluster, neutron_net_id)
lswitches = switchlib.get_lswitches(cluster, neutron_net_id)
else:
lswitches = [nvplib.get_lswitch_by_id(
lswitches = [switchlib.get_lswitch_by_id(
cluster, nsx_switch_ids[0])]
return lswitches
@ -56,7 +56,7 @@ def get_nsx_switch_ids(session, cluster, neutron_network_id):
# Find logical switches from backend.
# This is a rather expensive query, but it won't be executed
# more than once for each network in Neutron's lifetime
nsx_switches = nvplib.get_lswitches(cluster, neutron_network_id)
nsx_switches = switchlib.get_lswitches(cluster, neutron_network_id)
if not nsx_switches:
LOG.warn(_("Unable to find NSX switches for Neutron network %s"),
neutron_network_id)
@ -91,38 +91,38 @@ def get_nsx_switch_and_port_id(session, cluster, neutron_port_id):
the backend logical switch identifier is equal to the neutron
network identifier.
"""
nvp_switch_id, nvp_port_id = nicira_db.get_nsx_switch_and_port_id(
nsx_switch_id, nsx_port_id = nicira_db.get_nsx_switch_and_port_id(
session, neutron_port_id)
if not nvp_switch_id:
if not nsx_switch_id:
# Find logical switch for port from backend
# This is a rather expensive query, but it won't be executed
# more than once for each port in Neutron's lifetime
nvp_ports = nvplib.query_lswitch_lports(
nsx_ports = switchlib.query_lswitch_lports(
cluster, '*', relations='LogicalSwitchConfig',
filters={'tag': neutron_port_id,
'tag_scope': 'q_port_id'})
# Only one result expected
# NOTE(salv-orlando): Not handling the case where more than one
# port is found with the same neutron port tag
if not nvp_ports:
LOG.warn(_("Unable to find NVP port for Neutron port %s"),
if not nsx_ports:
LOG.warn(_("Unable to find NSX port for Neutron port %s"),
neutron_port_id)
# This method is supposed to return a tuple
return None, None
nvp_port = nvp_ports[0]
nvp_switch_id = (nvp_port['_relations']
nsx_port = nsx_ports[0]
nsx_switch_id = (nsx_port['_relations']
['LogicalSwitchConfig']['uuid'])
if nvp_port_id:
if nsx_port_id:
# Mapping already exists. Delete before recreating
nicira_db.delete_neutron_nsx_port_mapping(
session, neutron_port_id)
else:
nvp_port_id = nvp_port['uuid']
nsx_port_id = nsx_port['uuid']
# (re)Create DB mapping
nicira_db.add_neutron_nsx_port_mapping(
session, neutron_port_id,
nvp_switch_id, nvp_port_id)
return nvp_switch_id, nvp_port_id
nsx_switch_id, nsx_port_id)
return nsx_switch_id, nsx_port_id
def create_nsx_cluster(cluster_opts, concurrent_connections, nsx_gen_timeout):

View File

@ -28,6 +28,7 @@ from neutron.openstack.common import timeutils
from neutron.plugins.nicira.common import exceptions as nvp_exc
from neutron.plugins.nicira.common import nsx_utils
from neutron.plugins.nicira.nsxlib import router as routerlib
from neutron.plugins.nicira.nsxlib import switch as switchlib
from neutron.plugins.nicira import NvpApiClient
from neutron.plugins.nicira import nvplib
@ -383,7 +384,7 @@ class NvpSynchronizer():
ls_uuid, lp_uuid = nsx_utils.get_nsx_switch_and_port_id(
context.session, self._cluster, neutron_port_data['id'])
if lp_uuid:
lswitchport = nvplib.get_port(
lswitchport = switchlib.get_port(
self._cluster, ls_uuid, lp_uuid,
relations='LogicalPortStatus')
except (exceptions.PortNotFoundOnNetwork):

View File

@ -15,6 +15,8 @@
# License for the specific language governing permissions and limitations
# under the License.
import hashlib
from neutron.api.v2.attributes import is_attr_set
from neutron.openstack.common import log
from neutron.version import version_info
@ -32,9 +34,24 @@ def get_tags(**kwargs):
return tags
def device_id_to_vm_id(device_id, obfuscate=False):
# device_id can be longer than 40 characters, for example
# a device_id for a dhcp port is like the following:
#
# dhcp83b5fdeb-e3b4-5e18-ac5f-55161...80747326-47d7-46c2-a87a-cf6d5194877c
#
# To fit it into an NSX tag we need to hash it, however device_id
# used for ports associated to VM's are small enough so let's skip the
# hashing
if len(device_id) > MAX_DISPLAY_NAME_LEN or obfuscate:
return hashlib.sha1(device_id).hexdigest()
else:
return device_id
def check_and_truncate(display_name):
if is_attr_set(display_name) and len(display_name) > MAX_DISPLAY_NAME_LEN:
LOG.debug(_("Specified name:'%s' exceeds maximum length. "
"It will be truncated on NVP"), display_name)
"It will be truncated on NSX"), display_name)
return display_name[:MAX_DISPLAY_NAME_LEN]
return display_name or ''
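For illustration only (editor's sketch, not part of the diff), and assuming MAX_DISPLAY_NAME_LEN is 40 (the NSX display-name limit), the two helpers behave as follows:

# Editor's sketch, not code from this change.
from neutron.plugins.nicira.common import utils

long_id = 'dhcp' + 'f' * 70                       # e.g. a DHCP port device_id
print(len(utils.device_id_to_vm_id(long_id)))     # 40 -- SHA-1 hex digest
print(utils.device_id_to_vm_id('instance-0001'))  # 'instance-0001', unchanged
print(utils.check_and_truncate('x' * 60))         # truncated to 40 characters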

View File

@ -24,6 +24,7 @@ from neutron.plugins.nicira.common import exceptions as p_exc
from neutron.plugins.nicira.dbexts import lsn_db
from neutron.plugins.nicira.dhcp_meta import constants as const
from neutron.plugins.nicira.nsxlib import lsn as lsn_api
from neutron.plugins.nicira.nsxlib import switch as switch_api
from neutron.plugins.nicira import nvplib as nsxlib
LOG = logging.getLogger(__name__)
@ -161,13 +162,13 @@ class LsnManager(object):
self.lsn_port_delete(context, lsn_id, lsn_port_id)
if mac_address == const.METADATA_MAC:
try:
lswitch_port_id = nsxlib.get_port_by_neutron_tag(
lswitch_port_id = switch_api.get_port_by_neutron_tag(
self.cluster, network_id,
const.METADATA_PORT_ID)['uuid']
nsxlib.delete_port(
switch_api.delete_port(
self.cluster, network_id, lswitch_port_id)
except (n_exc.PortNotFoundOnNetwork,
nsxlib.NvpApiClient.NvpApiException):
switch_api.NvpApiClient.NvpApiException):
LOG.warn(_("Metadata port not found while attempting "
"to delete it from network %s"), network_id)
else:
@ -179,7 +180,7 @@ class LsnManager(object):
"""Connect network to LSN via specified port and port_data."""
try:
lsn_id = None
lswitch_port_id = nsxlib.get_port_by_neutron_tag(
lswitch_port_id = switch_api.get_port_by_neutron_tag(
self.cluster, network_id, port_id)['uuid']
lsn_id = self.lsn_get(context, network_id)
lsn_port_id = self.lsn_port_create(context, lsn_id, port_data)
@ -211,7 +212,7 @@ class LsnManager(object):
tenant_id = subnet['tenant_id']
lswitch_port_id = None
try:
lswitch_port_id = nsxlib.create_lport(
lswitch_port_id = switch_api.create_lport(
self.cluster, network_id, tenant_id,
const.METADATA_PORT_ID, const.METADATA_PORT_NAME,
const.METADATA_DEVICE_ID, True)['uuid']
@ -226,7 +227,8 @@ class LsnManager(object):
self.cluster, lsn_id, lsn_port_id, lswitch_port_id)
except p_exc.LsnConfigurationConflict:
self.lsn_port_delete(self.cluster, lsn_id, lsn_port_id)
nsxlib.delete_port(self.cluster, network_id, lswitch_port_id)
switch_api.delete_port(
self.cluster, network_id, lswitch_port_id)
raise p_exc.PortConfigurationError(
net_id=network_id, lsn_id=lsn_id, port_id=lsn_port_id)

View File

@ -0,0 +1,109 @@
# Copyright 2014 VMware, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import json
from neutron.openstack.common import log
from neutron.plugins.nicira.common import utils
from neutron.plugins.nicira.nvplib import _build_uri_path
from neutron.plugins.nicira.nvplib import _plug_interface
from neutron.plugins.nicira.nvplib import do_request
from neutron.plugins.nicira.nvplib import get_all_query_pages
HTTP_GET = "GET"
HTTP_POST = "POST"
HTTP_DELETE = "DELETE"
HTTP_PUT = "PUT"
GWSERVICE_RESOURCE = "gateway-service"
LOG = log.getLogger(__name__)
def create_l2_gw_service(cluster, tenant_id, display_name, devices):
"""Create a NSX Layer-2 Network Gateway Service.
:param cluster: The target NSX cluster
:param tenant_id: Identifier of the OpenStack tenant for which
the gateway service is created.
:param display_name: Descriptive name of this gateway service
:param devices: List of transport node uuids (and network
interfaces on them) to use for the network gateway service
:raise NvpApiException: if there is a problem while communicating
with the NSX controller
"""
# NOTE(salvatore-orlando): This is a little confusing, but device_id in
# NSX is actually the identifier of a physical interface on the gateway
# device, which in the Neutron API is referred as interface_name
gateways = [{"transport_node_uuid": device['id'],
"device_id": device['interface_name'],
"type": "L2Gateway"} for device in devices]
gwservice_obj = {
"display_name": utils.check_and_truncate(display_name),
"tags": utils.get_tags(os_tid=tenant_id),
"gateways": gateways,
"type": "L2GatewayServiceConfig"
}
return do_request(
"POST", _build_uri_path(GWSERVICE_RESOURCE),
json.dumps(gwservice_obj), cluster=cluster)
def plug_l2_gw_service(cluster, lswitch_id, lport_id,
gateway_id, vlan_id=None):
"""Plug a Layer-2 Gateway Attachment object in a logical port."""
att_obj = {'type': 'L2GatewayAttachment',
'l2_gateway_service_uuid': gateway_id}
if vlan_id:
att_obj['vlan_id'] = vlan_id
return _plug_interface(cluster, lswitch_id, lport_id, att_obj)
def get_l2_gw_service(cluster, gateway_id):
return do_request(
"GET", _build_uri_path(GWSERVICE_RESOURCE,
resource_id=gateway_id),
cluster=cluster)
def get_l2_gw_services(cluster, tenant_id=None,
fields=None, filters=None):
actual_filters = dict(filters or {})
if tenant_id:
actual_filters['tag'] = tenant_id
actual_filters['tag_scope'] = 'os_tid'
return get_all_query_pages(
_build_uri_path(GWSERVICE_RESOURCE,
filters=actual_filters),
cluster)
def update_l2_gw_service(cluster, gateway_id, display_name):
# TODO(salvatore-orlando): Allow updates for gateways too
gwservice_obj = get_l2_gw_service(cluster, gateway_id)
if not display_name:
# Nothing to update
return gwservice_obj
gwservice_obj["display_name"] = utils.check_and_truncate(display_name)
return do_request("PUT", _build_uri_path(GWSERVICE_RESOURCE,
resource_id=gateway_id),
json.dumps(gwservice_obj), cluster=cluster)
def delete_l2_gw_service(cluster, gateway_id):
do_request("DELETE", _build_uri_path(GWSERVICE_RESOURCE,
resource_id=gateway_id),
cluster=cluster)
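A short usage sketch of the new module (editor's illustration; the cluster object and every identifier below are placeholders):

# Create a gateway service, plug it into an existing logical port, remove it.
gw = l2gwlib.create_l2_gw_service(
    cluster, 'tenant-1', 'edge-gw',
    [{'id': 'transport-node-uuid', 'interface_name': 'breth0'}])
l2gwlib.plug_l2_gw_service(cluster, lswitch_uuid, lport_uuid,
                           gw['uuid'], vlan_id=100)
l2gwlib.delete_l2_gw_service(cluster, gw['uuid'])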

View File

@ -19,13 +19,13 @@ from neutron.openstack.common import jsonutils
from neutron.openstack.common import log
from neutron.plugins.nicira.common import exceptions as nvp_exc
from neutron.plugins.nicira.common import utils
from neutron.plugins.nicira.nsxlib.switch import get_port
from neutron.plugins.nicira.nsxlib.versioning import DEFAULT_VERSION
from neutron.plugins.nicira.nsxlib.versioning import versioned
from neutron.plugins.nicira import NvpApiClient
from neutron.plugins.nicira.nvplib import _build_uri_path
from neutron.plugins.nicira.nvplib import do_request
from neutron.plugins.nicira.nvplib import get_all_query_pages
from neutron.plugins.nicira.nvplib import get_port
HTTP_GET = "GET"
HTTP_POST = "POST"

View File

@ -0,0 +1,379 @@
# Copyright 2014 VMware, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import json
from neutron.common import constants
from neutron.common import exceptions as exception
from neutron.openstack.common import log
from neutron.plugins.nicira.common import exceptions as nvp_exc
from neutron.plugins.nicira.common import utils
from neutron.plugins.nicira import NvpApiClient
from neutron.plugins.nicira.nvplib import _build_uri_path
from neutron.plugins.nicira.nvplib import _plug_interface
from neutron.plugins.nicira.nvplib import do_request
from neutron.plugins.nicira.nvplib import get_all_query_pages
HTTP_GET = "GET"
HTTP_POST = "POST"
HTTP_DELETE = "DELETE"
HTTP_PUT = "PUT"
LSWITCH_RESOURCE = "lswitch"
LSWITCHPORT_RESOURCE = "lport/%s" % LSWITCH_RESOURCE
LOG = log.getLogger(__name__)
def _configure_extensions(lport_obj, mac_address, fixed_ips,
port_security_enabled, security_profiles,
queue_id, mac_learning_enabled,
allowed_address_pairs):
lport_obj['allowed_address_pairs'] = []
if port_security_enabled:
for fixed_ip in fixed_ips:
ip_address = fixed_ip.get('ip_address')
if ip_address:
lport_obj['allowed_address_pairs'].append(
{'mac_address': mac_address, 'ip_address': ip_address})
# add address pair allowing src_ip 0.0.0.0 to leave
# this is required for outgoing dhcp request
lport_obj["allowed_address_pairs"].append(
{"mac_address": mac_address,
"ip_address": "0.0.0.0"})
lport_obj['security_profiles'] = list(security_profiles or [])
lport_obj['queue_uuid'] = queue_id
if mac_learning_enabled is not None:
lport_obj["mac_learning"] = mac_learning_enabled
lport_obj["type"] = "LogicalSwitchPortConfig"
for address_pair in list(allowed_address_pairs or []):
lport_obj['allowed_address_pairs'].append(
{'mac_address': address_pair['mac_address'],
'ip_address': address_pair['ip_address']})
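# (Editor's note, not part of this change.) With port security enabled, a
# port with MAC fa:16:3e:00:00:01 and fixed IP 10.0.0.3 ends up with:
#     allowed_address_pairs = [
#         {'mac_address': 'fa:16:3e:00:00:01', 'ip_address': '10.0.0.3'},
#         {'mac_address': 'fa:16:3e:00:00:01', 'ip_address': '0.0.0.0'},
#     ]
# with any explicitly allowed pairs appended after these two entries.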
def get_lswitch_by_id(cluster, lswitch_id):
try:
lswitch_uri_path = _build_uri_path(
LSWITCH_RESOURCE, lswitch_id,
relations="LogicalSwitchStatus")
return do_request(HTTP_GET, lswitch_uri_path, cluster=cluster)
except exception.NotFound:
# FIXME(salv-orlando): this should not raise a neutron exception
raise exception.NetworkNotFound(net_id=lswitch_id)
def get_lswitches(cluster, neutron_net_id):
def lookup_switches_by_tag():
# Fetch extra logical switches
lswitch_query_path = _build_uri_path(
LSWITCH_RESOURCE,
fields="uuid,display_name,tags,lport_count",
relations="LogicalSwitchStatus",
filters={'tag': neutron_net_id,
'tag_scope': 'quantum_net_id'})
return get_all_query_pages(lswitch_query_path, cluster)
lswitch_uri_path = _build_uri_path(LSWITCH_RESOURCE, neutron_net_id,
relations="LogicalSwitchStatus")
results = []
try:
ls = do_request(HTTP_GET, lswitch_uri_path, cluster=cluster)
results.append(ls)
for tag in ls['tags']:
if (tag['scope'] == "multi_lswitch" and
tag['tag'] == "True"):
results.extend(lookup_switches_by_tag())
except exception.NotFound:
# This is legit if the neutron network was created using
# a post-Havana version of the plugin
results.extend(lookup_switches_by_tag())
if results:
return results
else:
raise exception.NetworkNotFound(net_id=neutron_net_id)
def create_lswitch(cluster, neutron_net_id, tenant_id, display_name,
transport_zones_config,
shared=None,
**kwargs):
# The tag scope adopts a slightly different naming convention for
# historical reasons
lswitch_obj = {"display_name": utils.check_and_truncate(display_name),
"transport_zones": transport_zones_config,
"tags": utils.get_tags(os_tid=tenant_id,
quantum_net_id=neutron_net_id)}
# TODO(salv-orlando): Now that we have async status synchronization
# this tag is perhaps not needed anymore
if shared:
lswitch_obj["tags"].append({"tag": "true",
"scope": "shared"})
if "tags" in kwargs:
lswitch_obj["tags"].extend(kwargs["tags"])
uri = _build_uri_path(LSWITCH_RESOURCE)
lswitch = do_request(HTTP_POST, uri, json.dumps(lswitch_obj),
cluster=cluster)
LOG.debug(_("Created logical switch: %s"), lswitch['uuid'])
return lswitch
def update_lswitch(cluster, lswitch_id, display_name,
tenant_id=None, **kwargs):
uri = _build_uri_path(LSWITCH_RESOURCE, resource_id=lswitch_id)
lswitch_obj = {"display_name": utils.check_and_truncate(display_name),
"tags": utils.get_tags(os_tid=tenant_id)}
if "tags" in kwargs:
lswitch_obj["tags"].extend(kwargs["tags"])
try:
return do_request(HTTP_PUT, uri, json.dumps(lswitch_obj),
cluster=cluster)
except exception.NotFound as e:
LOG.error(_("Network not found, Error: %s"), str(e))
raise exception.NetworkNotFound(net_id=lswitch_id)
def delete_network(cluster, net_id, lswitch_id):
delete_networks(cluster, net_id, [lswitch_id])
#TODO(salvatore-orlando): Simplify and harmonize
def delete_networks(cluster, net_id, lswitch_ids):
for ls_id in lswitch_ids:
path = "/ws.v1/lswitch/%s" % ls_id
try:
do_request(HTTP_DELETE, path, cluster=cluster)
except exception.NotFound as e:
LOG.error(_("Network not found, Error: %s"), str(e))
raise exception.NetworkNotFound(net_id=ls_id)
def query_lswitch_lports(cluster, ls_uuid, fields="*",
filters=None, relations=None):
# Fix filter for attachments
if filters and "attachment" in filters:
filters['attachment_vif_uuid'] = filters["attachment"]
del filters['attachment']
uri = _build_uri_path(LSWITCHPORT_RESOURCE, parent_resource_id=ls_uuid,
fields=fields, filters=filters, relations=relations)
return do_request(HTTP_GET, uri, cluster=cluster)['results']
def delete_port(cluster, switch, port):
uri = "/ws.v1/lswitch/" + switch + "/lport/" + port
try:
do_request(HTTP_DELETE, uri, cluster=cluster)
except exception.NotFound:
LOG.exception(_("Port or Network not found"))
raise exception.PortNotFoundOnNetwork(
net_id=switch, port_id=port)
except NvpApiClient.NvpApiException:
raise exception.NeutronException()
def get_ports(cluster, networks=None, devices=None, tenants=None):
vm_filter_obsolete = ""
vm_filter = ""
tenant_filter = ""
# This is used when calling delete_network. Neutron checks to see if
# the network has any ports.
if networks:
# FIXME (Aaron) If we get more than one network_id this won't work
lswitch = networks[0]
else:
lswitch = "*"
if devices:
for device_id in devices:
vm_filter_obsolete = '&'.join(
["tag_scope=vm_id",
"tag=%s" % utils.device_id_to_vm_id(device_id,
obfuscate=True),
vm_filter_obsolete])
vm_filter = '&'.join(
["tag_scope=vm_id",
"tag=%s" % utils.device_id_to_vm_id(device_id),
vm_filter])
if tenants:
for tenant in tenants:
tenant_filter = '&'.join(
["tag_scope=os_tid",
"tag=%s" % tenant,
tenant_filter])
nsx_lports = {}
lport_fields_str = ("tags,admin_status_enabled,display_name,"
"fabric_status_up")
try:
lport_query_path_obsolete = (
"/ws.v1/lswitch/%s/lport?fields=%s&%s%stag_scope=q_port_id"
"&relations=LogicalPortStatus" %
(lswitch, lport_fields_str, vm_filter_obsolete, tenant_filter))
lport_query_path = (
"/ws.v1/lswitch/%s/lport?fields=%s&%s%stag_scope=q_port_id"
"&relations=LogicalPortStatus" %
(lswitch, lport_fields_str, vm_filter, tenant_filter))
try:
# NOTE(armando-migliaccio): by querying with obsolete tag first
# current deployments won't take the performance hit of a double
# call. In release L-** or M-**, we might want to swap the calls
# as it's likely that ports with the new tag would outnumber the
# ones with the old tag
ports = get_all_query_pages(lport_query_path_obsolete, cluster)
if not ports:
ports = get_all_query_pages(lport_query_path, cluster)
except exception.NotFound:
LOG.warn(_("Lswitch %s not found in NSX"), lswitch)
ports = None
if ports:
for port in ports:
for tag in port["tags"]:
if tag["scope"] == "q_port_id":
nsx_lports[tag["tag"]] = port
except Exception:
err_msg = _("Unable to get ports")
LOG.exception(err_msg)
raise nvp_exc.NvpPluginException(err_msg=err_msg)
return nsx_lports
def get_port_by_neutron_tag(cluster, lswitch_uuid, neutron_port_id):
"""Get port by neutron tag.
Returns the NSX UUID of the logical port with tag q_port_id equal to
neutron_port_id or None if the port is not Found.
"""
uri = _build_uri_path(LSWITCHPORT_RESOURCE,
parent_resource_id=lswitch_uuid,
fields='uuid',
filters={'tag': neutron_port_id,
'tag_scope': 'q_port_id'})
LOG.debug(_("Looking for port with q_port_id tag '%(neutron_port_id)s' "
"on: '%(lswitch_uuid)s'"),
{'neutron_port_id': neutron_port_id,
'lswitch_uuid': lswitch_uuid})
res = do_request(HTTP_GET, uri, cluster=cluster)
num_results = len(res["results"])
if num_results >= 1:
if num_results > 1:
LOG.warn(_("Found '%(num_ports)d' ports with "
"q_port_id tag: '%(neutron_port_id)s'. "
"Only 1 was expected."),
{'num_ports': num_results,
'neutron_port_id': neutron_port_id})
return res["results"][0]
def get_port(cluster, network, port, relations=None):
LOG.info(_("get_port() %(network)s %(port)s"),
{'network': network, 'port': port})
uri = "/ws.v1/lswitch/" + network + "/lport/" + port + "?"
if relations:
uri += "relations=%s" % relations
try:
return do_request(HTTP_GET, uri, cluster=cluster)
except exception.NotFound as e:
LOG.error(_("Port or Network not found, Error: %s"), str(e))
raise exception.PortNotFoundOnNetwork(
port_id=port, net_id=network)
def update_port(cluster, lswitch_uuid, lport_uuid, neutron_port_id, tenant_id,
display_name, device_id, admin_status_enabled,
mac_address=None, fixed_ips=None, port_security_enabled=None,
security_profiles=None, queue_id=None,
mac_learning_enabled=None, allowed_address_pairs=None):
lport_obj = dict(
admin_status_enabled=admin_status_enabled,
display_name=utils.check_and_truncate(display_name),
tags=utils.get_tags(os_tid=tenant_id,
q_port_id=neutron_port_id,
vm_id=utils.device_id_to_vm_id(device_id)))
_configure_extensions(lport_obj, mac_address, fixed_ips,
port_security_enabled, security_profiles,
queue_id, mac_learning_enabled,
allowed_address_pairs)
path = "/ws.v1/lswitch/" + lswitch_uuid + "/lport/" + lport_uuid
try:
result = do_request(HTTP_PUT, path, json.dumps(lport_obj),
cluster=cluster)
LOG.debug(_("Updated logical port %(result)s "
"on logical switch %(uuid)s"),
{'result': result['uuid'], 'uuid': lswitch_uuid})
return result
except exception.NotFound as e:
LOG.error(_("Port or Network not found, Error: %s"), str(e))
raise exception.PortNotFoundOnNetwork(
port_id=lport_uuid, net_id=lswitch_uuid)
def create_lport(cluster, lswitch_uuid, tenant_id, neutron_port_id,
display_name, device_id, admin_status_enabled,
mac_address=None, fixed_ips=None, port_security_enabled=None,
security_profiles=None, queue_id=None,
mac_learning_enabled=None, allowed_address_pairs=None):
"""Creates a logical port on the assigned logical switch."""
display_name = utils.check_and_truncate(display_name)
lport_obj = dict(
admin_status_enabled=admin_status_enabled,
display_name=display_name,
tags=utils.get_tags(os_tid=tenant_id,
q_port_id=neutron_port_id,
vm_id=utils.device_id_to_vm_id(device_id))
)
_configure_extensions(lport_obj, mac_address, fixed_ips,
port_security_enabled, security_profiles,
queue_id, mac_learning_enabled,
allowed_address_pairs)
path = _build_uri_path(LSWITCHPORT_RESOURCE,
parent_resource_id=lswitch_uuid)
result = do_request(HTTP_POST, path, json.dumps(lport_obj),
cluster=cluster)
LOG.debug(_("Created logical port %(result)s on logical switch %(uuid)s"),
{'result': result['uuid'], 'uuid': lswitch_uuid})
return result
def get_port_status(cluster, lswitch_id, port_id):
"""Retrieve the operational status of the port."""
try:
r = do_request(HTTP_GET,
"/ws.v1/lswitch/%s/lport/%s/status" %
(lswitch_id, port_id), cluster=cluster)
except exception.NotFound as e:
LOG.error(_("Port not found, Error: %s"), str(e))
raise exception.PortNotFoundOnNetwork(
port_id=port_id, net_id=lswitch_id)
if r['link_status_up'] is True:
return constants.PORT_STATUS_ACTIVE
else:
return constants.PORT_STATUS_DOWN
def plug_interface(cluster, lswitch_id, port, port_type, attachment=None):
"""Plug a VIF Attachment object in a logical port."""
lport_obj = {}
if attachment:
lport_obj["vif_uuid"] = attachment
lport_obj["type"] = port_type
return _plug_interface(cluster, lswitch_id, port, lport_obj)
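As with the gateway module, a usage sketch (editor's illustration only; cluster and all identifiers are placeholders) of the lifecycle the plugin drives through this module:

tz_config = [{'zone_uuid': 'tz-uuid', 'transport_type': 'stt'}]
ls = switchlib.create_lswitch(cluster, 'neutron-net-id', 'tenant-1',
                              'net-1', tz_config)
lp = switchlib.create_lport(cluster, ls['uuid'], 'tenant-1',
                            'neutron-port-id', 'port-1', 'device-1', True)
switchlib.plug_interface(cluster, ls['uuid'], lp['uuid'],
                         'VifAttachment', attachment='vif-uuid')
status = switchlib.get_port_status(cluster, ls['uuid'], lp['uuid'])
switchlib.delete_port(cluster, ls['uuid'], lp['uuid'])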

View File

@ -20,7 +20,6 @@
# @author: Aaron Rosen, Nicira Networks, Inc.
import hashlib
import json
#FIXME(danwent): I'd like this file to get to the point where it has
@ -42,10 +41,10 @@ HTTP_DELETE = "DELETE"
HTTP_PUT = "PUT"
# Prefix to be used for all NVP API calls
URI_PREFIX = "/ws.v1"
# Resources exposed by NVP API
LSWITCH_RESOURCE = "lswitch"
LSWITCHPORT_RESOURCE = "lport/%s" % LSWITCH_RESOURCE
GWSERVICE_RESOURCE = "gateway-service"
# Current neutron version
NEUTRON_VERSION = version_info.release_string()
@ -54,27 +53,6 @@ NEUTRON_VERSION = version_info.release_string()
# limit be raised in future versions
MAX_PAGE_SIZE = 5000
# TODO(bgh): it would be more efficient to use a bitmap
taken_context_ids = []
# XXX Only cache default for now
_lqueue_cache = {}
def device_id_to_vm_id(device_id, obfuscate=False):
# device_id can be longer than 40 characters, for example
# a device_id for a dhcp port is like the following:
#
# dhcp83b5fdeb-e3b4-5e18-ac5f-55161...80747326-47d7-46c2-a87a-cf6d5194877c
#
# To fit it into an NVP tag we need to hash it, however device_id
# used for ports associated to VM's are small enough so let's skip the
# hashing
if len(device_id) > utils.MAX_DISPLAY_NAME_LEN or obfuscate:
return hashlib.sha1(device_id).hexdigest()
else:
return device_id
def _build_uri_path(resource,
resource_id=None,
@ -160,411 +138,6 @@ def get_all_query_pages(path, c):
return result_list
# -------------------------------------------------------------------
# Network functions
# -------------------------------------------------------------------
def get_lswitch_by_id(cluster, lswitch_id):
try:
lswitch_uri_path = _build_uri_path(
LSWITCH_RESOURCE, lswitch_id,
relations="LogicalSwitchStatus")
return do_request(HTTP_GET, lswitch_uri_path, cluster=cluster)
except exception.NotFound:
# FIXME(salv-orlando): this should not raise a neutron exception
raise exception.NetworkNotFound(net_id=lswitch_id)
def get_lswitches(cluster, neutron_net_id):
def lookup_switches_by_tag():
# Fetch extra logical switches
lswitch_query_path = _build_uri_path(
LSWITCH_RESOURCE,
fields="uuid,display_name,tags,lport_count",
relations="LogicalSwitchStatus",
filters={'tag': neutron_net_id,
'tag_scope': 'quantum_net_id'})
return get_all_query_pages(lswitch_query_path, cluster)
lswitch_uri_path = _build_uri_path(LSWITCH_RESOURCE, neutron_net_id,
relations="LogicalSwitchStatus")
results = []
try:
ls = do_request(HTTP_GET, lswitch_uri_path, cluster=cluster)
results.append(ls)
for tag in ls['tags']:
if (tag['scope'] == "multi_lswitch" and
tag['tag'] == "True"):
results.extend(lookup_switches_by_tag())
except exception.NotFound:
# This is legit if the neutron network was created using
# a post-Havana version of the plugin
results.extend(lookup_switches_by_tag())
if results:
return results
else:
raise exception.NetworkNotFound(net_id=neutron_net_id)
def create_lswitch(cluster, neutron_net_id, tenant_id, display_name,
transport_zones_config,
shared=None,
**kwargs):
# The tag scope adopts a slightly different naming convention for
# historical reasons
lswitch_obj = {"display_name": utils.check_and_truncate(display_name),
"transport_zones": transport_zones_config,
"tags": [{"tag": tenant_id, "scope": "os_tid"},
{"tag": neutron_net_id, "scope": "quantum_net_id"},
{"tag": NEUTRON_VERSION, "scope": "quantum"}]}
# TODO(salv-orlando): Now that we have async status synchronization
# this tag is perhaps not needed anymore
if shared:
lswitch_obj["tags"].append({"tag": "true",
"scope": "shared"})
if "tags" in kwargs:
lswitch_obj["tags"].extend(kwargs["tags"])
uri = _build_uri_path(LSWITCH_RESOURCE)
lswitch = do_request(HTTP_POST, uri, json.dumps(lswitch_obj),
cluster=cluster)
LOG.debug(_("Created logical switch: %s"), lswitch['uuid'])
return lswitch
def update_lswitch(cluster, lswitch_id, display_name,
tenant_id=None, **kwargs):
uri = _build_uri_path(LSWITCH_RESOURCE, resource_id=lswitch_id)
lswitch_obj = {"display_name": utils.check_and_truncate(display_name),
"tags": [{"tag": tenant_id, "scope": "os_tid"},
{"tag": NEUTRON_VERSION, "scope": "quantum"}]}
if "tags" in kwargs:
lswitch_obj["tags"].extend(kwargs["tags"])
try:
return do_request(HTTP_PUT, uri, json.dumps(lswitch_obj),
cluster=cluster)
except exception.NotFound as e:
LOG.error(_("Network not found, Error: %s"), str(e))
raise exception.NetworkNotFound(net_id=lswitch_id)
def create_l2_gw_service(cluster, tenant_id, display_name, devices):
"""Create a NVP Layer-2 Network Gateway Service.
:param cluster: The target NVP cluster
:param tenant_id: Identifier of the OpenStack tenant for which
the gateway service is created.
:param display_name: Descriptive name of this gateway service
:param devices: List of transport node uuids (and network
interfaces on them) to use for the network gateway service
:raise NvpApiException: if there is a problem while communicating
with the NVP controller
"""
tags = [{"tag": tenant_id, "scope": "os_tid"},
{"tag": NEUTRON_VERSION, "scope": "quantum"}]
# NOTE(salvatore-orlando): This is a little confusing, but device_id in
# NVP is actually the identifier of a physical interface on the gateway
# device, which in the Neutron API is referred as interface_name
gateways = [{"transport_node_uuid": device['id'],
"device_id": device['interface_name'],
"type": "L2Gateway"} for device in devices]
gwservice_obj = {
"display_name": utils.check_and_truncate(display_name),
"tags": tags,
"gateways": gateways,
"type": "L2GatewayServiceConfig"
}
return do_request(
"POST", _build_uri_path(GWSERVICE_RESOURCE),
json.dumps(gwservice_obj), cluster=cluster)
def delete_l2_gw_service(cluster, gateway_id):
do_request("DELETE", _build_uri_path(GWSERVICE_RESOURCE,
resource_id=gateway_id),
cluster=cluster)
def get_l2_gw_service(cluster, gateway_id):
return do_request(
"GET", _build_uri_path(GWSERVICE_RESOURCE,
resource_id=gateway_id),
cluster=cluster)
def get_l2_gw_services(cluster, tenant_id=None,
fields=None, filters=None):
actual_filters = dict(filters or {})
if tenant_id:
actual_filters['tag'] = tenant_id
actual_filters['tag_scope'] = 'os_tid'
return get_all_query_pages(
_build_uri_path(GWSERVICE_RESOURCE,
filters=actual_filters),
cluster)
def update_l2_gw_service(cluster, gateway_id, display_name):
# TODO(salvatore-orlando): Allow updates for gateways too
gwservice_obj = get_l2_gw_service(cluster, gateway_id)
if not display_name:
# Nothing to update
return gwservice_obj
gwservice_obj["display_name"] = utils.check_and_truncate(display_name)
return do_request("PUT", _build_uri_path(GWSERVICE_RESOURCE,
resource_id=gateway_id),
json.dumps(gwservice_obj), cluster=cluster)
def delete_network(cluster, net_id, lswitch_id):
delete_networks(cluster, net_id, [lswitch_id])
#TODO(salvatore-orlando): Simplify and harmonize
def delete_networks(cluster, net_id, lswitch_ids):
for ls_id in lswitch_ids:
path = "/ws.v1/lswitch/%s" % ls_id
try:
do_request(HTTP_DELETE, path, cluster=cluster)
except exception.NotFound as e:
LOG.error(_("Network not found, Error: %s"), str(e))
raise exception.NetworkNotFound(net_id=ls_id)
def query_lswitch_lports(cluster, ls_uuid, fields="*",
filters=None, relations=None):
# Fix filter for attachments
if filters and "attachment" in filters:
filters['attachment_vif_uuid'] = filters["attachment"]
del filters['attachment']
uri = _build_uri_path(LSWITCHPORT_RESOURCE, parent_resource_id=ls_uuid,
fields=fields, filters=filters, relations=relations)
return do_request(HTTP_GET, uri, cluster=cluster)['results']
def delete_port(cluster, switch, port):
uri = "/ws.v1/lswitch/" + switch + "/lport/" + port
try:
do_request(HTTP_DELETE, uri, cluster=cluster)
except exception.NotFound:
LOG.exception(_("Port or Network not found"))
raise exception.PortNotFoundOnNetwork(
net_id=switch, port_id=port)
except NvpApiClient.NvpApiException:
raise exception.NeutronException()
def get_ports(cluster, networks=None, devices=None, tenants=None):
vm_filter_obsolete = ""
vm_filter = ""
tenant_filter = ""
# This is used when calling delete_network. Neutron checks to see if
# the network has any ports.
if networks:
# FIXME (Aaron) If we get more than one network_id this won't work
lswitch = networks[0]
else:
lswitch = "*"
if devices:
for device_id in devices:
vm_filter_obsolete = '&'.join(
["tag_scope=vm_id",
"tag=%s" % device_id_to_vm_id(device_id, obfuscate=True),
vm_filter_obsolete])
vm_filter = '&'.join(
["tag_scope=vm_id",
"tag=%s" % device_id_to_vm_id(device_id),
vm_filter])
if tenants:
for tenant in tenants:
tenant_filter = '&'.join(
["tag_scope=os_tid",
"tag=%s" % tenant,
tenant_filter])
nvp_lports = {}
lport_fields_str = ("tags,admin_status_enabled,display_name,"
"fabric_status_up")
try:
lport_query_path_obsolete = (
"/ws.v1/lswitch/%s/lport?fields=%s&%s%stag_scope=q_port_id"
"&relations=LogicalPortStatus" %
(lswitch, lport_fields_str, vm_filter_obsolete, tenant_filter))
lport_query_path = (
"/ws.v1/lswitch/%s/lport?fields=%s&%s%stag_scope=q_port_id"
"&relations=LogicalPortStatus" %
(lswitch, lport_fields_str, vm_filter, tenant_filter))
try:
# NOTE(armando-migliaccio): by querying with obsolete tag first
# current deployments won't take the performance hit of a double
# call. In release L-** or M-**, we might want to swap the calls
# as it's likely that ports with the new tag would outnumber the
# ones with the old tag
ports = get_all_query_pages(lport_query_path_obsolete, cluster)
if not ports:
ports = get_all_query_pages(lport_query_path, cluster)
except exception.NotFound:
LOG.warn(_("Lswitch %s not found in NVP"), lswitch)
ports = None
if ports:
for port in ports:
for tag in port["tags"]:
if tag["scope"] == "q_port_id":
nvp_lports[tag["tag"]] = port
except Exception:
err_msg = _("Unable to get ports")
LOG.exception(err_msg)
raise nvp_exc.NvpPluginException(err_msg=err_msg)
return nvp_lports
def get_port_by_neutron_tag(cluster, lswitch_uuid, neutron_port_id):
"""Get port by neutron tag.
Returns the NVP UUID of the logical port with tag q_port_id equal to
neutron_port_id or None if the port is not Found.
"""
uri = _build_uri_path(LSWITCHPORT_RESOURCE,
parent_resource_id=lswitch_uuid,
fields='uuid',
filters={'tag': neutron_port_id,
'tag_scope': 'q_port_id'})
LOG.debug(_("Looking for port with q_port_id tag '%(neutron_port_id)s' "
"on: '%(lswitch_uuid)s'"),
{'neutron_port_id': neutron_port_id,
'lswitch_uuid': lswitch_uuid})
res = do_request(HTTP_GET, uri, cluster=cluster)
num_results = len(res["results"])
if num_results >= 1:
if num_results > 1:
LOG.warn(_("Found '%(num_ports)d' ports with "
"q_port_id tag: '%(neutron_port_id)s'. "
"Only 1 was expected."),
{'num_ports': num_results,
'neutron_port_id': neutron_port_id})
return res["results"][0]
def get_port(cluster, network, port, relations=None):
LOG.info(_("get_port() %(network)s %(port)s"),
{'network': network, 'port': port})
uri = "/ws.v1/lswitch/" + network + "/lport/" + port + "?"
if relations:
uri += "relations=%s" % relations
try:
return do_request(HTTP_GET, uri, cluster=cluster)
except exception.NotFound as e:
LOG.error(_("Port or Network not found, Error: %s"), str(e))
raise exception.PortNotFoundOnNetwork(
port_id=port, net_id=network)
def _configure_extensions(lport_obj, mac_address, fixed_ips,
port_security_enabled, security_profiles,
queue_id, mac_learning_enabled,
allowed_address_pairs):
lport_obj['allowed_address_pairs'] = []
if port_security_enabled:
for fixed_ip in fixed_ips:
ip_address = fixed_ip.get('ip_address')
if ip_address:
lport_obj['allowed_address_pairs'].append(
{'mac_address': mac_address, 'ip_address': ip_address})
# add address pair allowing src_ip 0.0.0.0 to leave
# this is required for outgoing dhcp request
lport_obj["allowed_address_pairs"].append(
{"mac_address": mac_address,
"ip_address": "0.0.0.0"})
lport_obj['security_profiles'] = list(security_profiles or [])
lport_obj['queue_uuid'] = queue_id
if mac_learning_enabled is not None:
lport_obj["mac_learning"] = mac_learning_enabled
lport_obj["type"] = "LogicalSwitchPortConfig"
for address_pair in list(allowed_address_pairs or []):
lport_obj['allowed_address_pairs'].append(
{'mac_address': address_pair['mac_address'],
'ip_address': address_pair['ip_address']})
def update_port(cluster, lswitch_uuid, lport_uuid, neutron_port_id, tenant_id,
display_name, device_id, admin_status_enabled,
mac_address=None, fixed_ips=None, port_security_enabled=None,
security_profiles=None, queue_id=None,
mac_learning_enabled=None, allowed_address_pairs=None):
lport_obj = dict(
admin_status_enabled=admin_status_enabled,
display_name=utils.check_and_truncate(display_name),
tags=[dict(scope='os_tid', tag=tenant_id),
dict(scope='q_port_id', tag=neutron_port_id),
dict(scope='vm_id', tag=device_id_to_vm_id(device_id)),
dict(scope='quantum', tag=NEUTRON_VERSION)])
_configure_extensions(lport_obj, mac_address, fixed_ips,
port_security_enabled, security_profiles,
queue_id, mac_learning_enabled,
allowed_address_pairs)
path = "/ws.v1/lswitch/" + lswitch_uuid + "/lport/" + lport_uuid
try:
result = do_request(HTTP_PUT, path, json.dumps(lport_obj),
cluster=cluster)
LOG.debug(_("Updated logical port %(result)s "
"on logical switch %(uuid)s"),
{'result': result['uuid'], 'uuid': lswitch_uuid})
return result
except exception.NotFound as e:
LOG.error(_("Port or Network not found, Error: %s"), str(e))
raise exception.PortNotFoundOnNetwork(
port_id=lport_uuid, net_id=lswitch_uuid)
def create_lport(cluster, lswitch_uuid, tenant_id, neutron_port_id,
display_name, device_id, admin_status_enabled,
mac_address=None, fixed_ips=None, port_security_enabled=None,
security_profiles=None, queue_id=None,
mac_learning_enabled=None, allowed_address_pairs=None):
"""Creates a logical port on the assigned logical switch."""
display_name = utils.check_and_truncate(display_name)
lport_obj = dict(
admin_status_enabled=admin_status_enabled,
display_name=display_name,
tags=[dict(scope='os_tid', tag=tenant_id),
dict(scope='q_port_id', tag=neutron_port_id),
dict(scope='vm_id', tag=device_id_to_vm_id(device_id)),
dict(scope='quantum', tag=NEUTRON_VERSION)],
)
_configure_extensions(lport_obj, mac_address, fixed_ips,
port_security_enabled, security_profiles,
queue_id, mac_learning_enabled,
allowed_address_pairs)
path = _build_uri_path(LSWITCHPORT_RESOURCE,
parent_resource_id=lswitch_uuid)
result = do_request(HTTP_POST, path, json.dumps(lport_obj),
cluster=cluster)
LOG.debug(_("Created logical port %(result)s on logical switch %(uuid)s"),
{'result': result['uuid'], 'uuid': lswitch_uuid})
return result
def get_port_status(cluster, lswitch_id, port_id):
"""Retrieve the operational status of the port."""
try:
r = do_request(HTTP_GET,
"/ws.v1/lswitch/%s/lport/%s/status" %
(lswitch_id, port_id), cluster=cluster)
except exception.NotFound as e:
LOG.error(_("Port not found, Error: %s"), str(e))
raise exception.PortNotFoundOnNetwork(
port_id=port_id, net_id=lswitch_id)
if r['link_status_up'] is True:
return constants.PORT_STATUS_ACTIVE
else:
return constants.PORT_STATUS_DOWN
def _plug_interface(cluster, lswitch_id, lport_id, att_obj):
uri = _build_uri_path(LSWITCHPORT_RESOURCE, lport_id, lswitch_id,
is_attachment=True)
@ -572,25 +145,6 @@ def _plug_interface(cluster, lswitch_id, lport_id, att_obj):
cluster=cluster)
def plug_l2_gw_service(cluster, lswitch_id, lport_id,
gateway_id, vlan_id=None):
"""Plug a Layer-2 Gateway Attachment object in a logical port."""
att_obj = {'type': 'L2GatewayAttachment',
'l2_gateway_service_uuid': gateway_id}
if vlan_id:
att_obj['vlan_id'] = vlan_id
return _plug_interface(cluster, lswitch_id, lport_id, att_obj)
def plug_interface(cluster, lswitch_id, port, type, attachment=None):
"""Plug a VIF Attachment object in a logical port."""
lport_obj = {}
if attachment:
lport_obj["vif_uuid"] = attachment
lport_obj["type"] = type
return _plug_interface(cluster, lswitch_id, port, lport_obj)
#------------------------------------------------------------------------------
# Security Profile convenience functions.
#------------------------------------------------------------------------------

View File

@ -0,0 +1,147 @@
# Copyright (c) 2014 VMware, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from neutron.plugins.nicira.nsxlib import l2gateway as l2gwlib
from neutron.plugins.nicira.nsxlib import switch as switchlib
from neutron.plugins.nicira import nvplib
from neutron.tests.unit.nicira.test_nvplib import NsxlibNegativeBaseTestCase
from neutron.tests.unit.nicira.test_nvplib import NvplibTestCase
from neutron.tests.unit import test_api_v2
_uuid = test_api_v2._uuid
class L2GatewayNegativeTestCase(NsxlibNegativeBaseTestCase):
def test_create_l2_gw_service_on_failure(self):
self.assertRaises(nvplib.NvpApiClient.NvpApiException,
l2gwlib.create_l2_gw_service,
self.fake_cluster,
'fake-tenant',
'fake-gateway',
[{'id': _uuid(),
'interface_name': 'xxx'}])
def test_delete_l2_gw_service_on_failure(self):
self.assertRaises(nvplib.NvpApiClient.NvpApiException,
l2gwlib.delete_l2_gw_service,
self.fake_cluster,
'fake-gateway')
def test_get_l2_gw_service_on_failure(self):
self.assertRaises(nvplib.NvpApiClient.NvpApiException,
l2gwlib.get_l2_gw_service,
self.fake_cluster,
'fake-gateway')
def test_update_l2_gw_service_on_failure(self):
self.assertRaises(nvplib.NvpApiClient.NvpApiException,
l2gwlib.update_l2_gw_service,
self.fake_cluster,
'fake-gateway',
'pluto')
class L2GatewayTestCase(NvplibTestCase):
def _create_gw_service(self, node_uuid, display_name,
tenant_id='fake_tenant'):
return l2gwlib.create_l2_gw_service(self.fake_cluster,
tenant_id,
display_name,
[{'id': node_uuid,
'interface_name': 'xxx'}])
def test_create_l2_gw_service(self):
display_name = 'fake-gateway'
node_uuid = _uuid()
response = self._create_gw_service(node_uuid, display_name)
self.assertEqual(response.get('type'), 'L2GatewayServiceConfig')
self.assertEqual(response.get('display_name'), display_name)
gateways = response.get('gateways', [])
self.assertEqual(len(gateways), 1)
self.assertEqual(gateways[0]['type'], 'L2Gateway')
self.assertEqual(gateways[0]['device_id'], 'xxx')
self.assertEqual(gateways[0]['transport_node_uuid'], node_uuid)
def test_update_l2_gw_service(self):
display_name = 'fake-gateway'
new_display_name = 'still-fake-gateway'
node_uuid = _uuid()
res1 = self._create_gw_service(node_uuid, display_name)
gw_id = res1['uuid']
res2 = l2gwlib.update_l2_gw_service(
self.fake_cluster, gw_id, new_display_name)
self.assertEqual(res2['display_name'], new_display_name)
def test_get_l2_gw_service(self):
display_name = 'fake-gateway'
node_uuid = _uuid()
gw_id = self._create_gw_service(node_uuid, display_name)['uuid']
response = l2gwlib.get_l2_gw_service(self.fake_cluster, gw_id)
self.assertEqual(response.get('type'), 'L2GatewayServiceConfig')
self.assertEqual(response.get('display_name'), display_name)
self.assertEqual(response.get('uuid'), gw_id)
def test_list_l2_gw_service(self):
gw_ids = []
for name in ('fake-1', 'fake-2'):
gw_ids.append(self._create_gw_service(_uuid(), name)['uuid'])
results = l2gwlib.get_l2_gw_services(self.fake_cluster)
self.assertEqual(len(results), 2)
self.assertEqual(sorted(gw_ids), sorted([r['uuid'] for r in results]))
def test_list_l2_gw_service_by_tenant(self):
gw_ids = [self._create_gw_service(
_uuid(), name, tenant_id=name)['uuid']
for name in ('fake-1', 'fake-2')]
results = l2gwlib.get_l2_gw_services(self.fake_cluster,
tenant_id='fake-1')
self.assertEqual(len(results), 1)
self.assertEqual(results[0]['uuid'], gw_ids[0])
def test_delete_l2_gw_service(self):
display_name = 'fake-gateway'
node_uuid = _uuid()
gw_id = self._create_gw_service(node_uuid, display_name)['uuid']
l2gwlib.delete_l2_gw_service(self.fake_cluster, gw_id)
results = l2gwlib.get_l2_gw_services(self.fake_cluster)
self.assertEqual(len(results), 0)
def test_plug_l2_gw_port_attachment(self):
tenant_id = 'pippo'
node_uuid = _uuid()
transport_zones_config = [{'zone_uuid': _uuid(),
'transport_type': 'stt'}]
lswitch = switchlib.create_lswitch(
self.fake_cluster, _uuid(), tenant_id,
'fake-switch', transport_zones_config)
gw_id = self._create_gw_service(node_uuid, 'fake-gw')['uuid']
lport = switchlib.create_lport(
self.fake_cluster, lswitch['uuid'], tenant_id, _uuid(),
'fake-gw-port', gw_id, True)
l2gwlib.plug_l2_gw_service(
self.fake_cluster, lswitch['uuid'],
lport['uuid'], gw_id)
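        # Verify the attachment by querying the logical port attachment URI
        # directly on the (fake) backend.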
uri = nvplib._build_uri_path(nvplib.LSWITCHPORT_RESOURCE,
lport['uuid'],
lswitch['uuid'],
is_attachment=True)
resp_obj = nvplib.do_request("GET", uri,
cluster=self.fake_cluster)
self.assertIn('LogicalPortAttachment', resp_obj)
self.assertEqual(resp_obj['LogicalPortAttachment']['type'],
'L2GatewayAttachment')


@ -20,6 +20,7 @@ from neutron.common import exceptions
from neutron.openstack.common import uuidutils
from neutron.plugins.nicira.common import exceptions as nvp_exc
from neutron.plugins.nicira.nsxlib import router as routerlib
from neutron.plugins.nicira.nsxlib import switch as switchlib
from neutron.plugins.nicira import NvpApiClient
from neutron.plugins.nicira import nvplib
from neutron.tests.unit.nicira.test_nvplib import NsxlibNegativeBaseTestCase
@ -688,13 +689,13 @@ class TestLogicalRouters(NvplibTestCase):
tenant_id = 'pippo'
transport_zones_config = [{'zone_uuid': _uuid(),
'transport_type': 'stt'}]
lswitch = nvplib.create_lswitch(self.fake_cluster,
_uuid(),
tenant_id, 'fake-switch',
transport_zones_config)
lport = nvplib.create_lport(self.fake_cluster, lswitch['uuid'],
tenant_id, 'xyz',
'name', 'device_id', True)
lswitch = switchlib.create_lswitch(self.fake_cluster,
_uuid(),
tenant_id, 'fake-switch',
transport_zones_config)
lport = switchlib.create_lport(self.fake_cluster, lswitch['uuid'],
tenant_id, 'xyz',
'name', 'device_id', True)
lrouter = routerlib.create_lrouter(self.fake_cluster,
uuidutils.generate_uuid(),
tenant_id,


@ -0,0 +1,289 @@
# Copyright (c) 2014 VMware, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import hashlib
import mock
from neutron.common import constants
from neutron.common import exceptions
from neutron.plugins.nicira.common import utils
from neutron.plugins.nicira.nsxlib import switch as switchlib
from neutron.tests.unit.nicira.test_nvplib import NvplibTestCase
from neutron.tests.unit import test_api_v2
_uuid = test_api_v2._uuid
class LogicalSwitchesTestCase(NvplibTestCase):
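    # Lifecycle coverage for the logical switch helpers now living in
    # nsxlib.switch (create/get/update/delete).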
def test_create_and_get_lswitches_single(self):
tenant_id = 'pippo'
transport_zones_config = [{'zone_uuid': _uuid(),
'transport_type': 'stt'}]
lswitch = switchlib.create_lswitch(self.fake_cluster,
_uuid(),
tenant_id,
'fake-switch',
transport_zones_config)
res_lswitch = switchlib.get_lswitches(self.fake_cluster,
lswitch['uuid'])
self.assertEqual(len(res_lswitch), 1)
self.assertEqual(res_lswitch[0]['uuid'],
lswitch['uuid'])
def test_create_and_get_lswitches_single_name_exceeds_40_chars(self):
tenant_id = 'pippo'
transport_zones_config = [{'zone_uuid': _uuid(),
'transport_type': 'stt'}]
lswitch = switchlib.create_lswitch(self.fake_cluster,
tenant_id,
_uuid(),
'*' * 50,
transport_zones_config)
res_lswitch = switchlib.get_lswitches(self.fake_cluster,
lswitch['uuid'])
self.assertEqual(len(res_lswitch), 1)
self.assertEqual(res_lswitch[0]['uuid'], lswitch['uuid'])
self.assertEqual(res_lswitch[0]['display_name'], '*' * 40)
def test_create_and_get_lswitches_multiple(self):
tenant_id = 'pippo'
transport_zones_config = [{'zone_uuid': _uuid(),
'transport_type': 'stt'}]
network_id = _uuid()
main_lswitch = switchlib.create_lswitch(
self.fake_cluster, network_id,
tenant_id, 'fake-switch', transport_zones_config,
tags=[{'scope': 'multi_lswitch', 'tag': 'True'}])
# Create secondary lswitch
second_lswitch = switchlib.create_lswitch(
self.fake_cluster, network_id,
tenant_id, 'fake-switch-2', transport_zones_config)
res_lswitch = switchlib.get_lswitches(self.fake_cluster,
network_id)
self.assertEqual(len(res_lswitch), 2)
switch_uuids = [ls['uuid'] for ls in res_lswitch]
self.assertIn(main_lswitch['uuid'], switch_uuids)
self.assertIn(second_lswitch['uuid'], switch_uuids)
for ls in res_lswitch:
if ls['uuid'] == main_lswitch['uuid']:
main_ls = ls
else:
second_ls = ls
main_ls_tags = self._build_tag_dict(main_ls['tags'])
second_ls_tags = self._build_tag_dict(second_ls['tags'])
self.assertIn('multi_lswitch', main_ls_tags)
self.assertNotIn('multi_lswitch', second_ls_tags)
self.assertIn('quantum_net_id', main_ls_tags)
self.assertIn('quantum_net_id', second_ls_tags)
self.assertEqual(main_ls_tags['quantum_net_id'],
network_id)
self.assertEqual(second_ls_tags['quantum_net_id'],
network_id)
def test_update_lswitch(self):
new_name = 'new-name'
new_tags = [{'scope': 'new_tag', 'tag': 'xxx'}]
transport_zones_config = [{'zone_uuid': _uuid(),
'transport_type': 'stt'}]
lswitch = switchlib.create_lswitch(self.fake_cluster,
_uuid(),
'pippo',
'fake-switch',
transport_zones_config)
switchlib.update_lswitch(self.fake_cluster, lswitch['uuid'],
new_name, tags=new_tags)
res_lswitch = switchlib.get_lswitches(self.fake_cluster,
lswitch['uuid'])
self.assertEqual(len(res_lswitch), 1)
self.assertEqual(res_lswitch[0]['display_name'], new_name)
switch_tags = self._build_tag_dict(res_lswitch[0]['tags'])
self.assertIn('new_tag', switch_tags)
self.assertEqual(switch_tags['new_tag'], 'xxx')
def test_update_non_existing_lswitch_raises(self):
self.assertRaises(exceptions.NetworkNotFound,
switchlib.update_lswitch,
self.fake_cluster, 'whatever',
'foo', 'bar')
def test_delete_networks(self):
transport_zones_config = [{'zone_uuid': _uuid(),
'transport_type': 'stt'}]
lswitch = switchlib.create_lswitch(self.fake_cluster,
_uuid(),
'pippo',
'fake-switch',
transport_zones_config)
switchlib.delete_networks(self.fake_cluster, lswitch['uuid'],
[lswitch['uuid']])
self.assertRaises(exceptions.NotFound,
switchlib.get_lswitches,
self.fake_cluster,
lswitch['uuid'])
def test_delete_non_existing_lswitch_raises(self):
self.assertRaises(exceptions.NetworkNotFound,
switchlib.delete_networks,
self.fake_cluster, 'whatever', ['whatever'])
class LogicalPortsTestCase(NvplibTestCase):
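    # Logical port helpers (create/get/update/delete, attachments, tags) as
    # exposed by nsxlib.switch.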
def _create_switch_and_port(self, tenant_id='pippo',
neutron_port_id='whatever',
name='name', device_id='device_id'):
transport_zones_config = [{'zone_uuid': _uuid(),
'transport_type': 'stt'}]
lswitch = switchlib.create_lswitch(self.fake_cluster,
_uuid(), tenant_id, 'fake-switch',
transport_zones_config)
lport = switchlib.create_lport(self.fake_cluster, lswitch['uuid'],
tenant_id, neutron_port_id,
name, device_id, True)
return lswitch, lport
def test_create_and_get_port(self):
lswitch, lport = self._create_switch_and_port()
lport_res = switchlib.get_port(self.fake_cluster,
lswitch['uuid'], lport['uuid'])
self.assertEqual(lport['uuid'], lport_res['uuid'])
# Try again with relation
lport_res = switchlib.get_port(self.fake_cluster,
lswitch['uuid'], lport['uuid'],
relations='LogicalPortStatus')
self.assertEqual(lport['uuid'], lport_res['uuid'])
def test_plug_interface(self):
lswitch, lport = self._create_switch_and_port()
switchlib.plug_interface(self.fake_cluster, lswitch['uuid'],
lport['uuid'], 'VifAttachment', 'fake')
lport_res = switchlib.get_port(self.fake_cluster,
lswitch['uuid'], lport['uuid'])
self.assertEqual(lport['uuid'], lport_res['uuid'])
def test_get_port_by_tag(self):
lswitch, lport = self._create_switch_and_port()
lport2 = switchlib.get_port_by_neutron_tag(self.fake_cluster,
lswitch['uuid'],
'whatever')
self.assertIsNotNone(lport2)
self.assertEqual(lport['uuid'], lport2['uuid'])
def test_get_port_by_tag_not_found_returns_None(self):
tenant_id = 'pippo'
neutron_port_id = 'whatever'
transport_zones_config = [{'zone_uuid': _uuid(),
'transport_type': 'stt'}]
lswitch = switchlib.create_lswitch(
self.fake_cluster, tenant_id, _uuid(),
'fake-switch', transport_zones_config)
lport = switchlib.get_port_by_neutron_tag(
self.fake_cluster, lswitch['uuid'], neutron_port_id)
self.assertIsNone(lport)
def test_get_port_status(self):
lswitch, lport = self._create_switch_and_port()
status = switchlib.get_port_status(
self.fake_cluster, lswitch['uuid'], lport['uuid'])
self.assertEqual(constants.PORT_STATUS_ACTIVE, status)
def test_get_port_status_non_existent_raises(self):
self.assertRaises(exceptions.PortNotFoundOnNetwork,
switchlib.get_port_status,
self.fake_cluster,
'boo', 'boo')
def test_update_port(self):
lswitch, lport = self._create_switch_and_port()
switchlib.update_port(
self.fake_cluster, lswitch['uuid'], lport['uuid'],
'neutron_port_id', 'pippo2', 'new_name', 'device_id', False)
lport_res = switchlib.get_port(self.fake_cluster,
lswitch['uuid'], lport['uuid'])
self.assertEqual(lport['uuid'], lport_res['uuid'])
self.assertEqual('new_name', lport_res['display_name'])
self.assertEqual('False', lport_res['admin_status_enabled'])
port_tags = self._build_tag_dict(lport_res['tags'])
self.assertIn('os_tid', port_tags)
self.assertIn('q_port_id', port_tags)
self.assertIn('vm_id', port_tags)
def test_create_port_device_id_less_than_40_chars(self):
lswitch, lport = self._create_switch_and_port()
lport_res = switchlib.get_port(self.fake_cluster,
lswitch['uuid'], lport['uuid'])
port_tags = self._build_tag_dict(lport_res['tags'])
self.assertEqual('device_id', port_tags['vm_id'])
def test_create_port_device_id_more_than_40_chars(self):
dev_id = "this_is_a_very_long_device_id_with_lots_of_characters"
lswitch, lport = self._create_switch_and_port(device_id=dev_id)
lport_res = switchlib.get_port(self.fake_cluster,
lswitch['uuid'], lport['uuid'])
port_tags = self._build_tag_dict(lport_res['tags'])
self.assertNotEqual(len(dev_id), len(port_tags['vm_id']))
def test_get_ports_with_obsolete_and_new_vm_id_tag(self):
def obsolete(device_id, obfuscate=False):
return hashlib.sha1(device_id).hexdigest()
with mock.patch.object(utils, 'device_id_to_vm_id', new=obsolete):
dev_id1 = "short-dev-id-1"
_, lport1 = self._create_switch_and_port(device_id=dev_id1)
dev_id2 = "short-dev-id-2"
_, lport2 = self._create_switch_and_port(device_id=dev_id2)
lports = switchlib.get_ports(self.fake_cluster, None, [dev_id1])
port_tags = self._build_tag_dict(lports['whatever']['tags'])
self.assertNotEqual(dev_id1, port_tags['vm_id'])
lports = switchlib.get_ports(self.fake_cluster, None, [dev_id2])
port_tags = self._build_tag_dict(lports['whatever']['tags'])
self.assertEqual(dev_id2, port_tags['vm_id'])
def test_update_non_existent_port_raises(self):
self.assertRaises(exceptions.PortNotFoundOnNetwork,
switchlib.update_port, self.fake_cluster,
'boo', 'boo', 'boo', 'boo', 'boo', 'boo', False)
def test_delete_port(self):
lswitch, lport = self._create_switch_and_port()
switchlib.delete_port(self.fake_cluster,
lswitch['uuid'], lport['uuid'])
self.assertRaises(exceptions.PortNotFoundOnNetwork,
switchlib.get_port, self.fake_cluster,
lswitch['uuid'], lport['uuid'])
def test_delete_non_existent_port_raises(self):
lswitch = self._create_switch_and_port()[0]
self.assertRaises(exceptions.PortNotFoundOnNetwork,
switchlib.delete_port, self.fake_cluster,
lswitch['uuid'], 'bad_port_uuid')
def test_query_lswitch_ports(self):
lswitch, lport = self._create_switch_and_port()
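        # Create two additional ports on the same switch; the query below
        # should return all three.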
switch_port_uuids = [
switchlib.create_lport(
self.fake_cluster, lswitch['uuid'], 'pippo', 'qportid-%s' % k,
'port-%s' % k, 'deviceid-%s' % k, True)['uuid']
for k in range(2)]
switch_port_uuids.append(lport['uuid'])
ports = switchlib.query_lswitch_lports(
self.fake_cluster, lswitch['uuid'])
self.assertEqual(len(ports), 3)
for res_port in ports:
self.assertIn(res_port['uuid'], switch_port_uuids)


@ -435,7 +435,8 @@ class LsnManagerTestCase(base.BaseTestCase):
self.mock_lsn_api.lsn_port_create.return_value = self.lsn_port_id
with mock.patch.object(
self.manager, 'lsn_get', return_value=self.lsn_id):
with mock.patch.object(lsn_man.nsxlib, 'get_port_by_neutron_tag'):
with mock.patch.object(lsn_man.switch_api,
'get_port_by_neutron_tag'):
expected = self.manager.lsn_port_dhcp_setup(
mock.ANY, mock.ANY, mock.ANY, mock.ANY, subnet_config=sub)
self.assertEqual(
@ -453,7 +454,8 @@ class LsnManagerTestCase(base.BaseTestCase):
self.assertEqual(1, f.call_count)
def test_lsn_port_dhcp_setup_with_not_found(self):
with mock.patch.object(lsn_man.nsxlib, 'get_port_by_neutron_tag') as f:
with mock.patch.object(lsn_man.switch_api,
'get_port_by_neutron_tag') as f:
f.side_effect = n_exc.NotFound
self.assertRaises(p_exc.PortConfigurationError,
self.manager.lsn_port_dhcp_setup,
@ -462,7 +464,7 @@ class LsnManagerTestCase(base.BaseTestCase):
def test_lsn_port_dhcp_setup_with_conflict(self):
self.mock_lsn_api.lsn_port_plug_network.side_effect = (
p_exc.LsnConfigurationConflict(lsn_id=self.lsn_id))
with mock.patch.object(lsn_man.nsxlib, 'get_port_by_neutron_tag'):
with mock.patch.object(lsn_man.switch_api, 'get_port_by_neutron_tag'):
with mock.patch.object(self.manager, 'lsn_port_delete') as g:
self.assertRaises(p_exc.PortConfigurationError,
self.manager.lsn_port_dhcp_setup,
@ -558,7 +560,7 @@ class LsnManagerTestCase(base.BaseTestCase):
'network_id': self.net_id,
'tenant_id': self.tenant_id
}
with mock.patch.object(lsn_man.nsxlib, 'create_lport') as f:
with mock.patch.object(lsn_man.switch_api, 'create_lport') as f:
f.return_value = {'uuid': self.port_id}
self.manager.lsn_port_metadata_setup(mock.ANY, self.lsn_id, subnet)
self.assertEqual(1, self.mock_lsn_api.lsn_port_create.call_count)
@ -572,7 +574,7 @@ class LsnManagerTestCase(base.BaseTestCase):
'network_id': self.net_id,
'tenant_id': self.tenant_id
}
with mock.patch.object(lsn_man.nsxlib, 'create_lport') as f:
with mock.patch.object(lsn_man.switch_api, 'create_lport') as f:
f.side_effect = n_exc.NotFound
self.assertRaises(p_exc.PortConfigurationError,
self.manager.lsn_port_metadata_setup,
@ -585,8 +587,8 @@ class LsnManagerTestCase(base.BaseTestCase):
'network_id': self.net_id,
'tenant_id': self.tenant_id
}
with mock.patch.object(lsn_man.nsxlib, 'create_lport') as f:
with mock.patch.object(lsn_man.nsxlib, 'delete_port') as g:
with mock.patch.object(lsn_man.switch_api, 'create_lport') as f:
with mock.patch.object(lsn_man.switch_api, 'delete_port') as g:
f.return_value = {'uuid': self.port_id}
self.mock_lsn_api.lsn_port_plug_network.side_effect = (
p_exc.LsnConfigurationConflict(lsn_id=self.lsn_id))
@ -611,8 +613,9 @@ class LsnManagerTestCase(base.BaseTestCase):
def test_lsn_port_dispose_meta_mac(self):
self.mac = constants.METADATA_MAC
with mock.patch.object(lsn_man.nsxlib, 'get_port_by_neutron_tag') as f:
with mock.patch.object(lsn_man.nsxlib, 'delete_port') as g:
with mock.patch.object(lsn_man.switch_api,
'get_port_by_neutron_tag') as f:
with mock.patch.object(lsn_man.switch_api, 'delete_port') as g:
f.return_value = {'uuid': self.port_id}
self._test_lsn_port_dispose_with_values(
self.lsn_id, self.lsn_port_id, 1)


@ -197,8 +197,8 @@ class TestNiciraPortsV2(NiciraPluginV2TestCase,
with self.port(subnet=sub):
with self.port(subnet=sub):
plugin = manager.NeutronManager.get_plugin()
ls = nvplib.get_lswitches(plugin.cluster,
net['network']['id'])
ls = nsxlib.switch.get_lswitches(plugin.cluster,
net['network']['id'])
self.assertEqual(len(ls), 2)
def test_update_port_delete_ip(self):
@ -236,7 +236,7 @@ class TestNiciraPortsV2(NiciraPluginV2TestCase,
self.assertFalse(self.fc._fake_lswitch_lport_dict)
def test_create_port_nvp_error_no_orphan_left(self):
with mock.patch.object(nvplib, 'create_lport',
with mock.patch.object(nsxlib.switch, 'create_lport',
side_effect=NvpApiClient.NvpApiException):
with self.network() as net:
net_id = net['network']['id']
@ -1473,7 +1473,7 @@ class TestNiciraNetworkGateway(NiciraPluginV2TestCase,
def test_update_network_gateway_with_name_calls_backend(self):
with mock.patch.object(
nvplib, 'update_l2_gw_service') as mock_update_gw:
nsxlib.l2gateway, 'update_l2_gw_service') as mock_update_gw:
with self._network_gateway(name='cavani') as nw_gw:
nw_gw_id = nw_gw[self.resource]['id']
self._update(nvp_networkgw.COLLECTION_NAME, nw_gw_id,
@ -1483,7 +1483,7 @@ class TestNiciraNetworkGateway(NiciraPluginV2TestCase,
def test_update_network_gateway_without_name_does_not_call_backend(self):
with mock.patch.object(
nvplib, 'update_l2_gw_service') as mock_update_gw:
nsxlib.l2gateway, 'update_l2_gw_service') as mock_update_gw:
with self._network_gateway(name='something') as nw_gw:
nw_gw_id = nw_gw[self.resource]['id']
self._update(nvp_networkgw.COLLECTION_NAME, nw_gw_id,
@ -1510,7 +1510,7 @@ class TestNiciraNetworkGateway(NiciraPluginV2TestCase,
def raise_nvp_api_exc(*args, **kwargs):
raise NvpApiClient.NvpApiException
with mock.patch.object(nvplib,
with mock.patch.object(nsxlib.l2gateway,
'create_l2_gw_service',
new=raise_nvp_api_exc):
res = self._create_network_gateway(
@ -1519,7 +1519,7 @@ class TestNiciraNetworkGateway(NiciraPluginV2TestCase,
self.assertEqual(500, res.status_int)
def test_create_network_gateway_nvp_error_returns_409(self):
with mock.patch.object(nvplib,
with mock.patch.object(nsxlib.l2gateway,
'create_l2_gw_service',
side_effect=NvpApiClient.Conflict):
res = self._create_network_gateway(


@ -102,7 +102,8 @@ class NsxUtilsTestCase(base.BaseTestCase):
exp_lp_uuid = uuidutils.generate_uuid()
ret_value = None, exp_lp_uuid
self._mock_port_mapping_db_calls(ret_value)
with mock.patch(nicira_method('query_lswitch_lports'),
with mock.patch(nicira_method('query_lswitch_lports',
module_name='nsxlib.switch'),
return_value=[{'uuid': exp_lp_uuid,
'_relations': {
'LogicalSwitchConfig': {
@ -117,7 +118,8 @@ class NsxUtilsTestCase(base.BaseTestCase):
exp_lp_uuid = uuidutils.generate_uuid()
ret_value = None, None
self._mock_port_mapping_db_calls(ret_value)
with mock.patch(nicira_method('query_lswitch_lports'),
with mock.patch(nicira_method('query_lswitch_lports',
module_name='nsxlib.switch'),
return_value=[{'uuid': exp_lp_uuid,
'_relations': {
'LogicalSwitchConfig': {
@ -130,7 +132,8 @@ class NsxUtilsTestCase(base.BaseTestCase):
# mappings are not found both in the db and the backend
ret_value = None, None
self._mock_port_mapping_db_calls(ret_value)
with mock.patch(nicira_method('query_lswitch_lports'),
with mock.patch(nicira_method('query_lswitch_lports',
module_name='nsxlib.switch'),
return_value=[]):
self._verify_get_nsx_switch_and_port_id(None, None)
@ -146,7 +149,8 @@ class NsxUtilsTestCase(base.BaseTestCase):
# found for a given network identifier
exp_ls_uuids = [uuidutils.generate_uuid()]
self._mock_network_mapping_db_calls(None)
with mock.patch(nicira_method('get_lswitches'),
with mock.patch(nicira_method('get_lswitches',
module_name='nsxlib.switch'),
return_value=[{'uuid': uuid}
for uuid in exp_ls_uuids]):
self._verify_get_nsx_switch_ids(exp_ls_uuids)
@ -155,7 +159,8 @@ class NsxUtilsTestCase(base.BaseTestCase):
# This test verifies that the function returns None if the mappings
# are not found both in the db and in the backend
self._mock_network_mapping_db_calls(None)
with mock.patch(nicira_method('get_lswitches'),
with mock.patch(nicira_method('get_lswitches',
module_name='nsxlib.switch'),
return_value=[]):
self._verify_get_nsx_switch_ids(None)


@ -17,10 +17,8 @@
#
# @author: Salvatore Orlando, VMware
import hashlib
import mock
from neutron.common import constants
from neutron.common import exceptions
from neutron.plugins.nicira.common import config # noqa
from neutron.plugins.nicira.common import exceptions as nvp_exc
@ -104,247 +102,6 @@ class NsxlibNegativeBaseTestCase(base.BaseTestCase):
self.addCleanup(self.mock_nvpapi.stop)
class L2GatewayNegativeTestCase(NsxlibNegativeBaseTestCase):
def test_create_l2_gw_service_on_failure(self):
self.assertRaises(nvplib.NvpApiClient.NvpApiException,
nvplib.create_l2_gw_service,
self.fake_cluster,
'fake-tenant',
'fake-gateway',
[{'id': _uuid(),
'interface_name': 'xxx'}])
def test_delete_l2_gw_service_on_failure(self):
self.assertRaises(nvplib.NvpApiClient.NvpApiException,
nvplib.delete_l2_gw_service,
self.fake_cluster,
'fake-gateway')
def test_get_l2_gw_service_on_failure(self):
self.assertRaises(nvplib.NvpApiClient.NvpApiException,
nvplib.get_l2_gw_service,
self.fake_cluster,
'fake-gateway')
def test_update_l2_gw_service_on_failure(self):
self.assertRaises(nvplib.NvpApiClient.NvpApiException,
nvplib.update_l2_gw_service,
self.fake_cluster,
'fake-gateway',
'pluto')
class TestNvplibL2Gateway(NvplibTestCase):
def _create_gw_service(self, node_uuid, display_name,
tenant_id='fake_tenant'):
return nvplib.create_l2_gw_service(self.fake_cluster,
tenant_id,
display_name,
[{'id': node_uuid,
'interface_name': 'xxx'}])
def test_create_l2_gw_service(self):
display_name = 'fake-gateway'
node_uuid = _uuid()
response = self._create_gw_service(node_uuid, display_name)
self.assertEqual(response.get('type'), 'L2GatewayServiceConfig')
self.assertEqual(response.get('display_name'), display_name)
gateways = response.get('gateways', [])
self.assertEqual(len(gateways), 1)
self.assertEqual(gateways[0]['type'], 'L2Gateway')
self.assertEqual(gateways[0]['device_id'], 'xxx')
self.assertEqual(gateways[0]['transport_node_uuid'], node_uuid)
def test_update_l2_gw_service(self):
display_name = 'fake-gateway'
new_display_name = 'still-fake-gateway'
node_uuid = _uuid()
res1 = self._create_gw_service(node_uuid, display_name)
gw_id = res1['uuid']
res2 = nvplib.update_l2_gw_service(self.fake_cluster, gw_id,
new_display_name)
self.assertEqual(res2['display_name'], new_display_name)
def test_get_l2_gw_service(self):
display_name = 'fake-gateway'
node_uuid = _uuid()
gw_id = self._create_gw_service(node_uuid, display_name)['uuid']
response = nvplib.get_l2_gw_service(self.fake_cluster, gw_id)
self.assertEqual(response.get('type'), 'L2GatewayServiceConfig')
self.assertEqual(response.get('display_name'), display_name)
self.assertEqual(response.get('uuid'), gw_id)
def test_list_l2_gw_service(self):
gw_ids = []
for name in ('fake-1', 'fake-2'):
gw_ids.append(self._create_gw_service(_uuid(), name)['uuid'])
results = nvplib.get_l2_gw_services(self.fake_cluster)
self.assertEqual(len(results), 2)
self.assertEqual(sorted(gw_ids), sorted([r['uuid'] for r in results]))
def test_list_l2_gw_service_by_tenant(self):
gw_ids = [self._create_gw_service(
_uuid(), name, tenant_id=name)['uuid']
for name in ('fake-1', 'fake-2')]
results = nvplib.get_l2_gw_services(self.fake_cluster,
tenant_id='fake-1')
self.assertEqual(len(results), 1)
self.assertEqual(results[0]['uuid'], gw_ids[0])
def test_delete_l2_gw_service(self):
display_name = 'fake-gateway'
node_uuid = _uuid()
gw_id = self._create_gw_service(node_uuid, display_name)['uuid']
nvplib.delete_l2_gw_service(self.fake_cluster, gw_id)
results = nvplib.get_l2_gw_services(self.fake_cluster)
self.assertEqual(len(results), 0)
def test_plug_l2_gw_port_attachment(self):
tenant_id = 'pippo'
node_uuid = _uuid()
transport_zones_config = [{'zone_uuid': _uuid(),
'transport_type': 'stt'}]
lswitch = nvplib.create_lswitch(self.fake_cluster, _uuid(), tenant_id,
'fake-switch', transport_zones_config)
gw_id = self._create_gw_service(node_uuid, 'fake-gw')['uuid']
lport = nvplib.create_lport(self.fake_cluster,
lswitch['uuid'],
tenant_id,
_uuid(),
'fake-gw-port',
gw_id,
True)
nvplib.plug_l2_gw_service(self.fake_cluster,
lswitch['uuid'],
lport['uuid'],
gw_id)
uri = nvplib._build_uri_path(nvplib.LSWITCHPORT_RESOURCE,
lport['uuid'],
lswitch['uuid'],
is_attachment=True)
resp_obj = nvplib.do_request("GET", uri,
cluster=self.fake_cluster)
self.assertIn('LogicalPortAttachment', resp_obj)
self.assertEqual(resp_obj['LogicalPortAttachment']['type'],
'L2GatewayAttachment')
class TestNvplibLogicalSwitches(NvplibTestCase):
def test_create_and_get_lswitches_single(self):
tenant_id = 'pippo'
transport_zones_config = [{'zone_uuid': _uuid(),
'transport_type': 'stt'}]
lswitch = nvplib.create_lswitch(self.fake_cluster,
_uuid(),
tenant_id,
'fake-switch',
transport_zones_config)
res_lswitch = nvplib.get_lswitches(self.fake_cluster,
lswitch['uuid'])
self.assertEqual(len(res_lswitch), 1)
self.assertEqual(res_lswitch[0]['uuid'],
lswitch['uuid'])
def test_create_and_get_lswitches_single_name_exceeds_40_chars(self):
tenant_id = 'pippo'
transport_zones_config = [{'zone_uuid': _uuid(),
'transport_type': 'stt'}]
lswitch = nvplib.create_lswitch(self.fake_cluster,
tenant_id,
_uuid(),
'*' * 50,
transport_zones_config)
res_lswitch = nvplib.get_lswitches(self.fake_cluster,
lswitch['uuid'])
self.assertEqual(len(res_lswitch), 1)
self.assertEqual(res_lswitch[0]['uuid'], lswitch['uuid'])
self.assertEqual(res_lswitch[0]['display_name'], '*' * 40)
def test_create_and_get_lswitches_multiple(self):
tenant_id = 'pippo'
transport_zones_config = [{'zone_uuid': _uuid(),
'transport_type': 'stt'}]
network_id = _uuid()
main_lswitch = nvplib.create_lswitch(
self.fake_cluster, network_id,
tenant_id, 'fake-switch', transport_zones_config,
tags=[{'scope': 'multi_lswitch', 'tag': 'True'}])
# Create secondary lswitch
second_lswitch = nvplib.create_lswitch(
self.fake_cluster, network_id,
tenant_id, 'fake-switch-2', transport_zones_config)
res_lswitch = nvplib.get_lswitches(self.fake_cluster,
network_id)
self.assertEqual(len(res_lswitch), 2)
switch_uuids = [ls['uuid'] for ls in res_lswitch]
self.assertIn(main_lswitch['uuid'], switch_uuids)
self.assertIn(second_lswitch['uuid'], switch_uuids)
for ls in res_lswitch:
if ls['uuid'] == main_lswitch['uuid']:
main_ls = ls
else:
second_ls = ls
main_ls_tags = self._build_tag_dict(main_ls['tags'])
second_ls_tags = self._build_tag_dict(second_ls['tags'])
self.assertIn('multi_lswitch', main_ls_tags)
self.assertNotIn('multi_lswitch', second_ls_tags)
self.assertIn('quantum_net_id', main_ls_tags)
self.assertIn('quantum_net_id', second_ls_tags)
self.assertEqual(main_ls_tags['quantum_net_id'],
network_id)
self.assertEqual(second_ls_tags['quantum_net_id'],
network_id)
def test_update_lswitch(self):
new_name = 'new-name'
new_tags = [{'scope': 'new_tag', 'tag': 'xxx'}]
transport_zones_config = [{'zone_uuid': _uuid(),
'transport_type': 'stt'}]
lswitch = nvplib.create_lswitch(self.fake_cluster,
_uuid(),
'pippo',
'fake-switch',
transport_zones_config)
nvplib.update_lswitch(self.fake_cluster, lswitch['uuid'],
new_name, tags=new_tags)
res_lswitch = nvplib.get_lswitches(self.fake_cluster,
lswitch['uuid'])
self.assertEqual(len(res_lswitch), 1)
self.assertEqual(res_lswitch[0]['display_name'], new_name)
switch_tags = self._build_tag_dict(res_lswitch[0]['tags'])
self.assertIn('new_tag', switch_tags)
self.assertEqual(switch_tags['new_tag'], 'xxx')
def test_update_non_existing_lswitch_raises(self):
self.assertRaises(exceptions.NetworkNotFound,
nvplib.update_lswitch,
self.fake_cluster, 'whatever',
'foo', 'bar')
def test_delete_networks(self):
transport_zones_config = [{'zone_uuid': _uuid(),
'transport_type': 'stt'}]
lswitch = nvplib.create_lswitch(self.fake_cluster,
_uuid(),
'pippo',
'fake-switch',
transport_zones_config)
nvplib.delete_networks(self.fake_cluster, lswitch['uuid'],
[lswitch['uuid']])
self.assertRaises(exceptions.NotFound,
nvplib.get_lswitches,
self.fake_cluster,
lswitch['uuid'])
def test_delete_non_existing_lswitch_raises(self):
self.assertRaises(exceptions.NetworkNotFound,
nvplib.delete_networks,
self.fake_cluster, 'whatever', ['whatever'])
class TestNvplibSecurityProfile(NvplibTestCase):
def test_create_and_get_security_profile(self):
@ -448,154 +205,6 @@ class TestNvplibSecurityProfile(NvplibTestCase):
self.fake_cluster, 'whatever')
class TestNvplibLogicalPorts(NvplibTestCase):
def _create_switch_and_port(self, tenant_id='pippo',
neutron_port_id='whatever',
name='name', device_id='device_id'):
transport_zones_config = [{'zone_uuid': _uuid(),
'transport_type': 'stt'}]
lswitch = nvplib.create_lswitch(self.fake_cluster,
_uuid(), tenant_id, 'fake-switch',
transport_zones_config)
lport = nvplib.create_lport(self.fake_cluster, lswitch['uuid'],
tenant_id, neutron_port_id,
name, device_id, True)
return lswitch, lport
def test_create_and_get_port(self):
lswitch, lport = self._create_switch_and_port()
lport_res = nvplib.get_port(self.fake_cluster,
lswitch['uuid'], lport['uuid'])
self.assertEqual(lport['uuid'], lport_res['uuid'])
# Try again with relation
lport_res = nvplib.get_port(self.fake_cluster,
lswitch['uuid'], lport['uuid'],
relations='LogicalPortStatus')
self.assertEqual(lport['uuid'], lport_res['uuid'])
def test_plug_interface(self):
lswitch, lport = self._create_switch_and_port()
nvplib.plug_interface(self.fake_cluster, lswitch['uuid'],
lport['uuid'], 'VifAttachment', 'fake')
lport_res = nvplib.get_port(self.fake_cluster,
lswitch['uuid'], lport['uuid'])
self.assertEqual(lport['uuid'], lport_res['uuid'])
def test_get_port_by_tag(self):
lswitch, lport = self._create_switch_and_port()
lport2 = nvplib.get_port_by_neutron_tag(self.fake_cluster,
lswitch['uuid'],
'whatever')
self.assertIsNotNone(lport2)
self.assertEqual(lport['uuid'], lport2['uuid'])
def test_get_port_by_tag_not_found_returns_None(self):
tenant_id = 'pippo'
neutron_port_id = 'whatever'
transport_zones_config = [{'zone_uuid': _uuid(),
'transport_type': 'stt'}]
lswitch = nvplib.create_lswitch(self.fake_cluster, tenant_id, _uuid(),
'fake-switch', transport_zones_config)
lport = nvplib.get_port_by_neutron_tag(self.fake_cluster,
lswitch['uuid'],
neutron_port_id)
self.assertIsNone(lport)
def test_get_port_status(self):
lswitch, lport = self._create_switch_and_port()
status = nvplib.get_port_status(self.fake_cluster,
lswitch['uuid'],
lport['uuid'])
self.assertEqual(constants.PORT_STATUS_ACTIVE, status)
def test_get_port_status_non_existent_raises(self):
self.assertRaises(exceptions.PortNotFoundOnNetwork,
nvplib.get_port_status,
self.fake_cluster,
'boo', 'boo')
def test_update_port(self):
lswitch, lport = self._create_switch_and_port()
nvplib.update_port(
self.fake_cluster, lswitch['uuid'], lport['uuid'],
'neutron_port_id', 'pippo2', 'new_name', 'device_id', False)
lport_res = nvplib.get_port(self.fake_cluster,
lswitch['uuid'], lport['uuid'])
self.assertEqual(lport['uuid'], lport_res['uuid'])
self.assertEqual('new_name', lport_res['display_name'])
self.assertEqual('False', lport_res['admin_status_enabled'])
port_tags = self._build_tag_dict(lport_res['tags'])
self.assertIn('os_tid', port_tags)
self.assertIn('q_port_id', port_tags)
self.assertIn('vm_id', port_tags)
def test_create_port_device_id_less_than_40_chars(self):
lswitch, lport = self._create_switch_and_port()
lport_res = nvplib.get_port(self.fake_cluster,
lswitch['uuid'], lport['uuid'])
port_tags = self._build_tag_dict(lport_res['tags'])
self.assertEqual('device_id', port_tags['vm_id'])
def test_create_port_device_id_more_than_40_chars(self):
dev_id = "this_is_a_very_long_device_id_with_lots_of_characters"
lswitch, lport = self._create_switch_and_port(device_id=dev_id)
lport_res = nvplib.get_port(self.fake_cluster,
lswitch['uuid'], lport['uuid'])
port_tags = self._build_tag_dict(lport_res['tags'])
self.assertNotEqual(len(dev_id), len(port_tags['vm_id']))
def test_get_ports_with_obsolete_and_new_vm_id_tag(self):
def obsolete(device_id, obfuscate=False):
return hashlib.sha1(device_id).hexdigest()
with mock.patch.object(nvplib, 'device_id_to_vm_id', new=obsolete):
dev_id1 = "short-dev-id-1"
_, lport1 = self._create_switch_and_port(device_id=dev_id1)
dev_id2 = "short-dev-id-2"
_, lport2 = self._create_switch_and_port(device_id=dev_id2)
lports = nvplib.get_ports(self.fake_cluster, None, [dev_id1])
port_tags = self._build_tag_dict(lports['whatever']['tags'])
self.assertNotEqual(dev_id1, port_tags['vm_id'])
lports = nvplib.get_ports(self.fake_cluster, None, [dev_id2])
port_tags = self._build_tag_dict(lports['whatever']['tags'])
self.assertEqual(dev_id2, port_tags['vm_id'])
def test_update_non_existent_port_raises(self):
self.assertRaises(exceptions.PortNotFoundOnNetwork,
nvplib.update_port, self.fake_cluster,
'boo', 'boo', 'boo', 'boo', 'boo', 'boo', False)
def test_delete_port(self):
lswitch, lport = self._create_switch_and_port()
nvplib.delete_port(self.fake_cluster,
lswitch['uuid'], lport['uuid'])
self.assertRaises(exceptions.PortNotFoundOnNetwork,
nvplib.get_port, self.fake_cluster,
lswitch['uuid'], lport['uuid'])
def test_delete_non_existent_port_raises(self):
lswitch = self._create_switch_and_port()[0]
self.assertRaises(exceptions.PortNotFoundOnNetwork,
nvplib.delete_port, self.fake_cluster,
lswitch['uuid'], 'bad_port_uuid')
def test_query_lswitch_ports(self):
lswitch, lport = self._create_switch_and_port()
switch_port_uuids = [
nvplib.create_lport(
self.fake_cluster, lswitch['uuid'], 'pippo', 'qportid-%s' % k,
'port-%s' % k, 'deviceid-%s' % k, True)['uuid']
for k in range(2)]
switch_port_uuids.append(lport['uuid'])
ports = nvplib.query_lswitch_lports(self.fake_cluster, lswitch['uuid'])
self.assertEqual(len(ports), 3)
for res_port in ports:
self.assertIn(res_port['uuid'], switch_port_uuids)
class TestNvplibClusterManagement(NvplibTestCase):
def test_get_cluster_version(self):