# Copyright 2014 VMware, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from distutils import version
import xml.etree.ElementTree as et
import netaddr
from neutron_lib.agent import topics
from neutron_lib.api.definitions import address_scope
from neutron_lib.api.definitions import agent as agent_apidef
from neutron_lib.api.definitions import allowedaddresspairs as addr_apidef
from neutron_lib.api.definitions import availability_zone as az_def
from neutron_lib.api.definitions import dvr as dvr_apidef
from neutron_lib.api.definitions import external_net as extnet_apidef
from neutron_lib.api.definitions import extra_dhcp_opt as ext_edo
from neutron_lib.api.definitions import extraroute
from neutron_lib.api.definitions import flavors as flavors_apidef
from neutron_lib.api.definitions import l3 as l3_apidef
from neutron_lib.api.definitions import l3_flavors
from neutron_lib.api.definitions import multiprovidernet as mpnet_apidef
from neutron_lib.api.definitions import network_availability_zone
from neutron_lib.api.definitions import port as port_def
from neutron_lib.api.definitions import port_security as psec
from neutron_lib.api.definitions import portbindings as pbin
from neutron_lib.api.definitions import provider_net as pnet
from neutron_lib.api.definitions import router_availability_zone
from neutron_lib.api.definitions import subnet as subnet_def
from neutron_lib.api.definitions import vlantransparent as vlan_apidef
from neutron_lib.api import extensions
from neutron_lib.api import validators
from neutron_lib.api.validators import availability_zone as az_validator
from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from neutron_lib import constants
from neutron_lib import context as n_context
from neutron_lib.db import api as db_api
from neutron_lib.db import constants as db_const
from neutron_lib.db import resource_extend
from neutron_lib.db import utils as db_utils
from neutron_lib import exceptions as n_exc
from neutron_lib.exceptions import allowedaddresspairs as addr_exc
from neutron_lib.exceptions import flavors as flav_exc
from neutron_lib.exceptions import l3 as l3_exc
from neutron_lib.exceptions import multiprovidernet as mpnet_exc
from neutron_lib.exceptions import port_security as psec_exc
from neutron_lib.objects import registry as obj_reg
from neutron_lib.plugins import constants as plugin_const
from neutron_lib.plugins import directory
from neutron_lib.plugins import utils
from neutron_lib import rpc as n_rpc
from neutron_lib.services.qos import constants as qos_consts
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import excutils
from oslo_utils import netutils
from oslo_utils import uuidutils
from sqlalchemy.orm import exc as sa_exc
from neutron.api import extensions as neutron_extensions
from neutron.common import ipv6_utils
from neutron.common import utils as n_utils
from neutron.db import agents_db
from neutron.db import allowedaddresspairs_db as addr_pair_db
from neutron.db.availability_zone import router as router_az_db
from neutron.db import dns_db
from neutron.db import external_net_db
from neutron.db import extradhcpopt_db
from neutron.db import extraroute_db
from neutron.db import l3_db
from neutron.db import l3_gwmode_db
from neutron.db.models import l3 as l3_db_models
from neutron.db.models import securitygroup as securitygroup_model
from neutron.db import models_v2
from neutron.db import portsecurity_db
from neutron.db import securitygroups_db
from neutron.db import vlantransparent_db
from neutron.extensions import securitygroup as ext_sg
from neutron.quota import resource_registry
from neutron.services.flavors import flavors_plugin
from vmware_nsx.dvs import dvs
from vmware_nsx.services.qos.common import utils as qos_com_utils
from vmware_nsx.services.qos.nsx_v import driver as qos_driver
from vmware_nsx.services.qos.nsx_v import utils as qos_utils
import vmware_nsx
from vmware_nsx._i18n import _
from vmware_nsx.common import availability_zones as nsx_com_az
from vmware_nsx.common import config # noqa
from vmware_nsx.common import exceptions as nsx_exc
from vmware_nsx.common import l3_rpc_agent_api
from vmware_nsx.common import locking
from vmware_nsx.common import managers as nsx_managers
from vmware_nsx.common import nsx_constants
from vmware_nsx.common import nsxv_constants
from vmware_nsx.common import utils as c_utils
from vmware_nsx.db import (
extended_security_group_rule as extend_sg_rule)
from vmware_nsx.db import (
routertype as rt_rtr)
from vmware_nsx.db import db as nsx_db
from vmware_nsx.db import extended_security_group as extended_secgroup
from vmware_nsx.db import maclearning as mac_db
from vmware_nsx.db import nsx_portbindings_db as pbin_db
from vmware_nsx.db import nsxv_db
from vmware_nsx.db import vnic_index_db
from vmware_nsx.extensions import (
advancedserviceproviders as as_providers)
from vmware_nsx.extensions import (
vnicindex as ext_vnic_idx)
from vmware_nsx.extensions import dhcp_mtu as ext_dhcp_mtu
from vmware_nsx.extensions import dns_search_domain as ext_dns_search_domain
from vmware_nsx.extensions import housekeeper as hk_ext
from vmware_nsx.extensions import maclearning as mac_ext
from vmware_nsx.extensions import nsxpolicy
from vmware_nsx.extensions import projectpluginmap
from vmware_nsx.extensions import providersecuritygroup as provider_sg
from vmware_nsx.extensions import routersize
from vmware_nsx.extensions import routertype
from vmware_nsx.extensions import secgroup_rule_local_ip_prefix
from vmware_nsx.extensions import securitygrouplogging as sg_logging
from vmware_nsx.extensions import securitygrouppolicy as sg_policy
from vmware_nsx.plugins.common.housekeeper import housekeeper
from vmware_nsx.plugins.common import plugin as nsx_plugin_common
from vmware_nsx.plugins.nsx import utils as tvd_utils
from vmware_nsx.plugins.nsx_v import availability_zones as nsx_az
from vmware_nsx.plugins.nsx_v import managers
from vmware_nsx.plugins.nsx_v import md_proxy as nsx_v_md_proxy
from vmware_nsx.plugins.nsx_v.vshield.common import (
constants as vcns_const)
from vmware_nsx.plugins.nsx_v.vshield.common import (
exceptions as vsh_exc)
from vmware_nsx.plugins.nsx_v.vshield import edge_firewall_driver
from vmware_nsx.plugins.nsx_v.vshield import edge_utils
from vmware_nsx.plugins.nsx_v.vshield import securitygroup_utils
from vmware_nsx.plugins.nsx_v.vshield import vcns_driver
from vmware_nsx.services.flowclassifier.nsx_v import utils as fc_utils
from vmware_nsx.services.fwaas.common import utils as fwaas_utils
from vmware_nsx.services.fwaas.nsx_v import fwaas_callbacks_v2
from vmware_nsx.services.lbaas.nsx_v.implementation import healthmon_mgr
from vmware_nsx.services.lbaas.nsx_v.implementation import l7policy_mgr
from vmware_nsx.services.lbaas.nsx_v.implementation import l7rule_mgr
from vmware_nsx.services.lbaas.nsx_v.implementation import listener_mgr
from vmware_nsx.services.lbaas.nsx_v.implementation import loadbalancer_mgr
from vmware_nsx.services.lbaas.nsx_v.implementation import member_mgr
from vmware_nsx.services.lbaas.nsx_v.implementation import pool_mgr
from vmware_nsx.services.lbaas.nsx_v import lbaas_common as lb_common
from vmware_nsx.services.lbaas.octavia import constants as oct_const
from vmware_nsx.services.lbaas.octavia import octavia_listener
LOG = logging.getLogger(__name__)
PORTGROUP_PREFIX = 'dvportgroup'
ROUTER_SIZE = routersize.ROUTER_SIZE
VALID_EDGE_SIZES = routersize.VALID_EDGE_SIZES
SUBNET_RULE_NAME = 'Subnet Rule'
DNAT_RULE_NAME = 'DNAT Rule'
ALLOCATION_POOL_RULE_NAME = 'Allocation Pool Rule'
NO_SNAT_RULE_NAME = 'No SNAT Rule'
UNSUPPORTED_RULE_NAMED_PROTOCOLS = [constants.PROTO_NAME_DCCP,
constants.PROTO_NAME_PGM,
constants.PROTO_NAME_VRRP,
constants.PROTO_NAME_UDPLITE,
constants.PROTO_NAME_EGP,
constants.PROTO_NAME_IPIP,
constants.PROTO_NAME_OSPF,
constants.PROTO_NAME_IPV6_ROUTE,
constants.PROTO_NAME_IPV6_ENCAP,
constants.PROTO_NAME_IPV6_FRAG,
constants.PROTO_NAME_IPV6_OPTS,
constants.PROTO_NAME_IPV6_NONXT]
PROTOCOLS_SUPPORTING_PORTS = [constants.PROTO_NUM_TCP,
constants.PROTO_NUM_UDP,
constants.PROTO_NUM_ICMP,
constants.PROTO_NUM_IPV6_ICMP]
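# The protocols above are TCP (6), UDP (17), ICMP (1) and ICMPv6 (58)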
@resource_extend.has_resource_extenders
class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
agents_db.AgentDbMixin,
nsx_plugin_common.NsxPluginBase,
rt_rtr.RouterType_mixin,
external_net_db.External_net_db_mixin,
extraroute_db.ExtraRoute_db_mixin,
extradhcpopt_db.ExtraDhcpOptMixin,
router_az_db.RouterAvailabilityZoneMixin,
l3_gwmode_db.L3_NAT_db_mixin,
pbin_db.NsxPortBindingMixin,
portsecurity_db.PortSecurityDbMixin,
extend_sg_rule.ExtendedSecurityGroupRuleMixin,
securitygroups_db.SecurityGroupDbMixin,
extended_secgroup.ExtendedSecurityGroupPropertiesMixin,
vnic_index_db.VnicIndexDbMixin,
dns_db.DNSDbMixin, nsxpolicy.NsxPolicyPluginBase,
vlantransparent_db.Vlantransparent_db_mixin,
nsx_com_az.NSXAvailabilityZonesPluginCommon,
mac_db.MacLearningDbMixin,
hk_ext.Housekeeper):
supported_extension_aliases = [agent_apidef.ALIAS,
addr_apidef.ALIAS,
address_scope.ALIAS,
pbin.ALIAS,
ext_dns_search_domain.ALIAS,
dvr_apidef.ALIAS,
"ext-gw-mode",
mpnet_apidef.ALIAS,
psec.ALIAS,
pnet.ALIAS,
"quotas",
extnet_apidef.ALIAS,
ext_edo.ALIAS,
extraroute.ALIAS,
l3_apidef.ALIAS,
"security-group",
secgroup_rule_local_ip_prefix.ALIAS,
sg_logging.ALIAS,
routertype.ALIAS,
routersize.ALIAS,
ext_vnic_idx.ALIAS,
as_providers.ALIAS,
"subnet_allocation",
az_def.ALIAS,
network_availability_zone.ALIAS,
router_availability_zone.ALIAS,
l3_flavors.ALIAS,
flavors_apidef.ALIAS,
ext_dhcp_mtu.ALIAS,
mac_ext.ALIAS,
hk_ext.ALIAS,
"port-security-groups-filtering"]
__native_bulk_support = True
__native_pagination_support = True
__native_sorting_support = True
@resource_registry.tracked_resources(
network=models_v2.Network,
port=models_v2.Port,
subnet=models_v2.Subnet,
subnetpool=models_v2.SubnetPool,
security_group=securitygroup_model.SecurityGroup,
security_group_rule=securitygroup_model.SecurityGroupRule,
router=l3_db_models.Router,
floatingip=l3_db_models.FloatingIP)
def __init__(self):
self._is_sub_plugin = tvd_utils.is_tvd_core_plugin()
self.init_is_complete = False
self.octavia_listener = None
self.octavia_stats_collector = None
self.housekeeper = None
super(NsxVPluginV2, self).__init__()
if self._is_sub_plugin:
extension_drivers = cfg.CONF.nsx_tvd.nsx_v_extension_drivers
else:
extension_drivers = cfg.CONF.nsx_extension_drivers
self._extension_manager = nsx_managers.ExtensionManager(
extension_drivers=extension_drivers)
# Bind the dummy L3 notifications
self.l3_rpc_notifier = l3_rpc_agent_api.L3NotifyAPI()
self._extension_manager.initialize()
self.supported_extension_aliases.extend(
self._extension_manager.extension_aliases())
self.metadata_proxy_handler = None
config.validate_nsxv_config_options()
self._network_vlans = utils.parse_network_vlan_ranges(
cfg.CONF.nsxv.network_vlan_ranges)
neutron_extensions.append_api_extensions_path(
[vmware_nsx.NSX_EXT_PATH])
# This needs to be set prior to binding callbacks
if cfg.CONF.nsxv.use_dvs_features:
self._vcm = dvs.VCManager()
else:
self._vcm = None
# Create the client to interface with the NSX-v
_nsx_v_callbacks = edge_utils.NsxVCallbacks(self)
self.nsx_v = vcns_driver.VcnsDriver(_nsx_v_callbacks)
# Use the existing class instead of creating a new instance
self.lbv2_driver = self.nsx_v
# Ensure that edges do concurrency
self._ensure_lock_operations()
self._validate_nsx_version()
# Configure aggregate publishing
self._aggregate_publishing()
# Configure edge reservations
self._configure_reservations()
self.edge_manager = edge_utils.EdgeManager(self.nsx_v, self)
self.nsx_sg_utils = securitygroup_utils.NsxSecurityGroupUtils(
self.nsx_v)
self.init_availability_zones()
self._validate_config()
self._use_nsx_policies = False
if cfg.CONF.nsxv.use_nsx_policies:
if not c_utils.is_nsxv_version_6_2(self.nsx_v.vcns.get_version()):
error = (_("NSX policies are not supported for version "
"%(ver)s.") %
{'ver': self.nsx_v.vcns.get_version()})
raise nsx_exc.NsxPluginException(err_msg=error)
# Support NSX policies in default security groups
self._use_nsx_policies = True
# enable the extension
self.supported_extension_aliases.append(sg_policy.ALIAS)
self.supported_extension_aliases.append(nsxpolicy.ALIAS)
# Support transparent VLANs from 6.3.0 onwards. The feature is only
# supported if the global configuration flag vlan_transparent is
# True
if cfg.CONF.vlan_transparent:
if c_utils.is_nsxv_version_6_3(self.nsx_v.vcns.get_version()):
self.supported_extension_aliases.append(vlan_apidef.ALIAS)
else:
LOG.warning("Transparent support only from "
"NSX 6.3 onwards")
self.sg_container_id = self._create_security_group_container()
self.default_section = self._create_cluster_default_fw_section()
self._router_managers = managers.RouterTypeManager(self)
# Make sure starting rpc listeners (for QoS and other agents)
# will happen only once
self.start_rpc_listeners_called = False
self.fwaas_callbacks = None
# Service insertion driver register
self._si_handler = fc_utils.NsxvServiceInsertionHandler(self)
registry.subscribe(self.add_vms_to_service_insertion,
fc_utils.SERVICE_INSERTION_RESOURCE,
events.AFTER_CREATE)
# Subscribe to subnet pools changes
registry.subscribe(
self.on_subnetpool_address_scope_updated,
resources.SUBNETPOOL_ADDRESS_SCOPE, events.AFTER_UPDATE)
if c_utils.is_nsxv_version_6_2(self.nsx_v.vcns.get_version()):
self.supported_extension_aliases.append(provider_sg.ALIAS)
# Bind QoS notifications
qos_driver.register(self)
registry.subscribe(self.spawn_complete,
resources.PROCESS,
events.AFTER_SPAWN)
# subscribe the init complete method last, so it will be called only
# if init was successful
registry.subscribe(self.init_complete,
resources.PROCESS,
events.AFTER_INIT)
@staticmethod
def plugin_type():
return projectpluginmap.NsxPlugins.NSX_V
@staticmethod
def is_tvd_plugin():
return False
def spawn_complete(self, resource, event, trigger, payload=None):
# Init the FWaaS support with RPC listeners for the original process
self._init_fwaas(with_rpc=True)
# The rest of this method should run only once, but after init_complete
if not self.init_is_complete:
self.init_complete(None, None, None)
if not self._is_sub_plugin:
self.octavia_stats_collector = (
octavia_listener.NSXOctaviaStatisticsCollector(
self,
self._get_octavia_stats_getter()))
def init_complete(self, resource, event, trigger, payload=None):
with locking.LockManager.get_lock('plugin-init-complete'):
if self.init_is_complete:
# Should be called only once per worker
return
has_metadata_cfg = (
cfg.CONF.nsxv.nova_metadata_ips and
cfg.CONF.nsxv.mgt_net_moid and
cfg.CONF.nsxv.mgt_net_proxy_ips and
cfg.CONF.nsxv.mgt_net_proxy_netmask)
if has_metadata_cfg:
# Init md_proxy handler per availability zone
self.metadata_proxy_handler = {}
for az in self.get_azs_list():
# create metadata handler only if the az supports it.
# if not, the global one will be used
if az.supports_metadata():
self.metadata_proxy_handler[az.name] = (
nsx_v_md_proxy.NsxVMetadataProxyHandler(
self, az))
LOG.debug('Metadata is configured for AZs %s',
self.metadata_proxy_handler.keys())
else:
LOG.debug('No metadata configuration available!')
self.housekeeper = housekeeper.NsxHousekeeper(
hk_ns='vmware_nsx.neutron.nsxv.housekeeper.jobs',
hk_jobs=cfg.CONF.nsxv.housekeeping_jobs,
hk_readonly=cfg.CONF.nsxv.housekeeping_readonly,
hk_readonly_jobs=cfg.CONF.nsxv.housekeeping_readonly_jobs)
# Init octavia listener and endpoints
if not self._is_sub_plugin:
octavia_objects = self._get_octavia_objects()
self.octavia_listener = octavia_listener.NSXOctaviaListener(
**octavia_objects)
# Init the FWaaS support without RPC listeners
# for the spawn workers
self._init_fwaas(with_rpc=False)
self.init_is_complete = True
def _get_octavia_objects(self):
return {
'loadbalancer': loadbalancer_mgr.EdgeLoadBalancerManagerFromDict(
self.nsx_v),
'listener': listener_mgr.EdgeListenerManagerFromDict(self.nsx_v),
'pool': pool_mgr.EdgePoolManagerFromDict(self.nsx_v),
'member': member_mgr.EdgeMemberManagerFromDict(self.nsx_v),
'healthmonitor': healthmon_mgr.EdgeHealthMonitorManagerFromDict(
self.nsx_v),
'l7policy': l7policy_mgr.EdgeL7PolicyManagerFromDict(self.nsx_v),
'l7rule': l7rule_mgr.EdgeL7RuleManagerFromDict(self.nsx_v)}
def _get_octavia_stats_getter(self):
return listener_mgr.stats_getter
def _validate_nsx_version(self):
ver = self.nsx_v.vcns.get_version()
if version.LooseVersion(ver) < version.LooseVersion('6.2.3'):
error = _("Plugin version doesn't support NSX version %s.") % ver
raise nsx_exc.NsxPluginException(err_msg=error)
def get_metadata_proxy_handler(self, az_name):
if not self.metadata_proxy_handler:
return None
if az_name in self.metadata_proxy_handler:
return self.metadata_proxy_handler[az_name]
# fallback to the global handler
# Note(asarfaty): in case this is called during init_complete the
# default availability zone may still not exist.
return self.metadata_proxy_handler.get(nsx_az.DEFAULT_NAME)
def add_vms_to_service_insertion(self, sg_id):
def _add_vms_to_service_insertion(*args, **kwargs):
"""Adding existing VMs to the service insertion security group
Adding all current compute ports with port security to the service
insertion security group in order to classify their traffic by the
security redirect rules
"""
sg_id = args[0]
context = n_context.get_admin_context()
filters = {'device_owner': ['compute:None']}
ports = self.get_ports(context, filters=filters)
for port in ports:
# Only add compute ports with device-id, vnic & port security
if (validators.is_attr_set(
port.get(ext_vnic_idx.VNIC_INDEX)) and
validators.is_attr_set(port.get('device_id')) and
port[psec.PORTSECURITY]):
try:
vnic_idx = port[ext_vnic_idx.VNIC_INDEX]
device_id = port['device_id']
vnic_id = self._get_port_vnic_id(vnic_idx, device_id)
self._add_member_to_security_group(sg_id, vnic_id)
except Exception as e:
LOG.info('Could not add port %(port)s to service '
'insertion security group. Exception '
'%(err)s',
{'port': port['id'], 'err': e})
# Doing this in a separate thread to not slow down the init process
# in case there are many compute ports
c_utils.spawn_n(_add_vms_to_service_insertion, sg_id)
def start_rpc_listeners(self):
if self.start_rpc_listeners_called:
# If called more than once - we should not create it again
return self.conn.consume_in_threads()
LOG.info("NSXV plugin: starting RPC listeners")
self.endpoints = [agents_db.AgentExtRpcCallback()]
self.topic = topics.PLUGIN
self.conn = n_rpc.Connection()
self.conn.create_consumer(self.topic, self.endpoints, fanout=False)
self.start_rpc_listeners_called = True
return self.conn.consume_in_threads()
def _init_fwaas(self, with_rpc):
# Bind FWaaS callbacks to the driver
if fwaas_utils.is_fwaas_v2_plugin_enabled():
LOG.info("NSXv FWaaS v2 plugin enabled")
self.fwaas_callbacks = fwaas_callbacks_v2.NsxvFwaasCallbacksV2(
with_rpc)
def _create_security_group_container(self):
name = "OpenStack Security Group container"
with locking.LockManager.get_lock('security-group-container-init'):
container_id = self.nsx_v.vcns.get_security_group_id(name)
if not container_id:
description = ("OpenStack Security Group Container, "
"managed by Neutron nsx-v plugin.")
container = {"securitygroup": {"name": name,
"description": description}}
h, container_id = (
self.nsx_v.vcns.create_security_group(container))
return container_id
def _find_router_driver(self, context, router_id):
router_qry = context.session.query(l3_db_models.Router)
router_db = router_qry.filter_by(id=router_id).one()
return self._get_router_driver(context, router_db)
def _get_router_driver(self, context, router_db):
router_type_dict = {}
self._extend_nsx_router_dict(router_type_dict, router_db)
router_type = None
if router_type_dict.get("distributed", False):
router_type = "distributed"
else:
router_type = router_type_dict.get("router_type")
return self._router_managers.get_tenant_router_driver(
context, router_type)
def _decide_router_type(self, context, r):
router_type = None
if (validators.is_attr_set(r.get("distributed")) and
r.get("distributed")):
router_type = "distributed"
if validators.is_attr_set(r.get("router_type")):
err_msg = _('Can not support router_type extension for '
'distributed router')
raise n_exc.InvalidInput(error_message=err_msg)
elif validators.is_attr_set(r.get("router_type")):
router_type = r.get("router_type")
router_type = self._router_managers.decide_tenant_router_type(
context, router_type)
if router_type == "distributed":
r["distributed"] = True
r["router_type"] = "exclusive"
else:
r["distributed"] = False
r["router_type"] = router_type
@staticmethod
@resource_extend.extends([l3_apidef.ROUTERS])
def _extend_nsx_router_dict(router_res, router_db):
router_type_obj = rt_rtr.RouterType_mixin()
router_type_obj._extend_nsx_router_dict(
router_res, router_db, router_type_obj.nsx_attributes)
def _get_cluster_default_fw_section_rules(self):
"""Build Default cluster rules"""
rules = [{'name': 'Default DHCP rule for OS Security Groups',
'action': 'allow',
'services': [('17', '67', None, None),
('17', '68', None, None)]},
{'name': 'Default ICMPv6 rule for OS Security Groups',
'action': 'allow',
'services': [('58', None,
constants.ICMPV6_TYPE_NS, None),
('58', None,
constants.ICMPV6_TYPE_NA, None),
('58', None,
constants.ICMPV6_TYPE_RA, None),
('58', None,
constants.ICMPV6_TYPE_MLD_QUERY, None)]},
{'name': 'Default DHCPv6 rule for OS Security Groups',
'action': 'allow',
'services': [('17', '546', None, None),
('17', '547', None, None)]}]
if cfg.CONF.nsxv.cluster_moid:
applied_to_ids = cfg.CONF.nsxv.cluster_moid
applied_to_type = 'ClusterComputeResource'
else:
applied_to_ids = [self.sg_container_id]
applied_to_type = 'SecurityGroup'
rule_list = []
for rule in rules:
rule_config = self.nsx_sg_utils.get_rule_config(
applied_to_ids, rule['name'], rule['action'],
applied_to_type, services=rule['services'],
logged=cfg.CONF.nsxv.log_security_groups_allowed_traffic)
rule_list.append(rule_config)
igmp_names = ['IGMP Membership Query', 'IGMP V2 Membership Report',
'IGMP V3 Membership Report', 'IGMP Leave Group']
igmp_ids = []
for name in igmp_names:
igmp_id = self._get_appservice_id(name)
if igmp_id:
igmp_ids.append(igmp_id)
if igmp_ids:
rules = [{'name': 'Default IGMP rule for OS Security Groups',
'action': 'allow',
'service_ids': igmp_ids}]
for rule in rules:
rule_config = self.nsx_sg_utils.get_rule_config(
applied_to_ids, rule['name'], rule['action'],
applied_to_type,
application_services=rule['service_ids'],
logged=cfg.CONF.nsxv.log_security_groups_allowed_traffic)
rule_list.append(rule_config)
# Default security-group rules
block_rule = self.nsx_sg_utils.get_rule_config(
[self.sg_container_id], 'Block All', 'deny',
logged=cfg.CONF.nsxv.log_security_groups_blocked_traffic)
rule_list.append(block_rule)
return rule_list
def _create_cluster_default_fw_section(self, update_section=False):
section_name = 'OS Cluster Security Group section'
with locking.LockManager.get_lock('default-section-init'):
section_id = self.nsx_v.vcns.get_section_id(section_name)
if section_id and not update_section:
# No need to update an existing section, unless the
# configuration changed
return section_id
rule_list = self._get_cluster_default_fw_section_rules()
section = self.nsx_sg_utils.get_section_with_rules(
section_name, rule_list, section_id)
section_req_body = self.nsx_sg_utils.to_xml_string(section)
if section_id:
self.nsx_v.vcns.update_section_by_id(
section_id, 'ip', section_req_body)
else:
# cluster section does not exist. Create it above the
# default l3 section
try:
l3_id = self.nsx_v.vcns.get_default_l3_id()
h, c = self.nsx_v.vcns.create_section(
'ip', section_req_body, insert_before=l3_id)
section_id = self.nsx_sg_utils.parse_and_get_section_id(c)
except Exception as e:
# another controller might have already created one
section_id = self.nsx_v.vcns.get_section_id(section_name)
if not section_id:
with excutils.save_and_reraise_exception():
LOG.error("Failed to create default section: %s",
e)
return section_id
def _create_dhcp_static_binding(self, context, neutron_port_db):
network_id = neutron_port_db['network_id']
device_owner = neutron_port_db['device_owner']
if device_owner.startswith("compute"):
s_bindings = self.edge_manager.create_static_binding(
context, neutron_port_db)
self.edge_manager.create_dhcp_bindings(
context, neutron_port_db['id'], network_id, s_bindings)
def _delete_dhcp_static_binding(self, context, neutron_port_db,
log_error=True):
port_id = neutron_port_db['id']
network_id = neutron_port_db['network_id']
try:
self.edge_manager.delete_dhcp_binding(
context, port_id, network_id, neutron_port_db['mac_address'])
except Exception as e:
msg = ("Unable to delete static bindings for port %(id)s"
"Error: %(e)s" % {'id': port_id, 'e': e})
if log_error:
LOG.error(msg)
else:
LOG.info(msg)
def _validate_network_qos(self, context, network, backend_network):
err_msg = None
if validators.is_attr_set(network.get(qos_consts.QOS_POLICY_ID)):
if not backend_network:
err_msg = (_("Cannot configure QOS on external networks"))
if not cfg.CONF.nsxv.use_dvs_features:
err_msg = (_("Cannot configure QOS "
"without enabling use_dvs_features"))
if err_msg:
raise n_exc.InvalidInput(error_message=err_msg)
self._validate_qos_policy_id(
context, network.get(qos_consts.QOS_POLICY_ID))
def _get_network_az_from_net_data(self, net_data):
if az_def.AZ_HINTS in net_data and net_data[az_def.AZ_HINTS]:
return self._availability_zones_data.get_availability_zone(
net_data[az_def.AZ_HINTS][0])
return self.get_default_az()
def _get_network_az_dvs_id(self, net_data):
az = self._get_network_az_from_net_data(net_data)
return az.dvs_id
def _get_network_vdn_scope_id(self, net_data):
az = self._get_network_az_from_net_data(net_data)
return az.vdn_scope_id
def _validate_dvs_id(self, dvs_id):
if not self.nsx_v.vcns.validate_dvs(
dvs_id, dvs_list=self.existing_dvs):
# try to retrieve the dvs list again in case one was added
self.existing_dvs = self.nsx_v.vcns.get_dvs_list()
if not self.nsx_v.vcns.validate_dvs(
dvs_id, dvs_list=self.existing_dvs):
return False
return True
def _validate_provider_create(self, context, network):
if not validators.is_attr_set(network.get(mpnet_apidef.SEGMENTS)):
return
az_dvs = self._get_network_az_dvs_id(network)
for segment in network[mpnet_apidef.SEGMENTS]:
network_type = segment.get(pnet.NETWORK_TYPE)
physical_network = segment.get(pnet.PHYSICAL_NETWORK)
segmentation_id = segment.get(pnet.SEGMENTATION_ID)
network_type_set = validators.is_attr_set(network_type)
segmentation_id_set = validators.is_attr_set(segmentation_id)
physical_network_set = validators.is_attr_set(physical_network)
err_msg = None
if not network_type_set:
err_msg = _("%s required") % pnet.NETWORK_TYPE
elif network_type == c_utils.NsxVNetworkTypes.FLAT:
if segmentation_id_set:
err_msg = _("Segmentation ID cannot be specified with "
"flat network type")
if physical_network_set:
# Validate the DVS Id
if not self._validate_dvs_id(physical_network):
err_msg = (_("DVS Id %s could not be found") %
physical_network)
elif network_type == c_utils.NsxVNetworkTypes.VLAN:
if not segmentation_id_set:
if physical_network_set:
if physical_network not in self._network_vlans:
err_msg = _("Invalid physical network for "
"segmentation ID allocation")
else:
err_msg = _("Segmentation ID must be specified with "
"vlan network type")
elif (segmentation_id_set and
not utils.is_valid_vlan_tag(segmentation_id)):
err_msg = (_("%(segmentation_id)s out of range "
"(%(min_id)s through %(max_id)s)") %
{'segmentation_id': segmentation_id,
'min_id': constants.MIN_VLAN_TAG,
'max_id': constants.MAX_VLAN_TAG})
elif (segmentation_id_set and physical_network_set and
not self._validate_dvs_id(physical_network)):
err_msg = (_("DVS Id %s could not be found") %
physical_network)
else:
# Verify segment is not already allocated
bindings = nsxv_db.get_network_bindings_by_vlanid(
context.session, segmentation_id)
if bindings:
dvs_ids = self._get_dvs_ids(physical_network,
az_dvs)
for phy_uuid in dvs_ids:
for binding in bindings:
if binding['phy_uuid'] == phy_uuid:
raise n_exc.VlanIdInUse(
vlan_id=segmentation_id,
physical_network=phy_uuid)
elif network_type == c_utils.NsxVNetworkTypes.VXLAN:
# Currently unable to set the segmentation id
if segmentation_id_set:
err_msg = _("Segmentation ID cannot be set with VXLAN")
elif network_type == c_utils.NsxVNetworkTypes.PORTGROUP:
external = network.get(extnet_apidef.EXTERNAL)
if segmentation_id_set:
err_msg = _("Segmentation ID cannot be set with portgroup")
if not physical_network_set:
err_msg = _("Physical network must be set")
elif not self.nsx_v.vcns.validate_network(physical_network):
err_msg = _("Physical network doesn't exist")
# A provider network portgroup will need the network name to
# match the portgroup name
elif ((not validators.is_attr_set(external) or
validators.is_attr_set(external) and not external) and
not self.nsx_v.vcns.validate_network_name(
physical_network, network['name'])):
err_msg = _("Portgroup name must match network name")
if not err_msg:
# make sure no other neutron network is using it
bindings = (
nsxv_db.get_network_bindings_by_physical_net_and_type(
context.elevated().session, physical_network,
network_type))
if bindings:
err_msg = (_('portgroup %s is already used by '
'another network') % physical_network)
else:
err_msg = (_("%(net_type_param)s %(net_type_value)s not "
"supported") %
{'net_type_param': pnet.NETWORK_TYPE,
'net_type_value': network_type})
if err_msg:
raise n_exc.InvalidInput(error_message=err_msg)
# TODO(salvatore-orlando): Validate transport zone uuid
# which should be specified in physical_network
def _validate_network_type(self, context, network_id, net_types):
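# Returns True only when every binding of the network matches one of
# the given net_types (a single-provider network is checked against
# its first binding); networks with no bindings return False.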
bindings = nsxv_db.get_network_bindings(context.session,
network_id)
multiprovider = nsx_db.is_multiprovider_network(context.session,
network_id)
if bindings:
if not multiprovider:
return bindings[0].binding_type in net_types
for binding in bindings:
if binding.binding_type not in net_types:
return False
return True
return False
def _extend_network_dict_provider(self, context, network,
multiprovider=None, bindings=None):
if 'id' not in network:
return
if not bindings:
bindings = nsxv_db.get_network_bindings(context.session,
network['id'])
if not multiprovider:
multiprovider = nsx_db.is_multiprovider_network(context.session,
network['id'])
# With NSX plugin 'normal' overlay networks will have no binding
# TODO(salvatore-orlando) make sure users can specify a distinct
# phy_uuid as 'provider network' for STT net type
if bindings:
if not multiprovider:
# network came in through provider networks api
network[pnet.NETWORK_TYPE] = bindings[0].binding_type
network[pnet.PHYSICAL_NETWORK] = bindings[0].phy_uuid
network[pnet.SEGMENTATION_ID] = bindings[0].vlan_id
else:
# network came in through the multiprovider networks api
network[mpnet_apidef.SEGMENTS] = [
{pnet.NETWORK_TYPE: binding.binding_type,
pnet.PHYSICAL_NETWORK: binding.phy_uuid,
pnet.SEGMENTATION_ID: binding.vlan_id}
for binding in bindings]
# update availability zones
network[az_def.COLLECTION_NAME] = (
self._get_network_availability_zones(context, network))
def _get_subnet_as_providers(self, context, subnet, nw_dict=None):
net_id = subnet.get('network_id')
if net_id is None:
net_id = self.get_subnet(context, subnet['id']).get('network_id')
if nw_dict:
providers = nw_dict.get(net_id, [])
else:
as_provider_data = nsxv_db.get_edge_vnic_bindings_by_int_lswitch(
context.session, net_id)
providers = [asp['edge_id'] for asp in as_provider_data]
return providers
def get_subnet(self, context, id, fields=None):
subnet = super(NsxVPluginV2, self).get_subnet(context, id, fields)
if not context.is_admin:
return subnet
if fields and as_providers.ADV_SERVICE_PROVIDERS in fields:
subnet[as_providers.ADV_SERVICE_PROVIDERS] = (
self._get_subnet_as_providers(context, subnet))
return subnet
def get_subnets(self, context, filters=None, fields=None, sorts=None,
limit=None, marker=None, page_reverse=False):
subnets = super(NsxVPluginV2, self).get_subnets(context, filters,
fields, sorts, limit,
marker, page_reverse)
if not context.is_admin or (not filters and not fields):
return subnets
new_subnets = []
if ((fields and as_providers.ADV_SERVICE_PROVIDERS in fields) or
(filters and filters.get(as_providers.ADV_SERVICE_PROVIDERS))):
# This ugly mess should reduce DB calls with network_id field
# as filter - as network_id is not indexed
vnic_binds = nsxv_db.get_edge_vnic_bindings_with_networks(
context.session)
nw_dict = {}
for vnic_bind in vnic_binds:
if nw_dict.get(vnic_bind['network_id']):
nw_dict[vnic_bind['network_id']].append(
vnic_bind['edge_id'])
else:
nw_dict[vnic_bind['network_id']] = [vnic_bind['edge_id']]
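# nw_dict now maps each network_id to the list of edge ids serving
# it, so _get_subnet_as_providers() can avoid per-subnet DB lookups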
# We only handle the metadata provider field when:
# - adv_service_provider is explicitly retrieved
# - adv_service_provider is used in a filter
for subnet in subnets:
as_provider = self._get_subnet_as_providers(
context, subnet, nw_dict)
md_filter = (
None if filters is None
else filters.get(as_providers.ADV_SERVICE_PROVIDERS))
if md_filter is None or len(set(as_provider) & set(md_filter)):
# Include metadata_providers only if requested in results
if fields and as_providers.ADV_SERVICE_PROVIDERS in fields:
subnet[as_providers.ADV_SERVICE_PROVIDERS] = (
as_provider)
new_subnets.append(subnet)
else:
# No need to handle metadata providers field
return subnets
return new_subnets
def _convert_to_transport_zones_dict(self, network):
"""Converts the provider request body to multiprovider.
Returns: True if the request is multiprovider, False if provider,
and None if neither.
"""
if any(validators.is_attr_set(network.get(f))
for f in (pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK,
pnet.SEGMENTATION_ID)):
if validators.is_attr_set(network.get(mpnet_apidef.SEGMENTS)):
raise mpnet_exc.SegmentsSetInConjunctionWithProviders()
# convert to transport zone list
network[mpnet_apidef.SEGMENTS] = [
{pnet.NETWORK_TYPE: network[pnet.NETWORK_TYPE],
pnet.PHYSICAL_NETWORK: network[pnet.PHYSICAL_NETWORK],
pnet.SEGMENTATION_ID: network[pnet.SEGMENTATION_ID]}]
del network[pnet.NETWORK_TYPE]
del network[pnet.PHYSICAL_NETWORK]
del network[pnet.SEGMENTATION_ID]
return False
if validators.is_attr_set(network.get(mpnet_apidef.SEGMENTS)):
return True
def _delete_backend_network(self, moref, dvs_id=None):
"""Deletes the backend NSX network.
This can either be a VXLAN or a VLAN network. The type is determined
by the prefix of the moref.
The dvs_id is relevant only if it is a vlan network
"""
if moref.startswith(PORTGROUP_PREFIX):
self.nsx_v.delete_port_group(dvs_id, moref)
else:
self.nsx_v.delete_virtual_wire(moref)
def _get_vlan_network_name(self, net_data, dvs_id):
if net_data.get('name') is None:
net_data['name'] = ''
# Maximum name length is 80 characters. 'id' length is 36, so the
# maximum prefix (dvs-id plus name) length is 43
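# Illustrative example (hypothetical values): dvs_id 'dvs-12' and
# network name 'web' yield the backend name
# 'dvs-12-web-<neutron-net-uuid>'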
if net_data['name'] == '':
prefix = dvs_id[:43]
else:
prefix = ('%s-%s' % (dvs_id, net_data['name']))[:43]
return '%s-%s' % (prefix, net_data['id'])
def _update_network_teaming(self, dvs_id, net_id, net_moref):
if self._vcm:
try:
h, switch = self.nsx_v.vcns.get_vdn_switch(dvs_id)
except Exception:
LOG.warning('DVS %s not registered on NSX. Unable to '
'update teaming for network %s',
dvs_id, net_id)
return
try:
self._vcm.update_port_groups_config(
dvs_id, net_id, net_moref,
self._vcm.update_port_group_spec_teaming,
switch)
except Exception as e:
LOG.error('Unable to update teaming information for '
'net %(net_id)s. Error: %(e)s',
{'net_id': net_id, 'e': e})
def _create_vlan_network_at_backend(self, net_data, dvs_id):
network_name = self._get_vlan_network_name(net_data, dvs_id)
segment = net_data[mpnet_apidef.SEGMENTS][0]
vlan_tag = 0
if (segment.get(pnet.NETWORK_TYPE) ==
c_utils.NsxVNetworkTypes.VLAN):
vlan_tag = segment.get(pnet.SEGMENTATION_ID, 0)
portgroup = {'vlanId': vlan_tag,
'networkBindingType': 'Static',
'networkName': network_name,
'networkType': 'Isolation'}
config_spec = {'networkSpec': portgroup}
try:
h, c = self.nsx_v.vcns.create_port_group(dvs_id,
config_spec)
except Exception as e:
error = (_("Failed to create port group on DVS: %(dvs_id)s. "
"Reason: %(reason)s") % {'dvs_id': dvs_id,
'reason': e.response})
raise nsx_exc.NsxPluginException(err_msg=error)
self._update_network_teaming(dvs_id, net_data['id'], c)
return c
def _get_dvs_ids(self, physical_network, default_dvs):
"""Extract DVS-IDs provided in the physical network field.
If the physical network attribute is not set, return the pre-configured
dvs-id from the nsx.ini file, otherwise convert the physical network
string to a list of unique DVS-IDs.
"""
if not validators.is_attr_set(physical_network):
return [default_dvs]
# Return unique DVS-IDs only and ignore duplicates
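# e.g. (hypothetical input) 'dvs-1,dvs-2, dvs-1' -> ['dvs-1', 'dvs-2']
# (ordering is not guaranteed since a set is used)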
return list(set(
dvs.strip() for dvs in physical_network.split(',') if dvs))
def _add_member_to_security_group(self, sg_id, vnic_id):
with locking.LockManager.get_lock('neutron-security-ops' + str(sg_id)):
try:
self.nsx_v.vcns.add_member_to_security_group(
sg_id, vnic_id)
LOG.info("Added %(sg_id)s member to NSX security "
"group %(vnic_id)s",
{'sg_id': sg_id, 'vnic_id': vnic_id})
except Exception:
with excutils.save_and_reraise_exception():
LOG.error("NSX security group %(sg_id)s member add "
"failed %(vnic_id)s.",
{'sg_id': sg_id,
'vnic_id': vnic_id})
def _add_security_groups_port_mapping(self, session, vnic_id,
added_sgids):
if vnic_id is None or added_sgids is None:
return
for add_sg in added_sgids:
nsx_sg_id = nsx_db.get_nsx_security_group_id(session, add_sg,
moref=True)
if nsx_sg_id is None:
LOG.warning("NSX security group not found for %s", add_sg)
else:
self._add_member_to_security_group(nsx_sg_id, vnic_id)
def _remove_member_from_security_group(self, sg_id, vnic_id):
with locking.LockManager.get_lock('neutron-security-ops' + str(sg_id)):
try:
h, c = self.nsx_v.vcns.remove_member_from_security_group(
sg_id, vnic_id)
except Exception:
LOG.debug("NSX security group %(nsx_sg_id)s member "
"delete failed %(vnic_id)s",
{'nsx_sg_id': sg_id,
'vnic_id': vnic_id})
def _delete_security_groups_port_mapping(self, session, vnic_id,
deleted_sgids):
if vnic_id is None or deleted_sgids is None:
return
# Remove the vnic from the security groups that were removed from the port
for del_sg in deleted_sgids:
nsx_sg_id = nsx_db.get_nsx_security_group_id(session, del_sg,
moref=True)
if nsx_sg_id is None:
LOG.warning("NSX security group not found for %s", del_sg)
else:
self._remove_member_from_security_group(nsx_sg_id, vnic_id)
def _update_security_groups_port_mapping(self, session, port_id,
vnic_id, current_sgids,
new_sgids):
new_sgids = new_sgids or []
current_sgids = current_sgids or []
# If no vnic binding is found, nothing can be done, so return
if vnic_id is None:
return
deleted_sgids = set()
added_sgids = set()
# Find all security groups deleted from the port binding
for curr_sg in current_sgids:
if curr_sg not in new_sgids:
deleted_sgids.add(curr_sg)
# Find all security groups added to the port binding
for new_sg in new_sgids:
if new_sg not in current_sgids:
added_sgids.add(new_sg)
self._delete_security_groups_port_mapping(session, vnic_id,
deleted_sgids)
self._add_security_groups_port_mapping(session, vnic_id,
added_sgids)
def _get_port_vnic_id(self, port_index, device_id):
# The vnic-id format which is expected by NSXv
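# e.g. (hypothetical values) device_id 'vm-01' and port_index 2
# produce 'vm-01.002'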
return '%s.%03d' % (device_id, port_index)
def init_availability_zones(self):
self._availability_zones_data = nsx_az.NsxVAvailabilityZones(
use_tvd_config=self._is_sub_plugin)
def _list_availability_zones(self, context, filters=None):
result = {}
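# The result maps (az_name, resource_type) tuples to True for every
# combination that passes the filters below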
for az in self.get_azs_names():
# Add this availability zone as a router & network resource
if filters:
if 'name' in filters and az not in filters['name']:
continue
for res in ['network', 'router']:
if 'resource' not in filters or res in filters['resource']:
result[(az, res)] = True
return result
def _validate_availability_zones_in_obj(self, context, resource_type,
obj_data):
if az_def.AZ_HINTS in obj_data:
self.validate_availability_zones(context, resource_type,
obj_data[az_def.AZ_HINTS],
force=True)
def validate_availability_zones(self, context, resource_type,
availability_zones, force=False):
"""Verify that the availability zones exist, and only 1 hint
was set.
"""
# This method is called directly from this plugin but also from
# registered callbacks
if self._is_sub_plugin and not force:
# validation should be done together for both plugins
return
return self.validate_obj_azs(availability_zones)
def _prepare_spoofguard_policy(self, network_type, net_data, net_morefs):
# The method will determine if a portgroup is already assigned to a
# spoofguard policy. If so, it will return the predefined policy. If
# not a new spoofguard policy will be created
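# Returns a tuple of (policy_id, predefined), where predefined
# indicates that the policy already existed on the backend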
if network_type == c_utils.NsxVNetworkTypes.PORTGROUP:
pcs = self.nsx_v.vcns.get_spoofguard_policies()[1].get('policies',
[])
for policy in pcs:
for ep in policy['enforcementPoints']:
if ep['id'] == net_morefs[0]:
return policy['policyId'], True
LOG.warning("No spoofguard policy will be created for %s",
net_data['id'])
return None, False
# Always use enabled spoofguard policy. ports with disabled port
# security will be added to the exclude list
sg_policy_id = self.nsx_v.vcns.create_spoofguard_policy(
net_morefs, net_data['id'], True)[1]
return sg_policy_id, False
def _get_physical_network(self, network_type, net_data):
if network_type == c_utils.NsxVNetworkTypes.VXLAN:
return self._get_network_vdn_scope_id(net_data)
# Use the dvs_id of the availability zone
return self._get_network_az_dvs_id(net_data)
def _generate_segment_id(self, context, physical_network, net_data):
bindings = nsxv_db.get_network_bindings_by_physical_net(
context.session, physical_network)
vlan_ranges = self._network_vlans.get(physical_network, [])
if vlan_ranges:
vlan_ids = set()
for vlan_min, vlan_max in vlan_ranges:
vlan_ids |= set(range(vlan_min, vlan_max + 1))
else:
vlan_min = constants.MIN_VLAN_TAG
vlan_max = constants.MAX_VLAN_TAG
vlan_ids = set(range(vlan_min, vlan_max + 1))
used_ids_in_range = set([binding.vlan_id for binding in bindings
if binding.vlan_id in vlan_ids])
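# used_ids_in_range is a subset of vlan_ids, so the symmetric
# difference below is effectively "vlan_ids minus the used ids"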
free_ids = list(vlan_ids ^ used_ids_in_range)
if len(free_ids) == 0:
raise n_exc.NoNetworkAvailable()
net_data[mpnet_apidef.SEGMENTS][0][pnet.SEGMENTATION_ID] = free_ids[0]
def create_network(self, context, network):
net_data = network['network']
tenant_id = net_data['tenant_id']
self._ensure_default_security_group(context, tenant_id)
# Process the provider network extension
provider_type = self._convert_to_transport_zones_dict(net_data)
self._validate_provider_create(context, net_data)
self._validate_availability_zones_in_obj(context, 'network', net_data)
net_data['id'] = str(uuidutils.generate_uuid())
external = net_data.get(extnet_apidef.EXTERNAL)
backend_network = (not validators.is_attr_set(external) or
validators.is_attr_set(external) and not external)
network_type = None
generate_segmenation_id = False
lock_vlan_creation = False
if provider_type is not None:
segment = net_data[mpnet_apidef.SEGMENTS][0]
network_type = segment.get(pnet.NETWORK_TYPE)
if network_type == c_utils.NsxVNetworkTypes.VLAN:
physical_network = segment.get(pnet.PHYSICAL_NETWORK)
if physical_network in self._network_vlans:
lock_vlan_creation = True
if not validators.is_attr_set(
segment.get(pnet.SEGMENTATION_ID)):
generate_segmenation_id = True
if lock_vlan_creation:
with locking.LockManager.get_lock(
'vlan-networking-%s' % physical_network):
if generate_segmenation_id:
self._generate_segment_id(context, physical_network,
net_data)
else:
segmentation_id = segment.get(pnet.SEGMENTATION_ID)
if nsxv_db.get_network_bindings_by_ids(context.session,
segmentation_id, physical_network):
raise n_exc.VlanIdInUse(
vlan_id=segmentation_id,
physical_network=physical_network)
return self._create_network(context, network, net_data,
provider_type, external,
backend_network, network_type)
else:
return self._create_network(context, network, net_data,
provider_type, external,
backend_network, network_type)
def _create_network(self, context, network, net_data,
provider_type, external, backend_network,
network_type):
# An external network should be created in the case that we have a flat,
# vlan or vxlan network. For port groups we do not make any changes.
external_backend_network = (
external and provider_type is not None and
network_type != c_utils.NsxVNetworkTypes.PORTGROUP)
self._validate_network_qos(context, net_data, backend_network)
# Update the transparent vlan if configured
vlt = False
if extensions.is_extension_supported(self, 'vlan-transparent'):
vlt = vlan_apidef.get_vlan_transparent(net_data)
if backend_network or external_backend_network:
#NOTE(abhiraut): Consider refactoring code below to have more
# readable conditions.
if (provider_type is None or
network_type == c_utils.NsxVNetworkTypes.VXLAN):
virtual_wire = {"name": net_data['id'],
"tenantId": "virtual wire tenant"}
if vlt:
virtual_wire["guestVlanAllowed"] = True
config_spec = {"virtualWireCreateSpec": virtual_wire}
vdn_scope_id = self._get_network_vdn_scope_id(net_data)
if provider_type is not None:
segment = net_data[mpnet_apidef.SEGMENTS][0]
if validators.is_attr_set(
segment.get(pnet.PHYSICAL_NETWORK)):
vdn_scope_id = segment.get(pnet.PHYSICAL_NETWORK)
if not (self.nsx_v.vcns.
validate_vdn_scope(vdn_scope_id)):
raise nsx_exc.NsxResourceNotFound(
res_name='vdn_scope_id',
res_id=vdn_scope_id)
h, c = self.nsx_v.vcns.create_virtual_wire(vdn_scope_id,
config_spec)
net_morefs = [c]
dvs_net_ids = [net_data['id']]
elif network_type == c_utils.NsxVNetworkTypes.PORTGROUP:
if vlt:
raise NotImplementedError(_("Transparent support only "
"for VXLANs"))
segment = net_data[mpnet_apidef.SEGMENTS][0]
net_morefs = [segment.get(pnet.PHYSICAL_NETWORK)]
dvs_net_ids = [net_data['name']]
else:
segment = net_data[mpnet_apidef.SEGMENTS][0]
physical_network = segment.get(pnet.PHYSICAL_NETWORK)
# Retrieve the list of dvs-ids from physical network.
# If physical_network attr is not set, retrieve a list
# consisting of a single dvs-id pre-configured in nsx.ini
az_dvs = self._get_network_az_dvs_id(net_data)
dvs_ids = self._get_dvs_ids(physical_network, az_dvs)
dvs_net_ids = []
# Save the list of netmorefs from the backend
net_morefs = []
dvs_pg_mappings = {}
for dvs_id in dvs_ids:
try:
net_moref = self._create_vlan_network_at_backend(
dvs_id=dvs_id,
net_data=net_data)
except nsx_exc.NsxPluginException:
with excutils.save_and_reraise_exception():
# Delete VLAN networks on other DVSes if it
# fails to be created on one DVS and reraise
# the original exception.
for dvsmoref, netmoref in dvs_pg_mappings.items():
self._delete_backend_network(
netmoref, dvsmoref)
dvs_pg_mappings[dvs_id] = net_moref
net_morefs.append(net_moref)
dvs_net_ids.append(self._get_vlan_network_name(
net_data, dvs_id))
if vlt:
try:
self._vcm.update_port_groups_config(
dvs_id, net_data['id'], net_moref,
self._vcm.update_port_group_spec_trunk,
{})
except Exception:
with excutils.save_and_reraise_exception():
# Delete VLAN networks on other DVSes if it
# fails to be created on one DVS and reraise
# the original exception.
for dvsm, netm in dvs_pg_mappings.items():
self._delete_backend_network(netm, dvsm)
try:
net_data[psec.PORTSECURITY] = net_data.get(psec.PORTSECURITY, True)
if not cfg.CONF.nsxv.spoofguard_enabled:
LOG.info("Network %s will have port security disabled",
net_data['id'])
net_data[psec.PORTSECURITY] = False
# Create SpoofGuard policy for network anti-spoofing
# allow_multiple_addresses will be overridden in case the user
# requires allowing multiple or cidr-based allowed address pairs
# defined per port but doesn't want to disable spoofguard globally
sg_policy_id = None
allow_multiple_addresses = (not net_data[psec.PORTSECURITY] and
cfg.CONF.
nsxv.allow_multiple_ip_addresses)
if (cfg.CONF.nsxv.spoofguard_enabled and backend_network and not
allow_multiple_addresses):
# This variable is set as the method below may result in an
# exception and we may need to roll back
predefined = False
sg_policy_id, predefined = self._prepare_spoofguard_policy(
network_type, net_data, net_morefs)
with db_api.CONTEXT_WRITER.using(context):
new_net = super(NsxVPluginV2, self).create_network(context,
network)
self._extension_manager.process_create_network(
context, net_data, new_net)
# Process port security extension
self._process_network_port_security_create(
context, net_data, new_net)
if vlt:
super(NsxVPluginV2, self).update_network(context,
new_net['id'],
{'network': {'vlan_transparent': vlt}})
# update the network with the availability zone hints
if az_def.AZ_HINTS in net_data:
az_hints = az_validator.convert_az_list_to_string(
net_data[az_def.AZ_HINTS])
super(NsxVPluginV2, self).update_network(context,
new_net['id'],
{'network': {az_def.AZ_HINTS: az_hints}})
new_net[az_def.AZ_HINTS] = az_hints
# still no availability zones until subnets creation
new_net[az_def.COLLECTION_NAME] = []
# DB Operations for setting the network as external
self._process_l3_create(context, new_net, net_data)
if (net_data.get(mpnet_apidef.SEGMENTS) and
isinstance(provider_type, bool)):
net_bindings = []
for tz in net_data[mpnet_apidef.SEGMENTS]:
network_type = tz.get(pnet.NETWORK_TYPE)
segmentation_id = tz.get(pnet.SEGMENTATION_ID, 0)
segmentation_id_set = validators.is_attr_set(
segmentation_id)
if not segmentation_id_set:
segmentation_id = 0
physical_network = tz.get(pnet.PHYSICAL_NETWORK, '')
physical_net_set = validators.is_attr_set(
physical_network)
if not physical_net_set:
if external_backend_network:
physical_network = net_morefs[0]
else:
physical_network = self._get_physical_network(
network_type, net_data)
net_bindings.append(nsxv_db.add_network_binding(
context.session, new_net['id'],
network_type,
physical_network,
segmentation_id))
if provider_type:
nsx_db.set_multiprovider_network(context.session,
new_net['id'])
self._extend_network_dict_provider(context, new_net,
provider_type,
net_bindings)
if backend_network or external_backend_network:
# Save moref in the DB for future access
if (network_type == c_utils.NsxVNetworkTypes.VLAN or
network_type == c_utils.NsxVNetworkTypes.FLAT):
# Save netmoref to dvs id mappings for VLAN network
# type for future access.
for dvs_id, netmoref in dvs_pg_mappings.items():
nsx_db.add_neutron_nsx_network_mapping(
session=context.session,
neutron_id=new_net['id'],
nsx_switch_id=netmoref,
dvs_id=dvs_id)
else:
for net_moref in net_morefs:
nsx_db.add_neutron_nsx_network_mapping(
context.session, new_net['id'],
net_moref)
if (cfg.CONF.nsxv.spoofguard_enabled and
backend_network and sg_policy_id):
nsxv_db.map_spoofguard_policy_for_network(
context.session, new_net['id'], sg_policy_id)
except Exception:
with excutils.save_and_reraise_exception():
# Delete the backend network
if backend_network or external_backend_network:
if (cfg.CONF.nsxv.spoofguard_enabled and sg_policy_id and
not predefined):
self.nsx_v.vcns.delete_spoofguard_policy(sg_policy_id)
# Ensure that a predefined portgroup will not be deleted
if network_type == c_utils.NsxVNetworkTypes.VXLAN:
for net_moref in net_morefs:
self._delete_backend_network(net_moref)
elif (network_type and
network_type != c_utils.NsxVNetworkTypes.PORTGROUP):
for dvsmrf, netmrf in dvs_pg_mappings.items():
self._delete_backend_network(netmrf, dvsmrf)
LOG.exception('Failed to create network')
# If init is incomplete, calling _update_qos_on_created_network() will
# result in a deadlock.
# That situation happens when metadata init is creating a network
# on its first execution.
# Therefore we skip this code during init.
if backend_network and self.init_is_complete:
# Update the QOS restrictions of the backend network
self._update_qos_on_created_network(context, net_data, new_net)
# this extra lookup is necessary to get the
# latest db model for the extension functions
net_model = self._get_network(context, new_net['id'])
resource_extend.apply_funcs('networks', new_net, net_model)
return new_net
def _update_qos_on_created_network(self, context, net_data, new_net):
qos_policy_id = qos_com_utils.set_qos_policy_on_new_net(
context, net_data, new_net, allow_external=True)
if qos_policy_id:
# update the QoS data on the backend
self._update_qos_on_backend_network(
context, net_data['id'], qos_policy_id)
def _update_qos_on_backend_network(self, context, net_id, qos_policy_id):
# Translate the QoS rule data into Nsx values
qos_data = qos_utils.NsxVQosRule(
context=context, qos_policy_id=qos_policy_id)
# default dvs for this network
az = self.get_network_az_by_net_id(context, net_id)
az_dvs_id = az.dvs_id
# get the network moref/s from the db
net_mappings = nsx_db.get_nsx_network_mappings(
context.session, net_id)
for mapping in net_mappings:
# update the qos restrictions of the network
self._vcm.update_port_groups_config(
mapping.dvs_id or az_dvs_id,
net_id, mapping.nsx_id,
self._vcm.update_port_group_spec_qos, qos_data)
def _cleanup_dhcp_edge_before_deletion(self, context, net_id):
if self.metadata_proxy_handler:
# Find out if this is the last network which is bound
# to the DHCP Edge. If it is, clean up the Edge metadata config
dhcp_edge = nsxv_db.get_dhcp_edge_network_binding(
context.session, net_id)
if dhcp_edge:
edge_vnics = nsxv_db.get_edge_vnic_bindings_by_edge(
context.session, dhcp_edge['edge_id'])
# If the DHCP Edge is connected to two networks:
# the deleted network and the inter-edge network, we can delete
# the inter-edge interface
if len(edge_vnics) == 2:
rtr_binding = nsxv_db.get_nsxv_router_binding_by_edge(
context.session, dhcp_edge['edge_id'])
if rtr_binding:
rtr_id = rtr_binding['router_id']
az_name = rtr_binding['availability_zone']
md_proxy = self.get_metadata_proxy_handler(az_name)
if md_proxy:
md_proxy.cleanup_router_edge(context, rtr_id)
else:
self.edge_manager.reconfigure_shared_edge_metadata_port(
context, (vcns_const.DHCP_EDGE_PREFIX + net_id)[:36])
def _is_neutron_spoofguard_policy(self, net_id, moref, policy_id):
# A neutron policy will have the network UUID as the name of the
# policy
try:
policy = self.nsx_v.vcns.get_spoofguard_policy(policy_id)[1]
except Exception:
LOG.error("Policy does not exists for %s", policy_id)
# We will not attempt to delete a policy that does not exist
return False
if policy:
for ep in policy['enforcementPoints']:
if ep['id'] == moref and policy['name'] == net_id:
return True
return False
def _validate_internal_network(self, context, network_id):
if nsxv_db.get_nsxv_internal_network_by_id(
context.elevated().session, network_id):
msg = (_("Cannot delete internal network %s or its subnets and "
"ports") % network_id)
raise n_exc.InvalidInput(error_message=msg)
def delete_network(self, context, id):
mappings = nsx_db.get_nsx_network_mappings(context.session, id)
bindings = nsxv_db.get_network_bindings(context.session, id)
if cfg.CONF.nsxv.spoofguard_enabled:
sg_policy_id = nsxv_db.get_spoofguard_policy_id(
context.session, id)
self._validate_internal_network(context, id)
# Update the DHCP edge for metadata and clean the vnic in the DHCP
# edge if there is no other existing port besides the DHCP port
filters = {'network_id': [id]}
ports = self.get_ports(context, filters=filters)
auto_del = [p['id'] for p in ports
if p['device_owner'] in [constants.DEVICE_OWNER_DHCP]]
is_dhcp_backend_deleted = False
if auto_del:
filters = {'network_id': [id], 'enable_dhcp': [True]}
sids = self.get_subnets(context, filters=filters, fields=['id'])
if len(sids) > 0:
try:
self._cleanup_dhcp_edge_before_deletion(context, id)
self.edge_manager.delete_dhcp_edge_service(context, id)
is_dhcp_backend_deleted = True
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception('Failed to delete network')
for port_id in auto_del:
try:
self.delete_port(context.elevated(), port_id,
force_delete_dhcp=True)
except Exception as e:
LOG.warning('Unable to delete port %(port_id)s. '
'Reason: %(e)s',
{'port_id': port_id, 'e': e})
with db_api.CONTEXT_WRITER.using(context):
self._process_l3_delete(context, id)
# Delete the subnet DB entries first if the backend dhcp service was
# deleted, to avoid re-entering the delete_subnet logic and retrying
# to delete the backend dhcp service again.
if is_dhcp_backend_deleted:
subnets = self._get_subnets_by_network(context, id)
for subnet in subnets:
self.base_delete_subnet(context, subnet['id'])
super(NsxVPluginV2, self).delete_network(context, id)
# Do not delete a predefined port group that was attached to
# an external network
if (bindings and
bindings[0].binding_type == c_utils.NsxVNetworkTypes.PORTGROUP):
if cfg.CONF.nsxv.spoofguard_enabled and sg_policy_id:
if self._is_neutron_spoofguard_policy(id, mappings[0].nsx_id,
sg_policy_id):
self.nsx_v.vcns.delete_spoofguard_policy(sg_policy_id)
return
# Delete the backend network if necessary. This is done after
# the base operation as that may throw an exception in the case
# that there are ports defined on the network.
if mappings:
if cfg.CONF.nsxv.spoofguard_enabled and sg_policy_id:
self.nsx_v.vcns.delete_spoofguard_policy(sg_policy_id)
edge_utils.check_network_in_use_at_backend(context, id)
for mapping in mappings:
self._delete_backend_network(
mapping.nsx_id, mapping.dvs_id)
def _extend_get_network_dict_provider(self, context, net):
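        # Add the provider network fields and the attached QoS policy id
        # to the network dict.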
self._extend_network_dict_provider(context, net)
net[qos_consts.QOS_POLICY_ID] = qos_com_utils.get_network_policy_id(
context, net['id'])
def get_network(self, context, id, fields=None):
with db_api.CONTEXT_READER.using(context):
            # Go to the plugin DB and fetch the network
            network = self._get_network(context, id)
            # Don't do field selection here otherwise we won't be able
            # to add the provider network fields
net_result = self._make_network_dict(network,
context=context)
self._extend_get_network_dict_provider(context, net_result)
return db_utils.resource_fields(net_result, fields)
def get_networks(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
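        # Extend each network dict with the provider network fields and the
        # attached QoS policy before applying the requested field selection.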
filters = filters or {}
with db_api.CONTEXT_READER.using(context):
networks = (
super(NsxVPluginV2, self).get_networks(
context, filters, fields, sorts,
limit, marker, page_reverse))
for net in networks:
self._extend_get_network_dict_provider(context, net)
return (networks if not fields else
[db_utils.resource_fields(network,
fields) for network in networks])
def _raise_if_updates_provider_attributes(self, original_network, attrs,
az_dvs):
"""Raise exception if provider attributes are present.
For the NSX-V we want to allow changing the physical network of
vlan type networks.
"""
if (original_network.get(pnet.NETWORK_TYPE) ==
c_utils.NsxVNetworkTypes.VLAN and
validators.is_attr_set(
attrs.get(pnet.PHYSICAL_NETWORK)) and
not validators.is_attr_set(
attrs.get(pnet.NETWORK_TYPE)) and
not validators.is_attr_set(
attrs.get(pnet.SEGMENTATION_ID))):
return
c_utils.raise_if_updates_provider_attributes(attrs)
def _update_vlan_network_dvs_ids(self, context, network,
new_physical_network, az_dvs):
"""Update the dvs ids of a vlan provider network
The new values will replace the old ones.
Actions done in this function:
- Create a backend network for each new dvs
- Delete the backend networks for the old ones.
- Return the relevant information in order to later also update
the spoofguard policy, qos, network object and DB
Returns:
- dvs_list_changed True/False
- dvs_pg_mappings - updated mapping of the elements dvs->moref
"""
dvs_pg_mappings = {}
current_dvs_ids = set(self._get_dvs_ids(
network[pnet.PHYSICAL_NETWORK], az_dvs))
new_dvs_ids = set(self._get_dvs_ids(
new_physical_network, az_dvs))
additional_dvs_ids = new_dvs_ids - current_dvs_ids
removed_dvs_ids = current_dvs_ids - new_dvs_ids
if not additional_dvs_ids and not removed_dvs_ids:
# no changes in the list of DVS
return False, dvs_pg_mappings
self._convert_to_transport_zones_dict(network)
# get the current mapping as in the DB
db_mapping = nsx_db.get_nsx_network_mappings(
context.session, network['id'])
for db_map in db_mapping:
dvs_pg_mappings[db_map.dvs_id] = db_map.nsx_id
# delete old backend networks
for dvs_id in removed_dvs_ids:
nsx_id = dvs_pg_mappings.get(dvs_id)
if nsx_id:
                # NOTE(asarfaty): This may fail if there is a VM deployed,
                # but since the delete is done offline we will not catch it
                # here
self._delete_backend_network(nsx_id, dvs_id)
del dvs_pg_mappings[dvs_id]
# create all the new backend networks
for dvs_id in additional_dvs_ids:
try:
net_moref = self._create_vlan_network_at_backend(
dvs_id=dvs_id,
net_data=network)
except nsx_exc.NsxPluginException:
with excutils.save_and_reraise_exception():
                    # If the network fails to be created on one DVS,
                    # delete the VLAN networks already created on the
                    # other DVSes and reraise the original exception.
for dvsmoref, netmoref in dvs_pg_mappings.items():
self._delete_backend_network(netmoref, dvsmoref)
dvs_pg_mappings[dvs_id] = net_moref
return True, dvs_pg_mappings
def _update_network_validate_port_sec(self, context, net_id, net_attrs):
if psec.PORTSECURITY in net_attrs and not net_attrs[psec.PORTSECURITY]:
# check if there are compute ports on this network
port_filters = {'network_id': [net_id],
'device_owner': ['compute:None']}
compute_ports = self.get_ports(context, filters=port_filters)
if compute_ports:
LOG.warning("Disabling port-security on network %s would "
"require instance in the network to have VM tools "
"installed in order for security-groups to "
"function properly.", net_id)
def allow_multiple_addresses_configure_spoofguard(self, context, id,
net_attrs, net_morefs):
# User requires multiple addresses to be assigned to compute port
# and therefore, the spoofguard policy is being removed for this net.
orig_net = self.get_network(context, id)
if not net_attrs[psec.PORTSECURITY]:
sg_pol = nsxv_db.get_spoofguard_policy_id(context.session,
orig_net['id'])
if sg_pol:
try:
self.nsx_v.vcns.delete_spoofguard_policy(sg_pol)
nsxv_db.del_nsxv_spoofguard_binding(context.session,
sg_pol)
except Exception as e:
LOG.error('Unable to delete spoofguard policy '
'%(sg_policy)s. Error: %(e)s',
{'sg_policy': sg_pol, 'e': e})
else:
LOG.warning("Could not locate spoofguard policy for "
"network %s", id)
# User requires port-security-enabled set to True and thus requires
# spoofguard installed for this network
else:
# Verifying that all ports are legal, i.e. not CIDR/subnet, and
# that the same IP address is not used multiple times for a given
# neutron network
filters = {'network_id': [id]}
ports = self.get_ports(context, filters=filters)
valid_ports = []
ip_addresses = set()
if ports:
for port in ports:
for ap in port[addr_apidef.ADDRESS_PAIRS]:
if len(ap['ip_address'].split('/')) > 1:
msg = _('Port %s contains CIDR/subnet, '
'which is not supported at the '
'backend ') % port['id']
raise n_exc.BadRequest(
resource='networks',
msg=msg)
set_len = len(ip_addresses)
ip_addresses.add(ap['ip_address'])
if len(ip_addresses) == set_len:
msg = _('IP address %(ip)s is allowed '
'by more than 1 logical port. '
'This is not supported by the '
'backend. Port security cannot '
'be enabled for network %(net)s') % {
'ip': ap['ip_address'], 'net': id}
LOG.error(msg)
raise n_exc.BadRequest(
resource='networks', msg=msg)
valid_ports.append(port)
try:
sg_policy_id, predefined = (
self._prepare_spoofguard_policy(
orig_net.get(pnet.NETWORK_TYPE), orig_net,
net_morefs))
if sg_policy_id:
nsxv_db.map_spoofguard_policy_for_network(
context.session,
orig_net['id'], sg_policy_id)
except Exception as e:
                msg = _('Unable to create spoofguard policy, error: '
                        '%(error)s, net_morefs=%(net_morefs)s, '
                        'network_id=%(network_id)s') % {
                    'error': e, 'net_morefs': net_morefs,
                    'network_id': orig_net['id']}
                raise n_exc.BadRequest(resource='spoofguard policy', msg=msg)
try:
for port in valid_ports:
vnic_idx = port.get(ext_vnic_idx.VNIC_INDEX)
device_id = port['device_id']
vnic_id = self._get_port_vnic_id(vnic_idx,
device_id)
self._update_vnic_assigned_addresses(context.session, port,
vnic_id)
except Exception as e:
msg = _('Unable to add port to spoofguard policy error '
'%s') % e
raise n_exc.BadRequest(resource='spoofguard policy',
msg=msg)
def update_network(self, context, id, network):
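        # Update flow: validate the requested changes (provider attributes,
        # admin_state_up, QoS, port-security), reconfigure spoofguard if
        # port-security changed, recreate the backend networks if the
        # physical network of a VLAN network changed, update the neutron DB,
        # and finally reflect QoS and name changes on the backend.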
net_attrs = network['network']
orig_net = self.get_network(context, id)
az_dvs = self._get_network_az_dvs_id(orig_net)
self._raise_if_updates_provider_attributes(
orig_net, net_attrs, az_dvs)
if net_attrs.get("admin_state_up") is False:
raise NotImplementedError(_("admin_state_up=False networks "
"are not supported."))
ext_net = self._get_network(context, id)
if not ext_net.external:
net_morefs = nsx_db.get_nsx_switch_ids(context.session, id)
else:
net_morefs = []
backend_network = bool(len(net_morefs) > 0)
self._validate_network_qos(context, net_attrs, backend_network)
# PortSecurity validation checks
psec_update = (psec.PORTSECURITY in net_attrs and
orig_net[psec.PORTSECURITY] !=
net_attrs[psec.PORTSECURITY])
if psec_update:
self._update_network_validate_port_sec(context, id, net_attrs)
# Change spoofguard accordingly - either remove if
# port-security-enabled was set to false or add (with relevant ports)
# if set to true.
if (cfg.CONF.nsxv.spoofguard_enabled and
cfg.CONF.nsxv.allow_multiple_ip_addresses and psec_update):
self.allow_multiple_addresses_configure_spoofguard(context, id,
net_attrs,
net_morefs)
# Check if the physical network of a vlan provider network was updated
updated_morefs = False
if (net_attrs.get(pnet.PHYSICAL_NETWORK) and
orig_net.get(pnet.NETWORK_TYPE) ==
c_utils.NsxVNetworkTypes.VLAN):
(updated_morefs,
new_dvs_pg_mappings) = self._update_vlan_network_dvs_ids(
context,
orig_net,
net_attrs[pnet.PHYSICAL_NETWORK],
az_dvs)
if updated_morefs:
net_morefs = list(new_dvs_pg_mappings.values())
with db_api.CONTEXT_WRITER.using(context):
net_res = super(NsxVPluginV2, self).update_network(context, id,
network)
self._extension_manager.process_update_network(context, net_attrs,
net_res)
self._process_network_port_security_update(
context, net_attrs, net_res)
self._process_l3_update(context, net_res, net_attrs)
self._extend_network_dict_provider(context, net_res)
if updated_morefs:
# delete old mapping before recreating all
nsx_db.delete_neutron_nsx_network_mapping(
session=context.session, neutron_id=id)
# Save netmoref to dvs id mappings for VLAN network
# type for future access.
dvs_ids = []
for dvs_id, netmoref in new_dvs_pg_mappings.items():
nsx_db.add_neutron_nsx_network_mapping(
session=context.session,
neutron_id=id,
nsx_switch_id=netmoref,
dvs_id=dvs_id)
dvs_ids.append(dvs_id)
all_dvs = ', '.join(sorted(dvs_ids))
net_res[pnet.PHYSICAL_NETWORK] = all_dvs
vlan_id = net_res.get(pnet.SEGMENTATION_ID)
nsxv_db.update_network_binding_phy_uuid(
context.session, id,
net_res.get(pnet.NETWORK_TYPE),
vlan_id, all_dvs)
        # Update the SpoofGuard policy if it exists; on failure revert the
        # network to its old state
if (not ext_net.external and
cfg.CONF.nsxv.spoofguard_enabled and updated_morefs):
policy_id = nsxv_db.get_spoofguard_policy_id(context.session, id)
try:
# Always use enabled spoofguard policy. ports with disabled
# port security will be added to the exclude list
self.nsx_v.vcns.update_spoofguard_policy(
policy_id, net_morefs, id, True)
except Exception:
with excutils.save_and_reraise_exception():
revert_update = db_utils.resource_fields(
orig_net, ['shared', psec.PORTSECURITY])
self._process_network_port_security_update(
context, revert_update, net_res)
super(NsxVPluginV2, self).update_network(
context, id, {'network': revert_update})
# Handle QOS updates (Value can be None, meaning to delete the
# current policy), or moref updates with an existing qos policy
if (not ext_net.external and
(qos_consts.QOS_POLICY_ID in net_attrs) or
(updated_morefs and orig_net.get(qos_consts.QOS_POLICY_ID))):
# update the qos data
qos_policy_id = (net_attrs[qos_consts.QOS_POLICY_ID]
if qos_consts.QOS_POLICY_ID in net_attrs
else orig_net.get(qos_consts.QOS_POLICY_ID))
self._update_qos_on_backend_network(context, id, qos_policy_id)
# attach the policy to the network in neutron DB
qos_com_utils.update_network_policy_binding(
context, id, qos_policy_id)
net_res[qos_consts.QOS_POLICY_ID] = (
qos_com_utils.get_network_policy_id(context, id))
        # Handle the case of a network name update - this is only relevant
        # for networks that we create - not portgroup providers
if (net_attrs.get('name') and
orig_net.get('name') != net_attrs.get('name') and
(orig_net.get(pnet.NETWORK_TYPE) ==
c_utils.NsxVNetworkTypes.VLAN or
orig_net.get(pnet.NETWORK_TYPE) ==
c_utils.NsxVNetworkTypes.FLAT)):
# Only update networks created by plugin
mappings = nsx_db.get_nsx_network_mappings(context.session, id)
for mapping in mappings:
network_name = self._get_vlan_network_name(net_res,
mapping.dvs_id)
try:
self._vcm.update_port_groups_config(
mapping.dvs_id, id, mapping.nsx_id,
self._dvs.update_port_group_spec_name,
network_name)
except Exception as e:
LOG.error('Unable to update name for net %(net_id)s. '
'Error: %(e)s',
{'net_id': id, 'e': e})
return net_res
def _validate_unique_address_pair_across_network(self, context,
port, address_pairs):
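        # The backend does not support the same IP address being used by
        # more than one port on the same network, so fixed IPs and allowed
        # address pairs must be unique across all ports of the network.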
network_id = port['network_id']
filters = {'network_id': [network_id]}
valid_existing_ports = []
existing_fixed_and_addr_pairs = []
for exist_port in self.get_ports(context, filters=filters):
if exist_port['id'] != port['id']:
valid_existing_ports.append(exist_port)
for valid_port in valid_existing_ports:
for fixed in valid_port.get('fixed_ips', []):
existing_fixed_and_addr_pairs.append(fixed['ip_address'])
for addr_pair in valid_port.get('allowed_address_pairs', []):
existing_fixed_and_addr_pairs.append(addr_pair['ip_address'])
fixed_ips_list = port.get('fixed_ips', [])
# validate ip collision with fixed ips
for fixed_ip in fixed_ips_list:
ip = fixed_ip.get('ip_address')
if ip in existing_fixed_and_addr_pairs:
                msg = _('IP address %s entered as a fixed IP already '
                        'exists in the network. Duplicate IP addresses '
                        'are not supported at the backend') % ip
raise n_exc.InvalidInput(error_message=msg)
# validate ip collision with address pair
for pair in address_pairs:
ip = pair.get('ip_address')
if ip in existing_fixed_and_addr_pairs:
                msg = _('IP address %s entered as an address pair already '
                        'exists in the network. Duplicate IP addresses '
                        'are not supported at the backend') % ip
raise n_exc.InvalidInput(error_message=msg)
def _verify_cidr_defined(self, attrs):
for ap in attrs[addr_apidef.ADDRESS_PAIRS]:
            # Reject address pairs given as a CIDR/subnet rather than a
            # single IP address
if len(ap['ip_address'].split('/')) > 1:
msg = _('NSXv does not support CIDR as address pairs')
raise n_exc.BadRequest(resource='address_pairs',
msg=msg)
def _validate_address_pairs(self, context, attrs, db_port):
# Ground rule - if spoofguard exists: all tests must take place.
policy_id = nsxv_db.get_spoofguard_policy_id(context.session,
db_port['network_id'])
if policy_id:
self._validate_unique_address_pair_across_network(
context, db_port, attrs[addr_apidef.ADDRESS_PAIRS])
self._verify_cidr_defined(attrs)
# Check that the MAC address is the same as the port
for ap in attrs[addr_apidef.ADDRESS_PAIRS]:
if ('mac_address' in ap and
ap['mac_address'] != db_port['mac_address']):
msg = _('Address pairs should have same MAC as the '
'port')
raise n_exc.BadRequest(resource='address_pairs', msg=msg)
def _is_mac_in_use(self, context, network_id, mac_address):
        # Override this method as the backend doesn't support using the same
        # MAC twice on any network, not just this specific network
admin_ctx = context.elevated()
return bool(admin_ctx.session.query(models_v2.Port).
filter(models_v2.Port.mac_address == mac_address).
count())
@db_api.retry_db_errors
def base_create_port(self, context, port):
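        # Create the port in the neutron DB only, with no NSX backend
        # configuration, retrying on DB errors.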
created_port = super(NsxVPluginV2, self).create_port(context, port)
self._extension_manager.process_create_port(
context, port['port'], created_port)
return created_port
def _validate_extra_dhcp_options(self, opts):
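        # Only options from SUPPORTED_DHCP_OPTIONS or numeric option codes
        # below 255 are accepted; 'classless-static-route' (option 121)
        # values are validated separately below.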
if not opts:
return
for opt in opts:
opt_name = opt['opt_name']
opt_val = opt['opt_value']
if opt_name == 'classless-static-route':
# separate validation for option121
if opt_val is not None:
try:
net, ip = opt_val.split(',')
except Exception:
msg = (_("Bad value %(val)s for DHCP option "
"%(name)s") % {'name': opt_name,
'val': opt_val})
raise n_exc.InvalidInput(error_message=msg)
elif opt_name not in vcns_const.SUPPORTED_DHCP_OPTIONS:
try:
option = int(opt_name)
except ValueError:
option = 255
if option >= 255:
msg = (_("DHCP option %s is not supported") % opt_name)
LOG.error(msg)
raise n_exc.InvalidInput(error_message=msg)
def _validate_port_qos(self, port):
if validators.is_attr_set(port.get(qos_consts.QOS_POLICY_ID)):
err_msg = (_("Cannot configure QOS directly on ports"))
raise n_exc.InvalidInput(error_message=err_msg)
def _assert_on_lb_port_admin_state(self, port_data, original_port,
device_owner):
if device_owner in [constants.DEVICE_OWNER_LOADBALANCERV2,
oct_const.DEVICE_OWNER_OCTAVIA]:
orig_state = original_port.get("admin_state_up")
new_state = port_data.get("admin_state_up")
if new_state is not None and (orig_state != new_state):
err_msg = _("Changing admin_state for "
"loadbalancer's internal port is not supported")
LOG.warning(err_msg)
raise n_exc.InvalidInput(error_message=err_msg)
def create_port(self, context, port):
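        # Create the port in the neutron DB (including port-security,
        # security groups, address pairs, extra DHCP options and
        # MAC-learning state) inside a DB transaction, and only then create
        # the DHCP static binding on the NSX backend, reverting the port if
        # the backend configuration fails.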
port_data = port['port']
dhcp_opts = port_data.get(ext_edo.EXTRADHCPOPTS)
self._validate_extra_dhcp_options(dhcp_opts)
self._validate_max_ips_per_port(port_data.get('fixed_ips', []),
port_data.get('device_owner'))
self._validate_port_qos(port_data)
direct_vnic_type = self._validate_port_vnic_type(
context, port_data, port_data['network_id'])
with db_api.CONTEXT_WRITER.using(context):
# First we allocate port in neutron database
neutron_db = super(NsxVPluginV2, self).create_port(context, port)
self._extension_manager.process_create_port(
context, port_data, neutron_db)
            # Port security is decided based on the port's vnic_type and the
            # network's port-security state (unless explicitly requested
            # otherwise by the user).
if not cfg.CONF.nsxv.spoofguard_enabled:
port_security = False
else:
port_security = port_data.get(psec.PORTSECURITY)
if validators.is_attr_set(port_security):
                    # Ports with 'direct' and 'direct-physical' vnic types
                    # require port-security to be disabled.
if direct_vnic_type and port_security:
err_msg = _("Security features are not supported for "
"ports with direct/direct-physical VNIC type")
raise n_exc.InvalidInput(error_message=err_msg)
elif direct_vnic_type:
# Implicitly disable port-security for direct vnic types.
port_security = False
else:
port_security = self._get_network_security_binding(
context, neutron_db['network_id'])
port_data[psec.PORTSECURITY] = port_security
provider_sg_specified = (validators.is_attr_set(
port_data.get(provider_sg.PROVIDER_SECURITYGROUPS)) and
port_data[provider_sg.PROVIDER_SECURITYGROUPS] != [])
has_security_groups = (
self._check_update_has_security_groups(port))
self._process_port_port_security_create(
context, port_data, neutron_db)
self._process_portbindings_create_and_update(
context, port_data, neutron_db)
# Update fields obtained from neutron db (eg: MAC address)
port["port"].update(neutron_db)
has_ip = self._ip_on_port(neutron_db)
# allowed address pair checks
attrs = port[port_def.RESOURCE_NAME]
if self._check_update_has_allowed_address_pairs(port):
if not port_security:
raise addr_exc.AddressPairAndPortSecurityRequired()
self._validate_address_pairs(context, attrs, neutron_db)
else:
# remove ATTR_NOT_SPECIFIED
attrs[addr_apidef.ADDRESS_PAIRS] = []
# security group extension checks
if has_ip and port_security:
self._ensure_default_security_group_on_port(context, port)
(sgids, ssgids) = self._get_port_security_groups_lists(
context, port)
elif (has_security_groups or provider_sg_specified):
LOG.error("Port has conflicting port security status and "
"security groups")
raise psec_exc.PortSecurityAndIPRequiredForSecurityGroups()
else:
sgids = ssgids = []
self._process_port_create_security_group(context, port_data, sgids)
self._process_port_create_provider_security_group(context,
port_data,
ssgids)
neutron_db[addr_apidef.ADDRESS_PAIRS] = (
self._process_create_allowed_address_pairs(
context, neutron_db,
attrs.get(addr_apidef.ADDRESS_PAIRS)))
self._process_port_create_extra_dhcp_opts(
context, port_data, dhcp_opts)
# MAC learning - only update DB. Can only update NSX when the port
# exists - this is done via update
if validators.is_attr_set(port_data.get(mac_ext.MAC_LEARNING)):
if (((has_ip and port_security) or
has_security_groups or provider_sg_specified) and
port_data.get(mac_ext.MAC_LEARNING) is True):
err_msg = _("Security features are not supported for "
"mac learning")
raise n_exc.InvalidInput(error_message=err_msg)
self._create_mac_learning_state(context, port_data)
elif mac_ext.MAC_LEARNING in port_data:
# This is due to the fact that the default is
# ATTR_NOT_SPECIFIED
port_data.pop(mac_ext.MAC_LEARNING)
try:
# Configure NSX - this should not be done in the DB transaction
# Configure the DHCP Edge service
self._create_dhcp_static_binding(context, port_data)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception('Failed to create port')
# Revert what we have created and raise the exception
self.delete_port(context, port_data['id'])
# this extra lookup is necessary to get the
# latest db model for the extension functions
port_model = self._get_port(context, port_data['id'])
resource_extend.apply_funcs('ports', port_data, port_model)
self._remove_provider_security_groups_from_list(port_data)
self._extend_nsx_port_dict_binding(context, port_data)
kwargs = {'context': context, 'port': neutron_db}
registry.notify(resources.PORT, events.AFTER_CREATE, self, **kwargs)
return port_data
def _make_port_dict(self, port, fields=None,
process_extensions=True, bulk=False):
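        # Provider security groups are stripped from the regular
        # security-groups list of the returned port dict.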
port_data = super(NsxVPluginV2, self)._make_port_dict(
port, fields=fields,
process_extensions=process_extensions, bulk=bulk)
self._remove_provider_security_groups_from_list(port_data)
return port_data
def _get_port_subnet_mask(self, context, port):
if len(port['fixed_ips']) > 0 and 'subnet_id' in port['fixed_ips'][0]:
subnet_id = port['fixed_ips'][0]['subnet_id']
subnet = self._get_subnet_object(context, subnet_id)
return str(netaddr.IPNetwork(subnet.cidr).netmask)
def _get_port_fixed_ip_addr(self, port):
if (len(port['fixed_ips']) > 0 and
'ip_address' in port['fixed_ips'][0]):
return port['fixed_ips'][0]['ip_address']
def _count_no_sec_ports_for_device_id(self, context, device_id):
"""Find how many compute ports with this device ID and no security
there are, so we can decide on adding / removing the device from
the exclusion list
"""
filters = {'device_id': [device_id]}
device_ports = self.get_ports(context.elevated(), filters=filters)
ports = [port for port in device_ports
if port['device_owner'].startswith('compute')]
return len([p for p in ports
if validators.is_attr_set(p.get(ext_vnic_idx.VNIC_INDEX)) and
not p[psec.PORTSECURITY]])
def _add_vm_to_exclude_list(self, context, device_id, port_id):
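        # A VM is added to the NSX exclude list once it has a compute port
        # with port-security disabled, so that the backend firewall is not
        # enforced on it. On NSX versions older than 6.3.3 an explicit
        # firewall sync is needed afterwards.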
if (self._vcm and
cfg.CONF.nsxv.use_exclude_list):
# first time for this vm (we expect the count to be 1 already
# because the DB was already updated)
if (self._count_no_sec_ports_for_device_id(
context, device_id) <= 1):
vm_moref = self._vcm.get_vm_moref(device_id)
if vm_moref is not None:
try:
LOG.info("Add VM %(dev)s to exclude list on "
"behalf of port %(port)s: added to "
"list",
{"dev": device_id, "port": port_id})
self.nsx_v.vcns.add_vm_to_exclude_list(vm_moref)
except vsh_exc.RequestBad as e:
LOG.error("Failed to add vm %(device)s "
"moref %(moref)s to exclude list: "
"%(err)s",
{'device': device_id, 'moref': vm_moref,
'err': e})
else:
LOG.info("Add VM %(dev)s to exclude list on behalf of "
"port %(port)s: VM already in list",
{"dev": device_id, "port": port_id})
loose_ver = version.LooseVersion(self.nsx_v.vcns.get_version())
if loose_ver < version.LooseVersion('6.3.3'):
LOG.info("Syncing firewall")
self.nsx_v.vcns.sync_firewall()
def _remove_vm_from_exclude_list(self, context, device_id, port_id,
expected_count=0):
if (self._vcm and
cfg.CONF.nsxv.use_exclude_list):
            # No ports left in the DB (expected count is 0 or 1 depending
            # on whether the DB was already updated), so we can remove the
            # VM from the backend exclude list
if (self._count_no_sec_ports_for_device_id(
context, device_id) <= expected_count):
vm_moref = self._vcm.get_vm_moref(device_id)
if vm_moref is not None:
try:
LOG.info("Remove VM %(dev)s from exclude list on "
"behalf of port %(port)s: removed from "
"list",
{"dev": device_id, "port": port_id})
self.nsx_v.vcns.delete_vm_from_exclude_list(vm_moref)
except vsh_exc.RequestBad as e:
LOG.error("Failed to delete vm %(device)s "
"moref %(moref)s from exclude list: "
"%(err)s",
{'device': device_id, 'moref': vm_moref,
'err': e})
else:
LOG.info("Remove VM %(dev)s from exclude list on behalf "
"of port %(port)s: other ports still in list",
{"dev": device_id, "port": port_id})
def update_port(self, context, id, port):
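        # Serialize updates of the same port, and for compute ports also
        # serialize with other operations on ports of the same device (VM).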
with locking.LockManager.get_lock('port-update-%s' % id):
original_port = super(NsxVPluginV2, self).get_port(context, id)
self._extend_get_port_dict_qos_and_binding(context, original_port)
is_compute_port = self._is_compute_port(original_port)
device_id = original_port['device_id']
if is_compute_port and device_id:
# Lock on the device ID to make sure we do not change/delete
# ports of the same device at the same time
with locking.LockManager.get_lock(
'port-device-%s' % device_id):
return self._update_port(context, id, port, original_port,
is_compute_port, device_id)
else:
return self._update_port(context, id, port, original_port,
is_compute_port, device_id)
def _update_dhcp_address(self, context, network_id):
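        # Recalculate the DHCP address groups of the network and update the
        # DHCP edge service accordingly, under a per-network lock.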
with locking.LockManager.get_lock('dhcp-update-%s' % network_id):
address_groups = self._create_network_dhcp_address_group(
context, network_id)
self.edge_manager.update_dhcp_edge_service(
context, network_id, address_groups=address_groups)
def _nsx_update_mac_learning(self, context, port):
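        # Reflect the port MAC-learning state on the backend: enable the
        # security-policy override on each of the network port groups, then
        # update the port-level security policy with the requested
        # MAC-learning value.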
net_id = port['network_id']
# default dvs for this network
az = self.get_network_az_by_net_id(context, net_id)
az_dvs_id = az.dvs_id
# get the network moref/s from the db
net_mappings = nsx_db.get_nsx_network_mappings(
context.session, net_id)
for mapping in net_mappings:
dvs_id = mapping.dvs_id or az_dvs_id
try:
self._vcm.update_port_groups_config(
dvs_id, net_id, mapping.nsx_id,
self._vcm.update_port_group_security_policy, True)
except Exception as e:
LOG.error("Unable to update network security override "
"policy: %s", e)
return
self._vcm.update_port_security_policy(
dvs_id, net_id, mapping.nsx_id,
port['device_id'], port['mac_address'],
port[mac_ext.MAC_LEARNING])
def _update_port(self, context, id, port, original_port, is_compute_port,
device_id):
attrs = port[port_def.RESOURCE_NAME]
port_data = port['port']
dhcp_opts = port_data.get(ext_edo.EXTRADHCPOPTS)
self._validate_extra_dhcp_options(dhcp_opts)
self._validate_port_qos(port_data)
if addr_apidef.ADDRESS_PAIRS in attrs:
self._validate_address_pairs(context, attrs, original_port)
self._validate_max_ips_per_port(
port_data.get('fixed_ips', []),
port_data.get('device_owner', original_port['device_owner']))
orig_has_port_security = (cfg.CONF.nsxv.spoofguard_enabled and
original_port[psec.PORTSECURITY])
orig_device_owner = original_port.get('device_owner')
self._assert_on_lb_port_admin_state(port_data, original_port,
orig_device_owner)
port_mac_change = port_data.get('mac_address') is not None
port_ip_change = port_data.get('fixed_ips') is not None
device_owner_change = port_data.get('device_owner') is not None
# We do not support updating the port ip and device owner together
if port_ip_change and device_owner_change:
msg = (_('Cannot set fixed ips and device owner together for port '
'%s') % original_port['id'])
raise n_exc.BadRequest(resource='port', msg=msg)
# Check if port security has changed
port_sec_change = False
has_port_security = orig_has_port_security
if (psec.PORTSECURITY in port_data and
port_data[psec.PORTSECURITY] != original_port[psec.PORTSECURITY]):
port_sec_change = True
has_port_security = (cfg.CONF.nsxv.spoofguard_enabled and
port_data[psec.PORTSECURITY])
# Address pairs require port security
if (not has_port_security and
(original_port[addr_apidef.ADDRESS_PAIRS] or
addr_apidef.ADDRESS_PAIRS in attrs)):
msg = _('Address pairs require port security enabled')
raise n_exc.BadRequest(resource='port', msg=msg)
# TODO(roeyc): create a method '_process_vnic_index_update' from the
# following code block
# Process update for vnic-index
vnic_idx = port_data.get(ext_vnic_idx.VNIC_INDEX)
# Only set the vnic index for a compute VM
if validators.is_attr_set(vnic_idx) and is_compute_port:
# Update database only if vnic index was changed
if original_port.get(ext_vnic_idx.VNIC_INDEX) != vnic_idx:
self._set_port_vnic_index_mapping(
context, id, device_id, vnic_idx)
vnic_id = self._get_port_vnic_id(vnic_idx, device_id)
self._add_security_groups_port_mapping(
context.session, vnic_id,
original_port[ext_sg.SECURITYGROUPS] +
original_port[provider_sg.PROVIDER_SECURITYGROUPS])
if has_port_security:
LOG.debug("Assigning vnic port fixed-ips: port %s, "
"vnic %s, with fixed-ips %s", id, vnic_id,
original_port['fixed_ips'])
self._update_vnic_assigned_addresses(
context.session, original_port, vnic_id)
if (cfg.CONF.nsxv.use_default_block_all and
not original_port[ext_sg.SECURITYGROUPS]):
self._add_member_to_security_group(
self.sg_container_id, vnic_id)
else:
# Add vm to the exclusion list, since it has no port security
self._add_vm_to_exclude_list(context, device_id, id)
# if service insertion is enabled - add this vnic to the service
# insertion security group
if self._si_handler.enabled and original_port[psec.PORTSECURITY]:
self._add_member_to_security_group(self._si_handler.sg_id,
vnic_id)
provider_sgs_specified = validators.is_attr_set(
port_data.get(provider_sg.PROVIDER_SECURITYGROUPS))
delete_provider_sg = provider_sgs_specified and (
port_data[provider_sg.PROVIDER_SECURITYGROUPS] != [])
delete_security_groups = self._check_update_deletes_security_groups(
port)
has_security_groups = self._check_update_has_security_groups(port)
comp_owner_update = ('device_owner' in port_data and
port_data['device_owner'].startswith('compute:'))
direct_vnic_type = self._validate_port_vnic_type(
context, port_data, original_port['network_id'])
if direct_vnic_type and has_port_security:
err_msg = _("Security features are not supported for "
"ports with direct/direct-physical VNIC type")
raise n_exc.InvalidInput(error_message=err_msg)
old_mac_learning_state = original_port.get(mac_ext.MAC_LEARNING)
if has_port_security:
if ((mac_ext.MAC_LEARNING in port_data and